xref: /xnu-12377.81.4/osfmk/vm/vm_compressor.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_compressor_internal.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache_internal.h>
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_map_internal.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_algorithms_internal.h>
#include <vm/vm_compressor_backing_store_internal.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_compressor_pager_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_far.h>
#include <mach/mach_host.h>             /* for host_info() */
#if DEVELOPMENT || DEBUG
#include <kern/hvg_hypercall.h>
#include <vm/vm_compressor_info.h>         /* for c_segment_info */
#endif
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>
#include <san/kasan.h>
#include <sys/kern_memorystatus_xnu.h>
#include <os/atomic_private.h>
#include <vm/vm_log.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>

#if defined(__x86_64__)
#include <i386/misc_protos.h>
#endif
#if defined(__arm64__)
#include <arm/machine_routines.h>
#endif
#if HAS_MTE
#include <arm64/mte_xnu.h>
#include <arm64/vm_mte_compress.h>
#endif /* HAS_MTE */

#include <IOKit/IOHibernatePrivate.h>

/*
 * The segment buffer size is a tradeoff.
 * A larger buffer leads to faster I/O throughput, better compression ratios
 * (since fewer bytes are wasted at the end of the segment),
 * and less overhead (both in time and space).
 * However, a smaller buffer causes less swap I/O when the system is
 * overcommitted, because a higher percentage of each swapped-in segment is
 * actually accessed before it goes back out to storage.
 *
 * So on systems without swap, a larger segment is a clear win.
 * On systems with swap, the choice is murkier. Empirically, we've
 * found that a 64KB segment provides a better tradeoff both in terms of
 * performance and swap writes than a 256KB segment on systems with fast SSDs
 * and a HW compression block.
 */
#define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
#if XNU_TARGET_OS_OSX && defined(__arm64__)
#define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
#else
#define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
uint32_t c_seg_bufsize;

uint32_t c_seg_max_pages; /* maximum number of pages the compressed data of a segment can take */
uint32_t c_seg_off_limit; /* if we've reached this offset while filling a segment, don't bother trying to fill it
                           * further because another compressed page is unlikely to fit; same units as c_nextoffset */
uint32_t c_seg_allocsize, c_seg_slot_var_array_min_len;

extern boolean_t vm_darkwake_mode;
extern zone_t vm_page_zone;

#if DEVELOPMENT || DEBUG
/* sysctl defined in bsd/dev/arm64/sysctl.c */
static event_t debug_cseg_wait_event = NULL;
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
#endif /* CONFIG_FREEZE */

#if POPCOUNT_THE_COMPRESSED_DATA
boolean_t popcount_c_segs = TRUE;

static inline uint32_t
vmc_pop(uintptr_t ins, int sz)
{
	uint32_t rv = 0;

	if (__probable(popcount_c_segs == FALSE)) {
		return 0xDEAD707C;
	}

	while (sz >= 16) {
		uint32_t rv1, rv2;
		uint64_t *ins64 = (uint64_t *) ins;
		uint64_t *ins642 = (uint64_t *) (ins + 8);
		rv1 = __builtin_popcountll(*ins64);
		rv2 = __builtin_popcountll(*ins642);
		rv += rv1 + rv2;
		sz -= 16;
		ins += 16;
	}

	while (sz >= 4) {
		uint32_t *ins32 = (uint32_t *) ins;
		rv += __builtin_popcount(*ins32);
		sz -= 4;
		ins += 4;
	}

	while (sz > 0) {
		char *ins8 = (char *)ins;
		rv += __builtin_popcount(*ins8);
		sz--;
		ins++;
	}
	return rv;
}
#endif

#if VALIDATE_C_SEGMENTS
boolean_t validate_c_segs = TRUE;
#endif
/*
 * vm_compressor_mode has a hierarchy of control to set its value.
 * boot-args are checked first, then device-tree, and finally
 * the default value that is defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */

#if !XNU_TARGET_OS_OSX

#if CONFIG_FREEZE
int     vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
struct  freezer_context freezer_context_global;
#else /* CONFIG_FREEZE */
int     vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
#endif /* CONFIG_FREEZE */

#else /* !XNU_TARGET_OS_OSX */
int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;

#endif /* !XNU_TARGET_OS_OSX */

TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
boolean_t             vm_compressor_is_active = 0;
boolean_t             vm_compressor_available = 0;

extern uint64_t vm_swap_get_max_configured_space(void);
extern void     vm_pageout_io_throttle(void);

#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
static unsigned int vmc_hash(char *, int);
boolean_t checksum_c_segs = TRUE;

unsigned int
vmc_hash(char *cp, int len)
{
	unsigned int result;
	if (__probable(checksum_c_segs == FALSE)) {
		return 0xDEAD7A37;
	}
	vm_memtag_disable_checking();
	result = hash_string(cp, len);
	vm_memtag_enable_checking();
	return result;
}
#endif

#define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
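
/*
 * Editor's illustration (not part of the original source): c_size cannot
 * represent PAGE_SIZE itself, so an incompressible page stored uncompressed
 * is encoded as PAGE_SIZE - 1 and decoded back. With 16KB pages:
 *
 *	PACK_C_SIZE(cs, 16384);	// stores 16383 in cs->c_size
 *	UNPACK_C_SIZE(cs);	// yields 16384 again
 *	PACK_C_SIZE(cs, 1200);	// sizes < PAGE_SIZE are stored as-is
 *	UNPACK_C_SIZE(cs);	// yields 1200
 *
 * A consequence of the encoding is that a raw c_size of PAGE_SIZE - 1 also
 * decodes as PAGE_SIZE.
 */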


struct c_sv_hash_entry {
	union {
		struct  {
			uint32_t        c_sv_he_ref;
			uint32_t        c_sv_he_data;
		} c_sv_he;
		uint64_t        c_sv_he_record;
	} c_sv_he_un;
};

#define he_ref  c_sv_he_un.c_sv_he.c_sv_he_ref
#define he_data c_sv_he_un.c_sv_he.c_sv_he_data
#define he_record c_sv_he_un.c_sv_he_record

#define C_SV_HASH_MAX_MISS      32
#define C_SV_HASH_SIZE          ((1 << 10))
#define C_SV_HASH_MASK          ((1 << 10) - 1)

#if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
#define C_SV_CSEG_ID            ((1 << 21) - 1)
#else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
#define C_SV_CSEG_ID            ((1 << 22) - 1)
#endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */

/* elements of c_segments array */
union c_segu {
	c_segment_t     c_seg;
	uintptr_t       c_segno;  /* index of the next element in the segments free-list; c_free_segno_head is the head */
};

#define C_SLOT_ASSERT_PACKABLE(ptr) \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);

#define C_SLOT_PACK_PTR(ptr) \
	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)

#define C_SLOT_UNPACK_PTR(cslot) \
	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
    VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
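
/*
 * Editor's note: the packing macros form a reversible transform for any
 * pointer that satisfies C_SLOT_ASSERT_PACKABLE, i.e. after
 * slot->c_packed_ptr = C_SLOT_PACK_PTR(ptr), C_SLOT_UNPACK_PTR(slot)
 * returns the original c_slot_mapping_t, letting the slot hold the mapping
 * back-pointer in the compact c_packed_ptr field instead of a full pointer.
 */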

uint32_t        c_segment_count = 0;       /* count of all allocated c_segments across all queues */
uint32_t        c_segment_count_max = 0;   /* maximum value c_segment_count has ever reached */

uint64_t        c_generation_id = 0;
uint64_t        c_generation_id_flush_barrier;

boolean_t       hibernate_no_swapspace = FALSE;
boolean_t       hibernate_flush_timed_out = FALSE;
clock_sec_t     hibernate_flushing_deadline = 0;

#if RECORD_THE_COMPRESSED_DATA
/* buffer used as an intermediate stage before writing to file */
char    *c_compressed_record_sbuf;  /* start */
char    *c_compressed_record_ebuf;  /* end */
char    *c_compressed_record_cptr;  /* next buffered write */
#endif

/* the different queues a c_segment can be in via c_age_list */
queue_head_t    c_age_list_head;
queue_head_t    c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
queue_head_t    c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
queue_head_t    c_swapio_list_head;
queue_head_t    c_swappedout_list_head;
queue_head_t    c_swappedout_sparse_list_head;
queue_head_t    c_major_list_head;
queue_head_t    c_filling_list_head;
queue_head_t    c_bad_list_head;

/* count of each of the queues above */
uint32_t        c_age_count = 0;
uint32_t        c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
uint32_t        c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
uint32_t        c_swapio_count = 0;
uint32_t        c_swappedout_count = 0;
uint32_t        c_swappedout_sparse_count = 0;
uint32_t        c_major_count = 0;
uint32_t        c_filling_count = 0;
uint32_t        c_empty_count = 0;
uint32_t        c_bad_count = 0;

/* a c_segment can be on the minor-compact queue (via c_list) as well as on one of the queues above (via c_age_list) */
queue_head_t    c_minor_list_head;
uint32_t        c_minor_count = 0;

int             c_overage_swapped_count = 0;
int             c_overage_swapped_limit = 0;

int             c_seg_fixed_array_len;   /* number of slots in the c_segment inline slots array */
union  c_segu   *c_segments;             /* array of all c_segments; not all of it may be populated */
vm_offset_t     c_buffers;               /* starting address of all compressed data pointed to by c_segment.c_store.c_buffer */
vm_size_t       c_buffers_size;          /* total size allocated in c_buffers */
caddr_t         c_segments_next_page;    /* next page to populate for extending c_segments */
boolean_t       c_segments_busy;
uint32_t        c_segments_available;    /* how many segments are in populated memory (used or free); populated size of the c_segments array */
uint32_t        c_segments_limit;        /* max size of c_segments array */
uint32_t        c_segments_nearing_limit;

uint32_t        c_segment_svp_in_hash;
uint32_t        c_segment_svp_hash_succeeded;
uint32_t        c_segment_svp_hash_failed;
uint32_t        c_segment_svp_zero_compressions;
uint32_t        c_segment_svp_nonzero_compressions;
uint32_t        c_segment_svp_zero_decompressions;
uint32_t        c_segment_svp_nonzero_decompressions;

uint32_t        c_segment_noncompressible_pages;

uint32_t        c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor, including SV (single value) pages */
#if CONFIG_FREEZE
int32_t         c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
int32_t         c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
uint32_t        c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
#endif /* CONFIG_FREEZE */

uint32_t        c_segment_pages_compressed_limit;
uint32_t        c_segment_pages_compressed_nearing_limit;
uint32_t        c_free_segno_head = (uint32_t)-1;   /* head of free list of c_segment pointers in c_segments */

uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t        vm_compressor_catchup_threshold_divisor = 10;

uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;

#define         C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))

LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);

boolean_t       decompressions_blocked = FALSE;

zone_t          compressor_segment_zone;
int             c_compressor_swap_trigger = 0;

uint32_t        compressor_cpus;
char            *compressor_scratch_bufs;

struct vm_compressor_kdp_state vm_compressor_kdp_state;

clock_sec_t     start_of_sample_period_sec = 0;
clock_nsec_t    start_of_sample_period_nsec = 0;
clock_sec_t     start_of_eval_period_sec = 0;
clock_nsec_t    start_of_eval_period_nsec = 0;
uint32_t        sample_period_decompression_count = 0;
uint32_t        sample_period_compression_count = 0;
uint32_t        last_eval_decompression_count = 0;
uint32_t        last_eval_compression_count = 0;

#define         DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)

boolean_t       vm_swapout_ripe_segments = FALSE;
uint32_t        vm_ripe_target_age = (60 * 60 * 48);

uint32_t        swapout_target_age = 0;
uint32_t        age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t        overage_decompressions_during_sample_period = 0;


void            do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t       fastwake_warmup = FALSE;
boolean_t       fastwake_recording_in_progress = FALSE;
uint64_t        dont_trim_until_ts = 0;

uint64_t        c_segment_warmup_count;
uint64_t        first_c_segment_to_warm_generation_id = 0;
uint64_t        last_c_segment_to_warm_generation_id = 0;
boolean_t       hibernate_flushing = FALSE;

_Atomic uint64_t c_segment_input_bytes = 0;
_Atomic uint64_t c_segment_compressed_bytes = 0;
_Atomic uint64_t compressor_bytes_used = 0;

/* Keeps track of the most recent timestamp for when major compaction finished. */
mach_timespec_t major_compact_ts;

struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE]  __attribute__ ((aligned(8)));

static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
static void vm_compressor_process_special_swapped_in_segments_locked(void);

struct vm_compressor_swapper_stats vmcs_stats;

static void vm_compressor_process_major_segments(bool);

void compute_swapout_target_age(void);

boolean_t c_seg_coalesce(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

uint64_t vm_available_memory(void);

/*
 * Get the address of a given entry in the c_segments array
 */
static inline union c_segu *
c_segments_get(uint32_t segno)
{
	return VM_FAR_ADD_PTR_UNBOUNDED(c_segments, segno);
}

/*
 * indicate the need to do a major compaction if
 * the overall set of in-use compression segments
 * becomes sparse... on systems that support pressure
 * driven swapping, this will also cause swapouts to
 * be initiated.
 */
static bool
vm_compressor_needs_to_major_compact(void)
{
	uint32_t        incore_seg_count;

	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;

	/*
	 * first condition:
	 *   compare the number of segments in use against the number of segments
	 *   that can ever be allocated... if we don't have much data in the
	 *   compressor, we don't need to care about space wasted in holes
	 * second condition:
	 *   first term:
	 *   - (incore_seg_count * c_seg_max_pages) is the maximum number of pages
	 *     that all resident segments could hold in their buffers
	 *   - VM_PAGE_COMPRESSOR_COUNT is the number of pages they actually hold
	 *   -- subtracting the two gives the number of pages wasted as holes
	 *      because segments are not full
	 *   second term:
	 *   - 1/8 of the maximum number of pages these segments could hold
	 *   so the comparison asks: is more than 1/8 of the space wasted?
	 */

	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
	    ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
	    ((incore_seg_count / 8) * c_seg_max_pages)) {
		return true;
	}
	return false;
}
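
/*
 * Worked example (editor's illustration; the numbers are hypothetical):
 * with c_seg_max_pages = 4 (64KB buffers, 16KB pages), 1000 resident
 * segments could hold 4000 compressed pages. If VM_PAGE_COMPRESSOR_COUNT
 * is 3400, then 600 pages are wasted in holes, which exceeds 1/8 of the
 * capacity (4000 / 8 = 500), so a major compaction is requested as long as
 * the first condition (c_segment_count >= c_segments_nearing_limit / 8)
 * also holds.
 */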

uint32_t
vm_compressor_get_swapped_segment_count(void)
{
	return c_swappedout_count + c_swappedout_sparse_count;
}

uint32_t
vm_compressor_incore_fragmentation_wasted_pages(void)
{
	/* return one of the components of the calculation in vm_compressor_needs_to_major_compact() */
	uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
	return (incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT;
}

TUNABLE_WRITEABLE(uint64_t, vm_compressor_minor_fragmentation_threshold_pct, "vm_compressor_minor_frag_threshold_pct", 10);

static bool
vm_compressor_needs_to_minor_compact(void)
{
	uint32_t compactible_seg_count = os_atomic_load(&c_minor_count, relaxed);
	if (compactible_seg_count == 0) {
		return false;
	}

	bool is_pressured = AVAILABLE_NON_COMPRESSED_MEMORY <
	    VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD;
	if (!is_pressured) {
		return false;
	}

	uint64_t bytes_used = os_atomic_load(&compressor_bytes_used, relaxed);
	uint64_t bytes_total = VM_PAGE_COMPRESSOR_COUNT * PAGE_SIZE_64;
	uint64_t bytes_frag = bytes_total - bytes_used;
	bool is_fragmented = bytes_frag >
	    bytes_total * vm_compressor_minor_fragmentation_threshold_pct / 100;

	return is_fragmented;
}
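
/*
 * Worked example (editor's illustration; the numbers are hypothetical):
 * with the default 10% threshold, a pool of VM_PAGE_COMPRESSOR_COUNT =
 * 100000 pages has bytes_total = 100000 * PAGE_SIZE. If
 * compressor_bytes_used only covers the equivalent of 88000 pages,
 * bytes_frag amounts to 12000 pages' worth (12% > 10%), so minor
 * compaction is warranted once the system is also under pressure
 * (AVAILABLE_NON_COMPRESSED_MEMORY below the compact threshold).
 */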

uint64_t
vm_available_memory(void)
{
	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
}

uint32_t
vm_compressor_pool_size(void)
{
	return VM_PAGE_COMPRESSOR_COUNT;
}

uint32_t
vm_compressor_fragmentation_level(void)
{
	const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
	if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
		return 0;
	}
	return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
}

uint32_t
vm_compression_ratio(void)
{
	if (vm_compressor_pool_size() == 0) {
		return UINT32_MAX;
	}
	return c_segment_pages_compressed / vm_compressor_pool_size();
}
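
/*
 * Editor's note: this is integer division of input pages over pool pages,
 * so e.g. 30000 uncompressed pages held in a 10000-page pool reports a
 * ratio of 3, and any true ratio below 1 truncates to 0.
 */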

uint32_t
vm_compressor_pages_compressed(void)
{
#if CONFIG_FREEZE
	if (freezer_incore_cseg_acct) {
		return os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	}
#endif /* CONFIG_FREEZE */
	return os_atomic_load(&c_segment_pages_compressed, relaxed);
}

bool
vm_compressor_compressed_pages_nearing_limit(void)
{
	return vm_compressor_pages_compressed() > c_segment_pages_compressed_nearing_limit;
}

static bool
vm_compressor_segments_nearing_limit(void)
{
	uint64_t segments;

#if CONFIG_FREEZE
	if (freezer_incore_cseg_acct) {
		if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
			segments = 0;
		}
		if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
			segments = 0;
		}
	} else {
		segments = os_atomic_load(&c_segment_count, relaxed);
	}
#else /* CONFIG_FREEZE */
	segments = c_segment_count;
#endif /* CONFIG_FREEZE */

	return segments > c_segments_nearing_limit;
}

bool
vm_compressor_low_on_space(void)
{
	return vm_compressor_compressed_pages_nearing_limit() ||
	       vm_compressor_segments_nearing_limit();
}


bool
vm_compressor_out_of_space(void)
{
#if CONFIG_FREEZE
	uint64_t incore_seg_count;
	uint32_t incore_compressed_pages;
	if (freezer_incore_cseg_acct) {
		if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
			incore_seg_count = 0;
		}
		if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
			incore_seg_count = 0;
		}
		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	} else {
		incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	}

	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
	    (incore_seg_count > c_segments_incore_limit)) {
		return true;
	}
#else /* CONFIG_FREEZE */
	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
	    (c_segment_count >= c_segments_limit)) {
		return true;
	}
#endif /* CONFIG_FREEZE */
	return false;
}

bool
vm_compressor_is_thrashing(void)
{
	compute_swapout_target_age();

	if (swapout_target_age) {
		c_segment_t     c_seg;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			if (c_seg->c_creation_ts > swapout_target_age) {
				swapout_target_age = 0;
			}
		}
		lck_mtx_unlock_always(c_list_lock);
	}

	return swapout_target_age != 0;
}


int
vm_wants_task_throttled(task_t task)
{
	ledger_amount_t compressed;
	if (task == kernel_task) {
		return 0;
	}

	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
			ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
			compressed >>= VM_MAP_PAGE_SHIFT(task->map);
			if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
				return 1;
			}
		}
	}
	return 0;
}

#if CONFIG_JETSAM
bool            memorystatus_disable_swap(void);
#if CONFIG_PHANTOM_CACHE
extern bool memorystatus_phantom_cache_pressure;
#endif /* CONFIG_PHANTOM_CACHE */
int             compressor_thrashing_induced_jetsam = 0;
int             filecache_thrashing_induced_jetsam = 0;
static boolean_t        vm_compressor_thrashing_detected = FALSE;
#endif /* CONFIG_JETSAM */

void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}

static inline void
cslot_copy(c_slot_t cdst, c_slot_t csrc)
{
#if CHECKSUM_THE_DATA
	cdst->c_hash_data = csrc->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cdst->c_pop_cdata = csrc->c_pop_cdata;
#endif
	cdst->c_size = csrc->c_size;
#if HAS_MTE
	cdst->c_mte_size = csrc->c_mte_size;
#endif
	cdst->c_packed_ptr = csrc->c_packed_ptr;
#if defined(__arm64__)
	cdst->c_codec = csrc->c_codec;
#endif
}

#if XNU_TARGET_OS_OSX
#define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
#else
#define VM_COMPRESSOR_MAX_POOL_SIZE (0)
#endif

static vm_map_size_t compressor_size;
static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
vm_map_t compressor_map;
uint64_t compressor_pool_max_size;
uint64_t compressor_pool_size;
uint32_t compressor_pool_multiplier;

#if CONFIG_CSEG_MPROTECT
/*
 * Compressor segments may be write-protected in development/debug
 * kernels to help debug memory corruption. This incurs significant
 * performance overhead under heavy overcommit and is therefore disabled by
 * default. To debug compressor corruption issues, it can be enabled via boot-arg.
 */
TUNABLE_WRITEABLE(bool, write_protect_c_segs, "c_segment_mprotect", false);
int vm_compressor_test_seg_wp;
#endif /* CONFIG_CSEG_MPROTECT */

#if DEVELOPMENT || DEBUG
uint32_t vm_ktrace_enabled;
#endif /* DEVELOPMENT || DEBUG */

#if (XNU_TARGET_OS_OSX && __arm64__)

#include <IOKit/IOPlatformExpert.h>
#include <sys/random.h>

static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
static thread_call_t csegbufsz_experiment_thread_call;

extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
static void
erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
{
	// Wait for NVRAM to be writable
	if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
		printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
	}

	if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
		printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
	}
	thread_call_free(csegbufsz_experiment_thread_call);
}

static void
erase_csegbufsz_experiment_property_async(void)
{
	csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
		erase_csegbufsz_experiment_property,
		NULL,
		THREAD_CALL_PRIORITY_LOW
		);
	if (csegbufsz_experiment_thread_call == NULL) {
		printf("csegbufsize_experiment_property: Unable to allocate thread call.");
	} else {
		thread_call_enter(csegbufsz_experiment_thread_call);
	}
}

static void
cleanup_csegbufsz_experiment(__unused void *arg0)
{
	char nvram = 0;
	unsigned int len = sizeof(nvram);
	if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
		erase_csegbufsz_experiment_property_async();
	}
}

STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
#endif /* XNU_TARGET_OS_OSX && __arm64__ */

#if CONFIG_JETSAM
extern unsigned int memorystatus_swap_all_apps;
#endif /* CONFIG_JETSAM */

TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);

static void
vm_compressor_set_size(void)
{
	/*
	 * Note that this function may be called multiple times on systems with app swap
	 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
	 * can change based on the size of the swap volume. On these systems, we'll call
	 * this function once early in boot to reserve the maximum amount of VA required
	 * for the compressor submap and then one more time in vm_compressor_init after
	 * determining the swap volume size. We must not compute a larger size the second
	 * time around.
	 */
	vm_size_t       c_segments_arr_size = 0;
	struct c_slot_mapping tmp_slot_ptr;

	/* The segment size can be overridden by a boot-arg */
	if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
#if CONFIG_JETSAM
		if (memorystatus_swap_all_apps) {
			c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
		} else {
			c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
		}
#else
		c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
#endif /* CONFIG_JETSAM */
	}

	vm_compressor_swap_init_swap_file_limit();
	if (vm_compression_limit) {
		compressor_pool_size = ptoa_64(vm_compression_limit);
	}

	compressor_pool_max_size = C_SEG_MAX_LIMIT;
	compressor_pool_max_size *= c_seg_bufsize;

#if XNU_TARGET_OS_OSX

	if (vm_compression_limit == 0) {
		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 16ULL * max_mem;
		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 8ULL * max_mem;
		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 4ULL * max_mem;
		} else {
			compressor_pool_size = 2ULL * max_mem;
		}
	}
	/*
	 * Cap the compressor pool size to a max of 192G
	 */
	if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
		compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
	}
	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 1;
	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 2;
	} else {
		compressor_pool_multiplier = 4;
	}

#else

	if (compressor_pool_max_size > max_mem) {
		compressor_pool_max_size = max_mem;
	}

	if (vm_compression_limit == 0) {
		compressor_pool_size = max_mem;
	}

#if XNU_TARGET_OS_WATCH
	compressor_pool_multiplier = 2;
#elif XNU_TARGET_OS_IOS
	if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 2;
	} else {
		compressor_pool_multiplier = 1;
	}
#else
	compressor_pool_multiplier = 1;
#endif

#endif

	PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
	if (compressor_pool_multiplier < 1) {
		compressor_pool_multiplier = 1;
	}

	if (compressor_pool_size > compressor_pool_max_size) {
		compressor_pool_size = compressor_pool_max_size;
	}

	c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
	c_seg_slot_var_array_min_len = c_seg_max_pages;

#if !defined(__x86_64__)
	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
	c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
#else
	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
	c_seg_allocsize = c_seg_bufsize;
#endif /* !defined(__x86_64__) */

	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
	tmp_slot_ptr.s_cseg = c_segments_limit;
	/* Panic on internal configs */
	assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);

	if (tmp_slot_ptr.s_cseg != c_segments_limit) {
		tmp_slot_ptr.s_cseg = -1;
		c_segments_limit = tmp_slot_ptr.s_cseg - 1; /* limited by segment idx bits in c_slot_mapping */
		compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
	}

	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);

	/* an upper limit on how many input pages the compressor can hold */
	c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);

	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
#if defined(XNU_TARGET_OS_WATCH)
		c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
#else
		if (!vm_compression_limit) {
			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
		}
#endif
	}

	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);

#if CONFIG_FREEZE
	/*
	 * Our in-core limits are based on the size of the compressor pool.
	 * The c_segments_nearing_limit is also based on the compressor pool
	 * size and calculated above.
	 */
	c_segments_incore_limit = c_segments_limit;

	if (freezer_incore_cseg_acct) {
		/*
		 * Add enough segments to track all frozen c_segs that can be stored in swap.
		 */
		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
		tmp_slot_ptr.s_cseg = c_segments_limit;
		/* Panic on internal configs */
		assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
	}
#endif
	/*
	 * Submap needs space for:
	 * - c_segments
	 * - c_buffers
	 * - swap reclamations -- c_seg_bufsize
	 */
	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
	c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));

	compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
	compressor_size += c_compressed_record_sbuf_size;
#endif /* RECORD_THE_COMPRESSED_DATA */
}
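
/*
 * Worked example (editor's illustration; assumes an arm64 Mac with 16GB of
 * DRAM, 16KB pages, no overriding boot-args, and that the C_SEG_MAX_LIMIT
 * cap is not the binding constraint): c_seg_bufsize = 64KB, so
 * compressor_pool_size = 4 * max_mem = 64GB (below the 192GB cap) and
 * compressor_pool_multiplier = 2. On non-x86, c_seg_allocsize =
 * 64KB + 16KB = 80KB, giving c_segments_limit = 64GB / 80KB ~= 838k
 * segments and c_segment_pages_compressed_limit = 838k * 4 * 2 ~= 6.7M
 * input pages.
 */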
STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);

KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
	return compressor_size;
});

bool
osenvironment_is_diagnostics(void)
{
	DTEntry chosen;
	const char *osenvironment;
	unsigned int size;
	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
			return strcmp(osenvironment, "diagnostics") == 0;
		}
	}
	return false;
}

bool
osenvironment_is_device_recovery(void)
{
	DTEntry chosen;
	const char *osenvironment;
	unsigned int size;
	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
			return strcmp(osenvironment, "device-recovery") == 0;
		}
	}
	return false;
}

void
vm_compressor_init(void)
{
	thread_t        thread;
#if RECORD_THE_COMPRESSED_DATA
	vm_size_t       c_compressed_record_sbuf_size = 0;
#endif /* RECORD_THE_COMPRESSED_DATA */

#if DEVELOPMENT || DEBUG || CONFIG_FREEZE
	char bootarg_name[32];
#endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
	__unused uint64_t early_boot_compressor_size = compressor_size;

#if CONFIG_JETSAM
	if (memorystatus_swap_all_apps &&
	    (osenvironment_is_diagnostics() || osenvironment_is_device_recovery())) {
		printf("osenvironment == \"diagnostics\" or \"device-recovery\". Disabling app swap.\n");
		memorystatus_disable_swap();
	}

	if (memorystatus_swap_all_apps) {
		/*
		 * App swap is disabled on devices with small NANDs.
		 * Now that we're no longer in early boot, we can get
		 * the NAND size and re-run vm_compressor_set_size.
		 */
		int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
#if DEVELOPMENT || DEBUG
		if (error != 0) {
			panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
		}
#else
		if (error != 0) {
			vm_log_error("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
		}
#endif /* DEVELOPMENT || DEBUG */
		if (vm_swap_volume_capacity < swap_vol_min_capacity) {
			memorystatus_disable_swap();
		}
		/*
		 * Resize the compressor and swap now that we know the capacity
		 * of the swap volume.
		 */
		vm_compressor_set_size();
		/*
		 * We reserved a chunk of VA early in boot for the compressor submap.
		 * We can't allocate more than that.
		 */
		assert(compressor_size <= early_boot_compressor_size);
	}
#endif /* CONFIG_JETSAM */

#if DEVELOPMENT || DEBUG
	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
		write_protect_c_segs = false;
	}

	int vmcval = 1;
#if defined(XNU_TARGET_OS_WATCH)
	vmcval = 0;
#endif /* XNU_TARGET_OS_WATCH */
	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));

	if (kern_feature_override(KF_COMPRSV_OVRD)) {
		vmcval = 0;
	}

	if (vmcval == 0) {
#if POPCOUNT_THE_COMPRESSED_DATA
		popcount_c_segs = FALSE;
#endif
#if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
		checksum_c_segs = FALSE;
#endif
#if VALIDATE_C_SEGMENTS
		validate_c_segs = FALSE;
#endif
#if CONFIG_CSEG_MPROTECT
		write_protect_c_segs = false;
#endif
	}
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
		freezer_incore_cseg_acct = FALSE;
	}
#endif /* CONFIG_FREEZE */

	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);

#if !XNU_TARGET_OS_OSX
	vm_compressor_minorcompact_threshold_divisor = 20;
	vm_compressor_majorcompact_threshold_divisor = 30;
	vm_compressor_unthrottle_threshold_divisor = 40;
	vm_compressor_catchup_threshold_divisor = 60;
#else /* !XNU_TARGET_OS_OSX */
	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
		vm_compressor_minorcompact_threshold_divisor = 11;
		vm_compressor_majorcompact_threshold_divisor = 13;
		vm_compressor_unthrottle_threshold_divisor = 20;
		vm_compressor_catchup_threshold_divisor = 35;
	} else {
		vm_compressor_minorcompact_threshold_divisor = 20;
		vm_compressor_majorcompact_threshold_divisor = 25;
		vm_compressor_unthrottle_threshold_divisor = 35;
		vm_compressor_catchup_threshold_divisor = 50;
	}
#endif /* !XNU_TARGET_OS_OSX */

	queue_init(&c_bad_list_head);
	queue_init(&c_age_list_head);
	queue_init(&c_minor_list_head);
	queue_init(&c_major_list_head);
	queue_init(&c_filling_list_head);
	queue_init(&c_early_swapout_list_head);
	queue_init(&c_regular_swapout_list_head);
	queue_init(&c_late_swapout_list_head);
	queue_init(&c_swapio_list_head);
	queue_init(&c_early_swappedin_list_head);
	queue_init(&c_regular_swappedin_list_head);
	queue_init(&c_late_swappedin_list_head);
	queue_init(&c_swappedout_list_head);
	queue_init(&c_swappedout_sparse_list_head);

	c_free_segno_head = -1;
	c_segments_available = 0;

	compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
	    compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    KMS_NOFAIL | KMS_PERMANENT | KMS_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR).kmr_submap;

	kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
	    (sizeof(union c_segu) * c_segments_limit),
	    KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR);
	kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
	    KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR);

#if DEVELOPMENT || DEBUG
	if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
		hvg_hcall_set_coredump_data();
	}
#endif

	/*
	 * Pick a good size that will minimize fragmentation in zalloc
	 * by minimizing the fragmentation in a 16k run.
	 *
	 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
	 * making the fragmentation in a 4k page terrible. Using 16k for all
	 * systems matches zalloc() and will minimize fragmentation.
	 */
	uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
	uint32_t cnt  = (16 << 10) / c_segment_size;
	uint32_t frag = (16 << 10) % c_segment_size;

	c_seg_fixed_array_len = c_seg_slot_var_array_min_len;

	while (cnt * sizeof(struct c_slot) < frag) {
		c_segment_size += sizeof(struct c_slot);
		c_seg_fixed_array_len++;
		frag -= cnt * sizeof(struct c_slot);
	}
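
	/*
	 * Worked example (editor's illustration with made-up sizes): if
	 * c_segment_size started at 1000 bytes, cnt = 16384 / 1000 = 16
	 * segments fit in a 16KB run with frag = 384 bytes left over.
	 * Assuming sizeof(struct c_slot) == 8, each loop iteration grows all
	 * 16 segments by one fixed slot at a total cost of 16 * 8 = 128 bytes:
	 * 128 < 384 (grow, frag = 256), 128 < 256 (grow, frag = 128),
	 * 128 < 128 fails and the loop stops, having converted most of the
	 * run's dead space into extra inline slots per segment.
	 */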

	compressor_segment_zone = zone_create("compressor_segment",
	    c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);

	c_segments_busy = FALSE;

	c_segments_next_page = (caddr_t)c_segments;
	vm_compressor_algorithm_init();

	{
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
		size_t bufsize;
		char *buf;

#define BSD_HOST 1
		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		compressor_cpus = hinfo.max_cpus;

		/* allocate the various scratch buffers in one place */
		bufsize = PAGE_SIZE;
		bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
		/* For the panic path */
		bufsize += vm_compressor_get_decode_scratch_size();
#if CONFIG_FREEZE
		bufsize += vm_compressor_get_encode_scratch_size();
#endif
#if RECORD_THE_COMPRESSED_DATA
		bufsize += c_compressed_record_sbuf_size;
#endif

		kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
		    KMA_DATA_SHARED | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
		    VM_KERN_MEMORY_COMPRESSOR);

		/*
		 * vm_compressor_kdp_state.kc_panic_decompressed_page must be page aligned because we access
		 * it through the physical aperture by page number.
		 */
		vm_compressor_kdp_state.kc_panic_decompressed_page = buf;
		vm_compressor_kdp_state.kc_panic_decompressed_page_paddr = kvtophys((vm_offset_t)vm_compressor_kdp_state.kc_panic_decompressed_page);
		vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum = (ppnum_t) atop(vm_compressor_kdp_state.kc_panic_decompressed_page_paddr);
		buf += PAGE_SIZE;
		bufsize -= PAGE_SIZE;

		compressor_scratch_bufs = buf;
		buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
		bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();

		vm_compressor_kdp_state.kc_panic_scratch_buf = buf;
		buf += vm_compressor_get_decode_scratch_size();
		bufsize -= vm_compressor_get_decode_scratch_size();

		/* This is set up before each stackshot in vm_compressor_kdp_init */
		vm_compressor_kdp_state.kc_scratch_bufs = NULL;

#if CONFIG_FREEZE
		freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
		buf += vm_compressor_get_encode_scratch_size();
		bufsize -= vm_compressor_get_encode_scratch_size();
#endif

#if RECORD_THE_COMPRESSED_DATA
		c_compressed_record_sbuf = buf;
		c_compressed_record_cptr = buf;
		c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
		buf += c_compressed_record_sbuf_size;
		bufsize -= c_compressed_record_sbuf_size;
#endif
		assert(bufsize == 0);
	}

	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_compressor_swap_trigger_thread: create failed");
	}
	thread_deallocate(thread);

	if (vm_pageout_internal_start() != KERN_SUCCESS) {
		panic("vm_compressor_init: Failed to start the internal pageout thread.");
	}
	if (VM_CONFIG_SWAP_IS_PRESENT) {
		vm_compressor_swap_init();
	}

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		vm_compressor_is_active = 1;
	}

	vm_compressor_available = 1;

	vm_page_reactivate_all_throttled();

	bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
}

#define COMPRESSOR_KDP_BUFSIZE (\
	(vm_compressor_get_decode_scratch_size() * compressor_cpus) + \
	(PAGE_SIZE * compressor_cpus) + \
	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus) + \
	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus))


/**
 * Initializes the VM compressor in preparation for a stackshot.
 * Stackshot mutex must be held.
 */
kern_return_t
vm_compressor_kdp_init(void)
{
	char *buf;
	kern_return_t err;
	size_t bufsize;
	size_t total_decode_size;

#if DEVELOPMENT || DEBUG
	extern lck_mtx_t stackshot_subsys_mutex;
	lck_mtx_assert(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
#endif /* DEVELOPMENT || DEBUG */

	if (!vm_compressor_available) {
		return KERN_SUCCESS;
	}

	bufsize = COMPRESSOR_KDP_BUFSIZE;

	/* Allocate the per-cpu decompression pages. */
	err = kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
	    KMA_DATA_SHARED | KMA_NOFAIL | KMA_KOBJECT,
	    VM_KERN_MEMORY_COMPRESSOR);

	if (err != KERN_SUCCESS) {
		return err;
	}

	assert(vm_compressor_kdp_state.kc_scratch_bufs == NULL);
	vm_compressor_kdp_state.kc_scratch_bufs = buf;
	total_decode_size = vm_compressor_get_decode_scratch_size() * compressor_cpus;
	buf += total_decode_size;
	bufsize -= total_decode_size;

	/*
	 * vm_compressor_kdp_state.kc_decompressed_pages must be page aligned because we access
	 * them through the physical aperture by page number.
	 */
	assert(vm_compressor_kdp_state.kc_decompressed_pages == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages = buf;
	buf += PAGE_SIZE * compressor_cpus;
	bufsize -= PAGE_SIZE * compressor_cpus;

	/* Scary! This will be aligned, I promise :) */
	assert(((vm_address_t) buf) % _Alignof(addr64_t) == 0);
	assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages_paddr = (addr64_t*) (void*) buf;
	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;

	assert(((vm_address_t) buf) % _Alignof(ppnum_t) == 0);
	assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = (ppnum_t*) (void*) buf;
	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;

	assert(bufsize == 0);

	for (size_t i = 0; i < compressor_cpus; i++) {
		vm_offset_t offset = (vm_offset_t) &vm_compressor_kdp_state.kc_decompressed_pages[i * PAGE_SIZE];
		vm_compressor_kdp_state.kc_decompressed_pages_paddr[i] = kvtophys(offset);
		vm_compressor_kdp_state.kc_decompressed_pages_ppnum[i] = (ppnum_t) atop(vm_compressor_kdp_state.kc_decompressed_pages_paddr[i]);
	}

	return KERN_SUCCESS;
}

/*
 * Frees up compressor buffers used by stackshot.
 * Stackshot mutex must be held.
 */
void
vm_compressor_kdp_teardown(void)
{
	extern lck_mtx_t stackshot_subsys_mutex;
	LCK_MTX_ASSERT(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);

	if (vm_compressor_kdp_state.kc_scratch_bufs == NULL) {
		return;
	}

	/* Deallocate the per-cpu decompression pages. */
	kmem_free(kernel_map, (vm_offset_t) vm_compressor_kdp_state.kc_scratch_bufs, COMPRESSOR_KDP_BUFSIZE);

	vm_compressor_kdp_state.kc_scratch_bufs = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages_paddr = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = NULL;
}

static uint32_t
c_slot_extra_size(c_slot_t cs)
{
#if HAS_MTE
	return vm_mte_compressed_tags_actual_size(cs->c_mte_size);
#else /* HAS_MTE */
#pragma unused(cs)
	return 0;
#endif /* HAS_MTE */
}

#if VALIDATE_C_SEGMENTS

static void
c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
{
	uint16_t        c_indx;
	int32_t         bytes_used;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	c_slot_t        cs;

	if (__probable(validate_c_segs == FALSE)) {
		return;
	}
	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
		c_indx = c_seg->c_firstemptyslot;
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		if (cs == NULL) {
			panic("c_seg_validate:  no slot backing c_firstemptyslot");
		}

		if (cs->c_size) {
			panic("c_seg_validate:  c_firstemptyslot has non-zero size (%d)", cs->c_size);
		}
	}
	bytes_used = 0;

	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		c_size = UNPACK_C_SIZE(cs);

		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));

		bytes_used += c_rounded_size;

#if CHECKSUM_THE_COMPRESSED_DATA
		unsigned csvhash;
		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
		}
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
		unsigned csvpop;
		if (c_size) {
			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
			}
		}
#endif
	}

	if (bytes_used != c_seg->c_bytes_used) {
		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
	}

	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
	}

	if (must_be_compact) {
		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
		}
	}
}

#endif
1447 
1448 
1449 void
c_seg_need_delayed_compaction(c_segment_t c_seg,boolean_t c_list_lock_held)1450 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1451 {
1452 	boolean_t       clear_busy = FALSE;
1453 
1454 	if (c_list_lock_held == FALSE) {
1455 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1456 			C_SEG_BUSY(c_seg);
1457 
1458 			lck_mtx_unlock_always(&c_seg->c_lock);
1459 			lck_mtx_lock_spin_always(c_list_lock);
1460 			lck_mtx_lock_spin_always(&c_seg->c_lock);
1461 
1462 			clear_busy = TRUE;
1463 		}
1464 	}
1465 	assert(c_seg->c_state != C_IS_FILLING);
1466 
1467 	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
1468 		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1469 		c_seg->c_on_minorcompact_q = 1;
1470 		os_atomic_inc(&c_minor_count, relaxed);
1471 	}
1472 	if (c_list_lock_held == FALSE) {
1473 		lck_mtx_unlock_always(c_list_lock);
1474 	}
1475 
1476 	if (clear_busy == TRUE) {
1477 		C_SEG_WAKEUP_DONE(c_seg);
1478 	}
1479 }
1480 
1481 
1482 unsigned int c_seg_moved_to_sparse_list = 0;
1483 
void
c_seg_move_to_sparse_list(c_segment_t c_seg)
{
	boolean_t       clear_busy = FALSE;

	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);

	c_seg_moved_to_sparse_list++;

	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE) {
		C_SEG_WAKEUP_DONE(c_seg);
	}
}


int try_minor_compaction_failed = 0;
int try_minor_compaction_succeeded = 0;

void
c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
{
	assert(c_seg->c_on_minorcompact_q);
	/*
	 * c_seg is currently on the delayed minor compaction
	 * queue and we have c_seg locked... if we can get the
	 * c_list_lock w/o blocking (if we blocked we could deadlock
	 * because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the delayed list and compact it directly
	 * (which will also free it if it turns out to be empty)
	 */
	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_minor_compaction_failed++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	} else {
		try_minor_compaction_succeeded++;

		C_SEG_BUSY(c_seg);
		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
	}
}

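/*
 * Pull c_seg off the minor compaction queue (the caller must have
 * marked it busy) and run a minor compaction on it. Called with
 * c_list_lock held; the list lock is dropped for the compaction and
 * retaken only when 'need_list_lock' is TRUE. 'disallow_page_replacement'
 * brackets the compaction with PAGE_REPLACEMENT_DISALLOWED so pages
 * can't be stolen mid-copy. Returns the number of segments freed (0 or 1).
 */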
int
c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
{
	int     c_seg_freed;

	assert(c_seg->c_busy);
	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));

	/*
	 * check for the case that can occur when we are not swapping
	 * and this segment has been major compacted in the past
	 * and moved to the majorcompact q to remove it from further
	 * consideration... if the occupancy falls too low we need
	 * to put it back on the age_q so that it will be considered
	 * in the next major compaction sweep... if we don't do this
	 * we will eventually run into the c_segments_limit
	 */
	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
	}
	if (!c_seg->c_on_minorcompact_q) {
		if (clear_busy == TRUE) {
			C_SEG_WAKEUP_DONE(c_seg);
		}

		lck_mtx_unlock_always(&c_seg->c_lock);

		return 0;
	}
	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
	c_seg->c_on_minorcompact_q = 0;
	os_atomic_dec(&c_minor_count, relaxed);

	lck_mtx_unlock_always(c_list_lock);

	if (disallow_page_replacement == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);

	if (disallow_page_replacement == TRUE) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}

	if (need_list_lock == TRUE) {
		lck_mtx_lock_spin_always(c_list_lock);
	}

	return c_seg_freed;
}

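/*
 * Used by the debugger/stackshot wait-info path: given the wait event a
 * thread is blocked on (the address of a busy c_segment), report which
 * thread currently holds that segment busy.
 */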
void
kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
{
	c_segment_t c_seg = (c_segment_t) wait_event;

	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
}

#if DEVELOPMENT || DEBUG
int
do_cseg_wedge_thread(void)
{
	struct c_segment c_seg;
	c_seg.c_busy_for_thread = current_thread();

	debug_cseg_wait_event = (event_t) &c_seg;

	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);

	thread_block(THREAD_CONTINUE_NULL);

	return 0;
}

int
do_cseg_unwedge_thread(void)
{
	thread_wakeup(debug_cseg_wait_event);
	debug_cseg_wait_event = NULL;

	return 0;
}
#endif /* DEVELOPMENT || DEBUG */

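/*
 * Sleep (uninterruptibly) until the busy holder calls C_SEG_WAKEUP_DONE.
 * Called with the c_seg lock held; the lock is dropped before blocking.
 */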
void
c_seg_wait_on_busy(c_segment_t c_seg)
{
	c_seg->c_wanted = 1;

	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
	assert_wait((event_t) (c_seg), THREAD_UNINT);

	lck_mtx_unlock_always(&c_seg->c_lock);
	thread_block(THREAD_CONTINUE_NULL);
}

#if CONFIG_FREEZE
/*
 * We don't have the task lock held while updating the task's
 * c_seg queues. We can do that because of the following restrictions:
 *
 * - SINGLE FREEZER CONTEXT:
 *   We 'insert' c_segs into the task list on the task_freeze path.
 *   There can only be one such freeze in progress and the task
 *   isn't disappearing because we have the VM map lock held throughout
 *   and we have a reference on the proc too.
 *
 * - SINGLE TASK DISOWN CONTEXT:
 *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
 *   we don't need the task lock but we need the c_list_lock and the
 *   compressor master lock (shared). We also hold the individual
 *   c_seg locks (exclusive).
 *
 *   If we either:
 *   - can't get the c_seg lock on a try, then we start again because maybe
 *   the c_seg is part of a compaction and might get freed. So we can't trust
 *   that linkage and need to restart our queue traversal.
 *   - OR, we run into a busy c_seg (say being swapped in or freed), we
 *   drop all locks again, wait, and restart our queue traversal.
 *
 * - The new_owner_task below is currently only the kernel or NULL.
 *
 */
void
c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
{
	task_t          owner_task = c_seg->c_task_owner;
	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);

	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);

	if (owner_task) {
		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
		    c_segment_t, c_task_list_next_cseg);
	}

	if (new_owner_task) {
		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
		    c_segment_t, c_task_list_next_cseg);
		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
	}

	c_seg->c_task_owner = new_owner_task;
}

void
task_disown_frozen_csegs(task_t owner_task)
{
	c_segment_t c_seg = NULL, next_cseg = NULL;

again:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
	lck_mtx_lock_spin_always(c_list_lock);

	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
	    c_seg = next_cseg) {
		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);

		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
			lck_mtx_unlock(c_list_lock);
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			goto again;
		}

		if (c_seg->c_busy) {
			lck_mtx_unlock(c_list_lock);
			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			c_seg_wait_on_busy(c_seg);

			goto again;
		}
		assert(c_seg->c_task_owner == owner_task);
		c_seg_update_task_owner(c_seg, kernel_task);
		lck_mtx_unlock_always(&c_seg->c_lock);
	}

	lck_mtx_unlock(c_list_lock);
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}
#endif /* CONFIG_FREEZE */

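/*
 * Move c_seg from its current state/queue to 'new_state', updating the
 * per-queue counts as it goes. The two switches below assert that every
 * transition is one the state machine allows. The caller must hold
 * c_list_lock and, for segments past the filling stage, the c_seg lock.
 */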
void
c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
{
	int     old_state = c_seg->c_state;
	queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
	uint32_t     *donate_swapout_count, *donate_swappedin_count;

	/*
	 * On macOS the donate queue is swapped first, i.e. the c_early_swapout queue.
	 * On other swap-capable platforms, we want to swap those out last. So we
	 * use the c_late_swapout queue.
	 */
#if XNU_TARGET_OS_OSX  /* tag:DONATE */
#if (DEVELOPMENT || DEBUG)
	if (new_state != C_IS_FILLING) {
		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
	}
	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEVELOPMENT || DEBUG */

	donate_swapout_list_head = &c_early_swapout_list_head;
	donate_swapout_count = &c_early_swapout_count;
	donate_swappedin_list_head = &c_early_swappedin_list_head;
	donate_swappedin_count = &c_early_swappedin_count;
#else /* XNU_TARGET_OS_OSX */
	donate_swapout_list_head = &c_late_swapout_list_head;
	donate_swapout_count = &c_late_swapout_count;
	donate_swappedin_list_head = &c_late_swappedin_list_head;
	donate_swappedin_count = &c_late_swappedin_count;
#endif /* XNU_TARGET_OS_OSX */

	switch (old_state) {
	case C_IS_EMPTY:
		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);

		c_empty_count--;
		break;

	case C_IS_FILLING:
		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);

		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
		c_filling_count--;
		break;

	case C_ON_AGE_Q:
		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
		    new_state == C_IS_FREE);

		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_age_count--;
		break;

	case C_ON_SWAPPEDIN_Q:
		if (c_seg->c_has_donated_pages) {
			assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
			queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
			*donate_swappedin_count -= 1;
		} else {
			assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
#if CONFIG_FREEZE
			assert(c_seg->c_has_freezer_pages);
			queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
			c_early_swappedin_count--;
#else /* CONFIG_FREEZE */
			queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
			c_regular_swappedin_count--;
#endif /* CONFIG_FREEZE */
		}
		break;

	case C_ON_SWAPOUT_Q:
		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);

#if CONFIG_FREEZE
		if (c_seg->c_has_freezer_pages) {
			if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
				c_seg_update_task_owner(c_seg, NULL);
			}
			queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
			c_early_swapout_count--;
		} else
#endif /* CONFIG_FREEZE */
		{
			if (c_seg->c_has_donated_pages) {
				queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
				*donate_swapout_count -= 1;
			} else {
				queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
				c_regular_swapout_count--;
			}
		}

		if (new_state == C_ON_AGE_Q) {
			c_seg->c_has_donated_pages = 0;
		}
		thread_wakeup((event_t)&compaction_swapper_running);
		break;

	case C_ON_SWAPIO_Q:
#if CONFIG_FREEZE
		if (c_seg->c_has_freezer_pages) {
			assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
		} else
#endif /* CONFIG_FREEZE */
		{
			if (c_seg->c_has_donated_pages) {
				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
			} else {
				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
			}
		}

		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		c_swapio_count--;
		break;

	case C_ON_SWAPPEDOUT_Q:
		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);

		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		c_swappedout_count--;
		break;

	case C_ON_SWAPPEDOUTSPARSE_Q:
		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);

		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		c_swappedout_sparse_count--;
		break;

	case C_ON_MAJORCOMPACT_Q:
		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);

		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		c_major_count--;
		break;

	case C_ON_BAD_Q:
		assert(new_state == C_IS_FREE);

		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		c_bad_count--;
		break;

	default:
		panic("c_seg %p has bad c_state = %d", c_seg, old_state);
	}

	switch (new_state) {
	case C_IS_FREE:
		assert(old_state != C_IS_FILLING);

		break;

	case C_IS_EMPTY:
		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		c_empty_count++;
		break;

	case C_IS_FILLING:
		assert(old_state == C_IS_EMPTY);

		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
		c_filling_count++;
		break;

	case C_ON_AGE_Q:
		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		assert(!c_seg->c_has_donated_pages);
		if (old_state == C_IS_FILLING) {
			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			if (!queue_empty(&c_age_list_head)) {
				c_segment_t     c_first;

				c_first = (c_segment_t)queue_first(&c_age_list_head);
				c_seg->c_creation_ts = c_first->c_creation_ts;
			}
			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_age_count++;
		break;

	case C_ON_SWAPPEDIN_Q:
	{
		queue_head_t *list_head;

		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
		if (c_seg->c_has_donated_pages) {
			/* An error during swapout can happen while the c_seg is still on the swapio queue */
			list_head = donate_swappedin_list_head;
			*donate_swappedin_count += 1;
		} else {
#if CONFIG_FREEZE
			assert(c_seg->c_has_freezer_pages);
			list_head = &c_early_swappedin_list_head;
			c_early_swappedin_count++;
#else /* CONFIG_FREEZE */
			list_head = &c_regular_swappedin_list_head;
			c_regular_swappedin_count++;
#endif /* CONFIG_FREEZE */
		}

		if (insert_head == TRUE) {
			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
		}
		break;
	}

	case C_ON_SWAPOUT_Q:
	{
		queue_head_t *list_head;

#if CONFIG_FREEZE
		/*
		 * A segment with both identities of frozen + donated pages
		 * will be put on the early swapout queue, i.e. the frozen
		 * identity wins. This is because when both identities are set,
		 * the donation bit is added afterwards, in the
		 * c_current_seg_filled path, for accounting purposes.
		 */
		if (c_seg->c_has_freezer_pages) {
			assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
			list_head = &c_early_swapout_list_head;
			c_early_swapout_count++;
		} else
#endif
		{
			if (c_seg->c_has_donated_pages) {
				assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
				list_head = donate_swapout_list_head;
				*donate_swapout_count += 1;
			} else {
				assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
				list_head = &c_regular_swapout_list_head;
				c_regular_swapout_count++;
			}
		}

		if (insert_head == TRUE) {
			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
		}
		break;
	}

	case C_ON_SWAPIO_Q:
		assert(old_state == C_ON_SWAPOUT_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swapio_count++;
		break;

	case C_ON_SWAPPEDOUT_Q:
		assert(old_state == C_ON_SWAPIO_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swappedout_count++;
		break;

	case C_ON_SWAPPEDOUTSPARSE_Q:
		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		}

		c_swappedout_sparse_count++;
		break;

	case C_ON_MAJORCOMPACT_Q:
		assert(old_state == C_ON_AGE_Q);
		assert(!c_seg->c_has_donated_pages);

		if (insert_head == TRUE) {
			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_major_count++;
		break;

	case C_ON_BAD_Q:
		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_bad_count++;
		break;

	default:
		panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
	}
	c_seg->c_state = new_state;
}


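/*
 * Free a segment when only its own lock is held: drop the c_seg lock,
 * take c_list_lock first (honoring the lock order), retake the c_seg
 * lock, then do the real work in c_seg_free_locked. The caller must have
 * marked the segment busy, which keeps it from disappearing across the
 * window where the c_seg lock is dropped.
 */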
void
c_seg_free(c_segment_t c_seg)
{
	assert(c_seg->c_busy);

	lck_mtx_unlock_always(&c_seg->c_lock);
	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_seg_free_locked(c_seg);
}


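/*
 * Tear a segment down: detach its buffer or swap handle, switch it to
 * C_IS_FREE, then (with no locks held) depopulate the buffer pages or
 * release the swap space on disk. The segment stays busy until
 * vm_swap_free has run, and its segno is returned to the free list only
 * after the buffer range has been depopulated.
 */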
void
c_seg_free_locked(c_segment_t c_seg)
{
	int             segno;
	int             pages_populated = 0;
	int32_t         *c_buffer = NULL;
	uint64_t        c_swap_handle = 0;

	assert(c_seg->c_busy);
	assert(c_seg->c_slots_used == 0);
	assert(!c_seg->c_on_minorcompact_q);
	assert(!c_seg->c_busy_swapping);

	if (c_seg->c_overage_swap == TRUE) {
		c_overage_swapped_count--;
		c_seg->c_overage_swap = FALSE;
	}
	if (!(C_SEG_IS_ONDISK(c_seg))) {
		c_buffer = c_seg->c_store.c_buffer;
	} else {
		c_swap_handle = c_seg->c_store.c_swap_handle;
	}

	c_seg_switch_state(c_seg, C_IS_FREE, FALSE);

	if (c_buffer) {
		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
		c_seg->c_store.c_buffer = NULL;
	} else {
#if CONFIG_FREEZE
		c_seg_update_task_owner(c_seg, NULL);
#endif /* CONFIG_FREEZE */

		c_seg->c_store.c_swap_handle = (uint64_t)-1;
	}

	lck_mtx_unlock_always(&c_seg->c_lock);

	lck_mtx_unlock_always(c_list_lock);

	if (c_buffer) {
		if (pages_populated) {
			kernel_memory_depopulate((vm_offset_t)c_buffer,
			    ptoa(pages_populated), KMA_COMPRESSOR,
			    VM_KERN_MEMORY_COMPRESSOR);
		}
	} else if (c_swap_handle) {
		/*
		 * Free swap space on disk.
		 */
		vm_swap_free(c_swap_handle);
	}
	lck_mtx_lock_spin_always(&c_seg->c_lock);
	/*
	 * c_seg must remain busy until
	 * after the call to vm_swap_free
	 */
	C_SEG_WAKEUP_DONE(c_seg);
	lck_mtx_unlock_always(&c_seg->c_lock);

	segno = c_seg->c_mysegno;

	lck_mtx_lock_spin_always(c_list_lock);
	/*
	 * because the c_buffer is now associated with the segno,
	 * we can't put the segno back on the free list until
	 * after we have depopulated the c_buffer range, or
	 * we run the risk of depopulating a range that is
	 * now being used in one of the compressor heads
	 */
	c_segments_get(segno)->c_segno = c_free_segno_head;
	c_free_segno_head = segno;
	c_segment_count--;

	lck_mtx_unlock_always(c_list_lock);

	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);

	if (c_seg->c_slot_var_array_len) {
		kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
		    c_seg->c_slot_var_array);
	}

	zfree(compressor_segment_zone, c_seg);
}

#if DEVELOPMENT || DEBUG
int c_seg_trim_page_count = 0;
#endif

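/*
 * Trim trailing empty slots off a segment: walk c_nextslot backwards past
 * slots of size 0, then pull c_nextoffset and c_populated_offset in to
 * match the end of the last live slot. On DEVELOPMENT/DEBUG kernels the
 * number of pages made trimmable is tracked in c_seg_trim_page_count.
 */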
void
c_seg_trim_tail(c_segment_t c_seg)
{
	c_slot_t        cs;
	uint32_t        c_size;
	uint32_t        c_offset;
	uint32_t        c_rounded_size;
	uint16_t        current_nextslot;
	uint32_t        current_populated_offset;

	if (c_seg->c_bytes_used == 0) {
		return;
	}
	current_nextslot = c_seg->c_nextslot;
	current_populated_offset = c_seg->c_populated_offset;

	while (c_seg->c_nextslot) {
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));

		c_size = UNPACK_C_SIZE(cs);

		if (c_size) {
			if (current_nextslot != c_seg->c_nextslot) {
				c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);

				c_seg->c_nextoffset = c_offset;
				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);

				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
					c_seg->c_firstemptyslot = c_seg->c_nextslot;
				}
#if DEVELOPMENT || DEBUG
				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
				    PAGE_SIZE);
#endif
			}
			break;
		}
		c_seg->c_nextslot--;
	}
	assert(c_seg->c_nextslot);
}

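/*
 * Minor compaction: slide the live slots down over the empty ones so the
 * segment's compressed data is contiguous again, update each moved slot's
 * backpointer, and depopulate any buffer pages freed at the tail. Skips
 * the copy work when there is less than a page to reclaim, and frees the
 * segment outright if nothing in it is still in use. Returns 1 if the
 * segment was freed, 0 otherwise. Called with the c_seg lock held and the
 * segment marked busy.
 */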
int
c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
{
	c_slot_mapping_t slot_ptr;
	uint32_t        c_offset = 0;
	uint32_t        old_populated_offset;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	uint16_t        c_indx = 0;
	int             i;
	c_slot_t        c_dst;
	c_slot_t        c_src;

	assert(c_seg->c_busy);

	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_START,
	    VM_KERNEL_ADDRHIDE(c_seg), c_seg->c_state,
	    c_seg->c_bytes_unused, c_seg->c_slots_used);


#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, FALSE);
#endif
	if (c_seg->c_bytes_used == 0) {
		c_seg_free(c_seg);
		KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_END,
		    true, 0, 0);
		return 1;
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
		goto done;
	}

/* TODO: assert first emptyslot's c_size is actually 0 */

	C_SEG_MAKE_WRITEABLE(c_seg);

#if VALIDATE_C_SEGMENTS
	c_seg->c_was_minor_compacted++;
#endif
	c_indx = c_seg->c_firstemptyslot;
	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	old_populated_offset = c_seg->c_populated_offset;
	c_offset = c_dst->c_offset;

	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			continue;
		}

		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(c_src));

/* N.B.: This memcpy may be an overlapping copy */
		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);

		cslot_copy(c_dst, c_src);
		c_dst->c_offset = c_offset;

		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
		slot_ptr->s_cindx = c_indx;

		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
		PACK_C_SIZE(c_src, 0);
#if HAS_MTE
		c_src->c_mte_size = 0;
#endif /* HAS_MTE */
		c_indx++;

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	}
	c_seg->c_firstemptyslot = c_indx;
	c_seg->c_nextslot = c_indx;
	c_seg->c_nextoffset = c_offset;
	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
	c_seg->c_bytes_unused = 0;

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, TRUE);
#endif
	if (old_populated_offset > c_seg->c_populated_offset) {
		uint32_t        gc_size;
		int32_t         *gc_ptr;

		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];

		kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
	}

	C_SEG_WRITE_PROTECT(c_seg);

done:
	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_END,
	    false, c_seg->c_bytes_unused, c_seg->c_bytes_used);

	if (clear_busy == TRUE) {
		lck_mtx_lock_spin_always(&c_seg->c_lock);
		C_SEG_WAKEUP_DONE(c_seg);
		lck_mtx_unlock_always(&c_seg->c_lock);
	}
	return 0;
}


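/*
 * Each segment starts with a small fixed-length slot array; once
 * c_nextslot outgrows it, a variable-length array is allocated (doubling
 * in size each time) and the old contents are copied over. The allocation
 * is done with the c_seg lock dropped; the pointer swap happens under it.
 */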
static void
c_seg_alloc_nextslot(c_segment_t c_seg)
{
	struct c_slot   *old_slot_array = NULL;
	struct c_slot   *new_slot_array = NULL;
	int             newlen;
	int             oldlen;

	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
		return;
	}

	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
		oldlen = c_seg->c_slot_var_array_len;
		old_slot_array = c_seg->c_slot_var_array;

		if (oldlen == 0) {
			newlen = c_seg_slot_var_array_min_len;
		} else {
			newlen = oldlen * 2;
		}

		new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (old_slot_array) {
			memcpy(new_slot_array, old_slot_array,
			    sizeof(struct c_slot) * oldlen);
		}

		c_seg->c_slot_var_array_len = newlen;
		c_seg->c_slot_var_array = new_slot_array;

		lck_mtx_unlock_always(&c_seg->c_lock);

		kfree_type(struct c_slot, oldlen, old_slot_array);
	}
}


#define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)

struct vm_major_compact_stats_s {
	uint64_t asked_permission;
	uint64_t compactions;
	uint64_t moved_slots;
	uint64_t moved_bytes;
	uint64_t wasted_space_in_swapouts;
	uint64_t count_of_swapouts;
	uint64_t count_of_freed_segs;
	uint64_t bailed_compactions;
	uint64_t bytes_freed;
	uint64_t runtime_us;
};

struct vm_major_compact_stats_s c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];

int c_seg_major_compact_stats_now = 0;

#define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((c_seg_bufsize * 90) / 100)

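/*
 * Decide whether moving slots from c_seg_src into c_seg_dst is worth it:
 * refuse when both segments are already at least 90% full (nothing is
 * gained by merging two nearly-full segments), or when the destination
 * has run out of offsets or slot indices.
 */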
boolean_t
c_seg_major_compact_ok(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
	vm_pageout_vminfo.vm_compactor_major_compactions_considered++;

	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
		return FALSE;
	}

	if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
		/*
		 * destination segment is full... can't compact
		 */
		return FALSE;
	}

	return TRUE;
}

/*
 * Move slots from src to dst
 * returns TRUE if we can continue compacting further to the same dst segment
 */
boolean_t
c_seg_coalesce(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_slot_mapping_t slot_ptr;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	uint16_t        dst_slot;
	int             i;
	c_slot_t        c_dst;
	c_slot_t        c_src;
	boolean_t       keep_compacting = TRUE;

	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_COALESCE) | DBG_FUNC_START,
	    VM_KERNEL_ADDRHIDE(c_seg_dst), c_seg_dst->c_populated_offset,
	    VM_KERNEL_ADDRHIDE(c_seg_src), c_seg_src->c_populated_offset);

	/*
	 * segments are not locked but they are both marked c_busy
	 * which keeps c_decompress from working on them...
	 * we can safely allocate new pages, move compressed data
	 * from c_seg_src to c_seg_dst and update both c_segments'
	 * state w/o holding the master lock
	 */

	C_SEG_MAKE_WRITEABLE(c_seg_dst);

#if VALIDATE_C_SEGMENTS
	c_seg_dst->c_was_major_compacted++;
	c_seg_src->c_was_major_donor++;
#endif
	assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
	vm_pageout_vminfo.vm_compactor_major_compactions_completed++;

	dst_slot = c_seg_dst->c_nextslot;

	for (i = 0; i < c_seg_src->c_nextslot; i++) {
		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			/* BATCH: move what we have so far; */
			continue;
		}

		int combined_size = c_size + c_slot_extra_size(c_src);

		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);

		int size_left = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_nextoffset);
		/* we're going to increment c_nextoffset by c_rounded_size so it should not overflow the segment bufsize */
		if (size_left < c_rounded_size) {
			keep_compacting = FALSE;
			break;
		}

		/* Do we have enough populated space left in dst? */
		assertf(c_seg_dst->c_populated_offset >= c_seg_dst->c_nextoffset, "Unexpected segment offsets: %u,%u", c_seg_dst->c_populated_offset, c_seg_dst->c_nextoffset);
		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) combined_size) {
			int     size_to_populate;

			/* eagerly populate the entire segment in expectation to fill it */
			assert(c_seg_bufsize >= C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset));
			size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);

			if (size_to_populate == 0) {
				/* can't populate any more pages in this segment */
				keep_compacting = FALSE;
				break;
			}
			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
			}

			kernel_memory_populate(
				(vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
				size_to_populate,
				KMA_NOFAIL | KMA_COMPRESSOR,
				VM_KERN_MEMORY_COMPRESSOR);

			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
		}
		c_seg_alloc_nextslot(c_seg_dst);

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		/*
		 * We don't want pages to get stolen by the contiguous memory allocator
		 * when copying data from one segment to another.
		 */
		PAGE_REPLACEMENT_DISALLOWED(TRUE);
		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
		vm_pageout_vminfo.vm_compactor_major_compaction_slots_moved++;
		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += combined_size;
		vm_pageout_vminfo.vm_compactor_major_compaction_bytes_moved += combined_size;

		cslot_copy(c_dst, c_src);
		c_dst->c_offset = c_seg_dst->c_nextoffset;

		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
			c_seg_dst->c_firstemptyslot++;
		}
		c_seg_dst->c_slots_used++;
		c_seg_dst->c_nextslot++;
		c_seg_dst->c_bytes_used += c_rounded_size;
		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

		PACK_C_SIZE(c_src, 0);
#if HAS_MTE
		c_src->c_mte_size = 0;
#endif

		c_seg_src->c_bytes_used -= c_rounded_size;
		c_seg_src->c_bytes_unused += c_rounded_size;
		c_seg_src->c_firstemptyslot = 0;

		assert(c_seg_src->c_slots_used);
		c_seg_src->c_slots_used--;

		if (!c_seg_src->c_swappedin) {
			/* Pessimistically lose swappedin status when non-swappedin pages are added. */
			c_seg_dst->c_swappedin = false;
		}

		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
			/* dest segment is now full */
			keep_compacting = FALSE;
			break;
		}
	}

	C_SEG_WRITE_PROTECT(c_seg_dst);

	if (dst_slot < c_seg_dst->c_nextslot) {
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		/*
		 * we've now locked out c_decompress from
		 * converting the slot passed into it into
		 * a c_segment_t which allows us to use
		 * the backptr to change which c_segment and
		 * index the slot points to
		 */
		while (dst_slot < c_seg_dst->c_nextslot) {
			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
			slot_ptr->s_cindx = dst_slot++;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_COALESCE) | DBG_FUNC_END,
	    keep_compacting, c_seg_dst->c_nextoffset,
	    c_seg_dst->c_populated_offset, c_seg_dst->c_bytes_used);
	return keep_compacting;
}


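/* Convert two (sec, nsec) timestamp pairs into the elapsed milliseconds between them. */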
uint64_t
vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
{
	uint64_t end_msecs;
	uint64_t start_msecs;

	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
	start_msecs = (start_sec * 1000) + start_nsec / 1000000;

	return end_msecs - start_msecs;
}



uint32_t compressor_eval_period_in_msecs = 250;
uint32_t compressor_sample_min_in_msecs = 500;
uint32_t compressor_sample_max_in_msecs = 10000;
uint32_t compressor_thrashing_threshold_per_10msecs = 50;
uint32_t compressor_thrashing_min_per_10msecs = 20;

/* When true, reset sample data next chance we get. */
static boolean_t        compressor_need_sample_reset = FALSE;


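/*
 * Thrashing detector: periodically evaluate the sampled ages of recent
 * decompressions. If there is enough decompression traffic, approximate
 * the working set size from the age distribution; when it would fit in
 * the compressor pool, pick the age beyond which only ~1% of
 * decompressions land and publish it as swapout_target_age, so swapouts
 * prefer segments older than essentially all of the working set. A
 * nonzero swapout_target_age is what the swapout path treats as
 * "thrashing detected".
 */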
void
compute_swapout_target_age(void)
{
	clock_sec_t     cur_ts_sec;
	clock_nsec_t    cur_ts_nsec;
	uint32_t        min_operations_needed_in_this_sample;
	uint64_t        elapsed_msecs_in_eval;
	uint64_t        elapsed_msecs_in_sample;
	boolean_t       need_eval_reset = FALSE;

	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);

	if (compressor_need_sample_reset ||
	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);

	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
		goto done;
	}
	need_eval_reset = TRUE;

	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);

	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;

	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);

		swapout_target_age = 0;

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	last_eval_compression_count = sample_period_compression_count;
	last_eval_decompression_count = sample_period_decompression_count;

	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
		goto done;
	}
	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
		uint64_t        running_total;
		uint64_t        working_target;
		uint64_t        aging_target;
		uint32_t        oldest_age_of_csegs_sampled = 0;
		uint64_t        working_set_approximation = 0;

		swapout_target_age = 0;

		working_target = (sample_period_decompression_count / 100) * 95;                /* 95 percent */
		aging_target = (sample_period_decompression_count / 100) * 1;                   /* 1 percent */
		running_total = 0;

		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			if (running_total >= working_target) {
				break;
			}
		}
		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;

			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
				running_total = overage_decompressions_during_sample_period;

				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

					if (running_total >= aging_target) {
						break;
					}
				}
				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;

				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
			} else {
				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
			}
		} else {
			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
		}

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
	} else {
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
	}
done:
	if (compressor_need_sample_reset == TRUE) {
		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
		overage_decompressions_during_sample_period = 0;

		start_of_sample_period_sec = cur_ts_sec;
		start_of_sample_period_nsec = cur_ts_nsec;
		sample_period_decompression_count = 0;
		sample_period_compression_count = 0;
		last_eval_decompression_count = 0;
		last_eval_compression_count = 0;
		compressor_need_sample_reset = FALSE;
	}
	if (need_eval_reset == TRUE) {
		start_of_eval_period_sec = cur_ts_sec;
		start_of_eval_period_nsec = cur_ts_nsec;
	}
}


int             compaction_swapper_init_now = 0;
int             compaction_swapper_running = 0;
int             compaction_swapper_awakened = 0;
int             compaction_swapper_abort = 0;

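/*
 * A swapout is "ripe" when overage swapping is enabled, we are still
 * under c_overage_swapped_limit, and the oldest segment on the age queue
 * has been sitting there at least vm_ripe_target_age seconds.
 */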
bool
vm_compressor_swapout_is_ripe()
{
	bool is_ripe = false;
	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
		c_segment_t     c_seg;
		clock_sec_t     now;
		clock_sec_t     age;
		clock_nsec_t    nsec;

		clock_get_system_nanotime(&now, &nsec);
		age = 0;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			age = now - c_seg->c_creation_ts;
		}
		lck_mtx_unlock_always(c_list_lock);

		if (age >= vm_ripe_target_age) {
			is_ripe = true;
		}
	}
	return is_ripe;
}


static bool
compressor_swapout_conditions_met(void)
{
	bool should_swap = false;
	if (COMPRESSOR_NEEDS_TO_SWAP()) {
		should_swap = true;
		vmcs_stats.compressor_swap_threshold_exceeded++;
	}
	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
		should_swap = true;
		vmcs_stats.external_q_throttled++;
	}
	if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
		should_swap = true;
		vmcs_stats.free_count_below_reserve++;
	}
	return should_swap;
}

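/*
 * Top-level policy check for the compaction/swapper thread: swap when a
 * segment is ripe, when the active-swap thresholds are met, or (on
 * configurations that use it) when thrashing or phantom-cache pressure
 * is detected. Under CONFIG_JETSAM, thrashing instead wakes the
 * memorystatus thread and suppresses swap so jetsam can free segments
 * first. Even when no swap is wanted, we return true if a major
 * compaction is needed so the compaction engine still runs.
 */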
static bool
compressor_needs_to_swap()
{
	bool should_swap = false;
	if (vm_compressor_swapout_is_ripe()) {
		should_swap = true;
		goto check_if_low_space;
	}

	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		should_swap = compressor_swapout_conditions_met();
		if (should_swap) {
			goto check_if_low_space;
		}
	}

#if (XNU_TARGET_OS_OSX && __arm64__)
	/*
	 * Thrashing detection disabled.
	 */
#else /* (XNU_TARGET_OS_OSX && __arm64__) */

	if (vm_compressor_is_thrashing()) {
		should_swap = true;
		vmcs_stats.thrashing_detected++;
	}

#if CONFIG_PHANTOM_CACHE
	if (vm_phantom_cache_check_pressure()) {
		os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
		should_swap = true;
	}
#endif
	if (swapout_target_age) {
		should_swap = true;
	}
#endif /* (XNU_TARGET_OS_OSX && __arm64__) */

check_if_low_space:

#if CONFIG_JETSAM
	if (should_swap || vm_compressor_low_on_space()) {
		if (vm_compressor_thrashing_detected == FALSE) {
			vm_compressor_thrashing_detected = TRUE;

			if (swapout_target_age) {
				compressor_thrashing_induced_jetsam++;
			} else if (vm_compressor_low_on_space()) {
				compressor_thrashing_induced_jetsam++;
			} else {
				filecache_thrashing_induced_jetsam++;
			}
			/*
			 * Wake up the memorystatus thread so that it can return
			 * the system to a healthy state (by killing processes).
			 */
			memorystatus_thread_wake();
		}
		/*
		 * let the jetsam take precedence over
		 * any major compactions we might have
		 * been able to do... otherwise we run
		 * the risk of doing major compactions
		 * on segments we're about to free up
		 * due to the jetsam activity.
		 */
		should_swap = false;
		if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
			memorystatus_respond_to_swap_exhaustion();
		}
	}
#else /* CONFIG_JETSAM */
	if (should_swap && vm_swap_low_on_space()) {
		memorystatus_respond_to_swap_exhaustion();
	}
#endif /* CONFIG_JETSAM */

	if (should_swap == false) {
		/*
		 * vm_compressor_needs_to_major_compact returns true only if we're
		 * about to run out of available compressor segments... in this
		 * case, we absolutely need to run a major compaction even if
		 * we've just kicked off a jetsam or we don't otherwise need to
		 * swap... terminating objects releases
		 * pages back to the uncompressed cache, but does not guarantee
		 * that we will free up even a single compression segment
		 */
		should_swap = vm_compressor_needs_to_major_compact();
		if (should_swap) {
			vmcs_stats.fragmentation_detected++;
		}
	}

	/*
	 * returning TRUE when swap_supported == FALSE
	 * will cause the major compaction engine to
	 * run, but will not trigger any swapping...
	 * segments that have been major compacted
	 * will be moved to the majorcompact queue
	 */
	return should_swap;
}

2871 
2872 #if CONFIG_JETSAM
2873 /*
2874  * This function is called from the jetsam thread after killing something to
2875  * mitigate thrashing.
2876  *
2877  * We need to restart our thrashing detection heuristics since memory pressure
2878  * has potentially changed significantly, and we don't want to detect on old
2879  * data from before the jetsam.
2880  */
2881 void
vm_thrashing_jetsam_done(void)2882 vm_thrashing_jetsam_done(void)
2883 {
2884 	vm_compressor_thrashing_detected = FALSE;
2885 
2886 	/* Were we compressor-thrashing or filecache-thrashing? */
2887 	if (swapout_target_age) {
2888 		swapout_target_age = 0;
2889 		compressor_need_sample_reset = TRUE;
2890 	}
2891 #if CONFIG_PHANTOM_CACHE
2892 	else {
2893 		vm_phantom_cache_restart_sample();
2894 	}
2895 #endif
2896 }
2897 #endif /* CONFIG_JETSAM */

uint32_t vm_wake_compactor_swapper_calls = 0;
uint32_t vm_run_compactor_already_running = 0;
uint32_t vm_run_compactor_empty_minor_q = 0;
uint32_t vm_run_compactor_did_compact = 0;
uint32_t vm_run_compactor_waited = 0;

/* run minor compaction right now, if the compaction-swapper thread is not already running */
void
vm_run_compactor(void)
{
	if (c_segment_count == 0) {
		return;
	}

	if (os_atomic_load(&c_minor_count, relaxed) == 0) {
		vm_run_compactor_empty_minor_q++;
		return;
	}

	lck_mtx_lock_spin_always(c_list_lock);

	if (compaction_swapper_running) {
		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
			vm_run_compactor_already_running++;

			lck_mtx_unlock_always(c_list_lock);
			return;
		}
		vm_run_compactor_waited++;

		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		return;
	}
	vm_run_compactor_did_compact++;

	fastwake_warmup = FALSE;
	compaction_swapper_running = 1;

	vm_compressor_do_delayed_compactions(FALSE);

	compaction_swapper_running = 0;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}


void
vm_wake_compactor_swapper(void)
{
	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
		return;
	}

	if (os_atomic_load(&c_minor_count, relaxed) ||
	    vm_compressor_needs_to_major_compact()) {
		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
			vm_wake_compactor_swapper_calls++;

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}

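/*
 * Synchronous variant used when the caller wants a full compact-and-swap
 * pass right now: abort and wait out any running compaction-swapper pass,
 * then run major compaction with ripe-segment swapout enabled.
 */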
void
vm_consider_swapping()
{
	assert(VM_CONFIG_SWAP_IS_PRESENT);

	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	vm_swapout_ripe_segments = TRUE;

	vm_compressor_process_major_segments(vm_swapout_ripe_segments);

	vm_compressor_compact_and_swap(FALSE);

	compaction_swapper_running = 0;

	vm_swapout_ripe_segments = FALSE;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}


void
vm_consider_waking_compactor_swapper(void)
{
	bool need_wakeup = false;

	if (c_segment_count == 0) {
		return;
	}

	if (compaction_swapper_running || compaction_swapper_awakened) {
		return;
	}

	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
		compaction_swapper_init_now = 1;
		need_wakeup = true;
	} else if (vm_compressor_needs_to_minor_compact() ||
	    compressor_needs_to_swap()) {
		need_wakeup = true;
	}

	if (need_wakeup) {
		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
			memoryshot(DBG_VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}



#define C_SWAPOUT_LIMIT                 4
#define DELAYED_COMPACTIONS_PER_PASS    30

/* process segments that are in the minor compaction queue */
void
vm_compressor_do_delayed_compactions(boolean_t flush_all)
{
	c_segment_t     c_seg;
	int             number_compacted = 0;
	bool            needs_to_swap = false;
	uint32_t        c_swapout_count = 0;


	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_DEFERRED) | DBG_FUNC_START,
	    c_minor_count, flush_all);

#if XNU_TARGET_OS_OSX
	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif /* XNU_TARGET_OS_OSX */

	while (!queue_empty(&c_minor_list_head) && !needs_to_swap) {
		c_seg = (c_segment_t)queue_first(&c_minor_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);

		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
		number_compacted++;
		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted % DELAYED_COMPACTIONS_PER_PASS) == 0) {
			if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
				needs_to_swap = true;
			}
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}

	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_DEFERRED) | DBG_FUNC_END,
	    c_minor_count, number_compacted, needs_to_swap);
}

int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;

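/*
 * Coalesce queue neighbors into c_seg until it fills up. For each segment
 * that follows c_seg on its queue (the age queue or the late swappedin
 * queue), ask c_seg_major_compact_ok, pull its slots over with
 * c_seg_coalesce, then minor-compact (and possibly free) the donor.
 * Bails early, clearing fully_compacted and setting *bail_wanted_cseg,
 * if another thread is waiting on c_seg.
 */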
3104 static bool
vm_compressor_major_compact_cseg(c_segment_t c_seg,uint32_t * c_seg_considered,bool * bail_wanted_cseg,uint64_t * total_bytes_freed)3105 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
3106 {
3107 	/*
3108 	 * Major compaction
3109 	 */
3110 	bool keep_compacting = true, fully_compacted = true;
3111 	queue_head_t *list_head = NULL;
3112 	c_segment_t c_seg_next;
3113 	uint64_t        bytes_to_free = 0, bytes_freed = 0;
3114 	uint32_t        number_considered = 0;
3115 
3116 	if (c_seg->c_state == C_ON_AGE_Q) {
3117 		assert(!c_seg->c_has_donated_pages);
3118 		list_head = &c_age_list_head;
3119 	} else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
3120 		assert(c_seg->c_has_donated_pages);
3121 		list_head = &c_late_swappedin_list_head;
3122 	}
3123 
3124 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MAJOR) | DBG_FUNC_START,
3125 	    VM_KERNEL_ADDRHIDE(c_seg), c_seg->c_state,
3126 	    c_seg->c_bytes_used);
3127 
3128 	while (keep_compacting == TRUE) {
3129 		assert(c_seg->c_busy);
3130 
3131 		/* look for another segment to consolidate */
3132 
3133 		c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3134 
3135 		if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
3136 			break;
3137 		}
3138 
3139 		assert(c_seg_next->c_state == c_seg->c_state);
3140 
3141 		number_considered++;
3142 
3143 		if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3144 			break;
3145 		}
3146 
3147 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3148 
3149 		if (c_seg_next->c_busy) {
3150 			/*
3151 			 * We are going to block for our neighbor.
3152 			 * If our c_seg is wanted, we should unbusy
3153 			 * it because we don't know how long we might
3154 			 * have to block here.
3155 			 */
3156 			if (c_seg->c_wanted) {
3157 				lck_mtx_unlock_always(&c_seg_next->c_lock);
3158 				fully_compacted = false;
3159 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3160 				vm_pageout_vminfo.vm_compactor_major_compactions_bailed++;
3161 				*bail_wanted_cseg = true;
3162 				break;
3163 			}
3164 
3165 			lck_mtx_unlock_always(c_list_lock);
3166 
3167 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3168 
3169 			c_seg_wait_on_busy(c_seg_next);
3170 			lck_mtx_lock_spin_always(c_list_lock);
3171 
3172 			continue;
3173 		}
3174 		/* grab that segment */
3175 		C_SEG_BUSY(c_seg_next);
3176 
3177 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3178 		if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3179 			/*
3180 			 * found an empty c_segment and freed it
3181 			 * so we can't continue to use c_seg_next
3182 			 */
3183 			bytes_freed += bytes_to_free;
3184 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3185 			vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
3186 			continue;
3187 		}
3188 
3189 		/* unlock the list ... */
3190 		lck_mtx_unlock_always(c_list_lock);
3191 
3192 		/* do the major compaction */
3193 
3194 		keep_compacting = c_seg_coalesce(c_seg, c_seg_next);
3195 
3196 		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3197 
3198 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3199 
3200 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3201 		/*
3202 		 * run a minor compaction on the donor segment
3203 		 * since we pulled at least some of its
3204 		 * data into our target...  if we've emptied
3205 		 * it, now is a good time to free it which
3206 		 * c_seg_minor_compaction_and_unlock also takes care of
3207 		 *
3208 		 * by passing TRUE, we ask for c_busy to be cleared
3209 		 * and c_wanted to be taken care of
3210 		 */
3211 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3212 		if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3213 			bytes_freed += bytes_to_free;
3214 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3215 			vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
3216 		} else {
3217 			bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3218 			bytes_freed += bytes_to_free;
3219 		}
3220 
3221 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3222 
3223 		/* relock the list */
3224 		lck_mtx_lock_spin_always(c_list_lock);
3225 
3226 		if (c_seg->c_wanted) {
3227 			/*
3228 			 * Our c_seg is in demand. Let's
3229 			 * unbusy it and wakeup the waiters
3230 			 * instead of continuing the compaction
3231 			 * because we could be in this loop
3232 			 * for a while.
3233 			 */
3234 			fully_compacted = false;
3235 			*bail_wanted_cseg = true;
3236 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3237 			vm_pageout_vminfo.vm_compactor_major_compactions_bailed++;
3238 			break;
3239 		}
3240 	} /* major compaction */
3241 
3242 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3243 
3244 	*c_seg_considered += number_considered;
3245 	*total_bytes_freed += bytes_freed;
3246 
3247 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3248 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MAJOR) | DBG_FUNC_END,
3249 	    fully_compacted, *bail_wanted_cseg,
3250 	    bytes_freed, c_seg->c_bytes_used);
3251 	return fully_compacted;
3252 }
3253 
3254 #define TIME_SUB(rsecs, secs, rfrac, frac, unit)                        \
3255 	MACRO_BEGIN                                                     \
3256 	if ((int)((rfrac) -= (frac)) < 0) {                             \
3257 	        (rfrac) += (unit);                                      \
3258 	        (rsecs) -= 1;                                           \
3259 	}                                                               \
3260 	(rsecs) -= (secs);                                              \
3261 	MACRO_END
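/*
 * Worked example (illustrative only): subtracting 3.900000000 s from
 * 5.200000000 s with unit == NSEC_PER_SEC:
 *
 *   rsecs = 5, rfrac = 200000000; secs = 3, frac = 900000000
 *   rfrac -= frac  ->  -700000000 < 0, so rfrac += unit -> 300000000
 *                      and rsecs -= 1 -> 4
 *   rsecs -= secs  ->  1
 *   result: 1 s, 300000000 ns
 */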
3262 
3263 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3264 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3265 uint64_t c_process_major_reports = 0;
3266 clock_sec_t c_process_major_max_sec = 0;
3267 clock_nsec_t c_process_major_max_nsec = 0;
3268 uint32_t c_process_major_peak_segcount = 0;
3269 static void
3270 vm_compressor_process_major_segments(bool ripe_age_only)
3271 {
3272 	c_segment_t c_seg = NULL;
3273 	int count = 0, total = 0, breaks = 0;
3274 	clock_sec_t start_sec, end_sec;
3275 	clock_nsec_t start_nsec, end_nsec;
3276 	clock_nsec_t report_over_ns;
3277 
3278 	if (queue_empty(&c_major_list_head)) {
3279 		return;
3280 	}
3281 
3282 	// printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3283 	if (c_process_major_report_over_ms != 0) {
3284 		report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3285 	} else {
3286 		report_over_ns = (clock_nsec_t)-1;
3287 	}
3288 
3289 	if (ripe_age_only) {
3290 		if (c_overage_swapped_count >= c_overage_swapped_limit) {
3291 			/*
3292 			 * Return while we wait for the overage segments
3293 			 * in our queue to get pushed out first.
3294 			 */
3295 			return;
3296 		}
3297 	}
3298 
3299 	clock_get_system_nanotime(&start_sec, &start_nsec);
3300 	while (!queue_empty(&c_major_list_head)) {
3301 		if (!ripe_age_only) {
3302 			/*
3303 			 * Start from the end to preserve aging order. The newer
3304 			 * segments are at the tail and so need to be inserted in
3305 			 * the aging queue in this way so we have the older segments
3306 			 * at the end of the AGE_Q.
3307 			 */
3308 			c_seg = (c_segment_t)queue_last(&c_major_list_head);
3309 		} else {
3310 			c_seg = (c_segment_t)queue_first(&c_major_list_head);
3311 			if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3312 				/*
3313 				 * We have found the first segment in our queue that is not ripe. Segments after it
3314 				 * will be the same. So let's bail here. Return with c_list_lock held.
3315 				 */
3316 				break;
3317 			}
3318 		}
3319 
3320 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3321 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3322 		lck_mtx_unlock_always(&c_seg->c_lock);
3323 
3324 		count++;
3325 		if (count == c_process_major_yield_after ||
3326 		    queue_empty(&c_major_list_head)) {
3327 			/* done or time to take a break */
3328 		} else {
3329 			/* keep going */
3330 			continue;
3331 		}
3332 
3333 		total += count;
3334 		clock_get_system_nanotime(&end_sec, &end_nsec);
3335 		TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3336 		if (end_sec > c_process_major_max_sec) {
3337 			c_process_major_max_sec = end_sec;
3338 			c_process_major_max_nsec = end_nsec;
3339 		} else if (end_sec == c_process_major_max_sec &&
3340 		    end_nsec > c_process_major_max_nsec) {
3341 			c_process_major_max_nsec = end_nsec;
3342 		}
3343 		if (total > c_process_major_peak_segcount) {
3344 			c_process_major_peak_segcount = total;
3345 		}
3346 		if (end_sec > 0 ||
3347 		    end_nsec >= report_over_ns) {
3348 			/* we used more than expected */
3349 			c_process_major_reports++;
3350 			printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3351 			    __FUNCTION__, count, total,
3352 			    end_sec, end_nsec, breaks);
3353 		}
3354 		if (queue_empty(&c_major_list_head)) {
3355 			/* done */
3356 			break;
3357 		}
3358 		/* take a break to allow someone else to grab the lock */
3359 		lck_mtx_unlock_always(c_list_lock);
3360 		mutex_pause(0); /* 10 microseconds */
3361 		lck_mtx_lock_spin_always(c_list_lock);
3362 		/* start again */
3363 		clock_get_system_nanotime(&start_sec, &start_nsec);
3364 		count = 0;
3365 		breaks++;
3366 	}
3367 }
3368 
3369 /*
3370  * macOS special swappable csegs -> early_swapin queue
3371  * non-macOS special swappable+non-freezer csegs -> late_swapin queue
3372  * Processing special csegs means minor compacting each cseg, then
3373  * major compacting it and putting it on the early or late
3374  * (depending on platform) swapout queue. tag:DONATE
3375  */
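/*
 * A rough sketch of the routing described above (queue names taken from
 * the code below; a summary, not a definitive policy statement):
 *
 *   macOS:      donated cseg -> c_early_swappedin_list_head
 *               -> minor + major compact -> early swapout queue
 *   non-macOS:  donated, non-freezer cseg (only when
 *               memorystatus_swap_all_apps) -> c_late_swappedin_list_head
 *               -> minor + major compact -> late swapout queue
 */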
3376 static void
3377 vm_compressor_process_special_swapped_in_segments_locked(void)
3378 {
3379 	c_segment_t c_seg = NULL;
3380 	bool            switch_state = true, bail_wanted_cseg = false;
3381 	unsigned int    yield_after_considered_per_pass = 0;
3382 	unsigned int    total_considered = 0, total_bailed = 0;
3383 	uint64_t        total_bytes_freed = 0;
3384 	queue_head_t    *special_swappedin_list_head;
3385 
3386 #if XNU_TARGET_OS_OSX
3387 	special_swappedin_list_head = &c_early_swappedin_list_head;
3388 #else /* XNU_TARGET_OS_OSX */
3389 	if (memorystatus_swap_all_apps) {
3390 		special_swappedin_list_head = &c_late_swappedin_list_head;
3391 	} else {
3392 		/* called on unsupported config */
3393 		return;
3394 	}
3395 #endif /* XNU_TARGET_OS_OSX */
3396 
3397 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_SPECIAL) | DBG_FUNC_START,
3398 	    c_early_swappedin_count, c_late_swappedin_count);
3399 
3400 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3401 	while (!queue_empty(special_swappedin_list_head)) {
3402 		uint64_t cur_bytes_freed = 0;
3403 		uint32_t cur_considered = 0;
3404 
3405 		c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3406 
3407 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3408 
3409 		if (c_seg->c_busy) {
3410 			lck_mtx_unlock_always(c_list_lock);
3411 			c_seg_wait_on_busy(c_seg);
3412 			lck_mtx_lock_spin_always(c_list_lock);
3413 			continue;
3414 		}
3415 
3416 		C_SEG_BUSY(c_seg);
3417 		lck_mtx_unlock_always(&c_seg->c_lock);
3418 		lck_mtx_unlock_always(c_list_lock);
3419 
3420 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3421 
3422 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3423 
3424 		if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3425 			/*
3426 			 * found an empty c_segment and freed it
3427 			 * so go grab the next guy in the queue
3428 			 */
3429 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3430 			lck_mtx_lock_spin_always(c_list_lock);
3431 			continue;
3432 		}
3433 
3434 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3435 		lck_mtx_lock_spin_always(c_list_lock);
3436 
3437 		switch_state = vm_compressor_major_compact_cseg(c_seg, &cur_considered, &bail_wanted_cseg, &cur_bytes_freed);
3438 		assert(c_seg->c_busy);
3439 		assert(!c_seg->c_on_minorcompact_q);
3440 
3441 		if (switch_state) {
3442 			if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3443 				/*
3444 				 * Ordinarily we let swapped in segments age out + get
3445 				 * major compacted with the rest of the c_segs on the ageQ.
3446 				 * But the early donated c_segs, if well compacted, should be
3447 				 * kept ready to be swapped out if needed. These are typically
3448 				 * describing memory belonging to a leaky app (macOS) or a swap-
3449 				 * capable app (iPadOS) and for the latter we can keep these
3450 				 * around longer because we control the triggers in the memorystatus
3451 				 * subsystem
3452 				 */
3453 				c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3454 			}
3455 		}
3456 
3457 		C_SEG_WAKEUP_DONE(c_seg);
3458 
3459 		lck_mtx_unlock_always(&c_seg->c_lock);
3460 
3461 		total_considered += cur_considered;
3462 		total_bytes_freed += cur_bytes_freed;
3463 		if (bail_wanted_cseg) {
3464 			total_bailed++;
3465 		}
3466 
3467 		if (cur_considered >= yield_after_considered_per_pass) {
3468 			if (bail_wanted_cseg) {
3469 				/*
3470 				 * We stopped major compactions on a c_seg
3471 				 * that is wanted. We don't know the priority
3472 				 * of the waiter unfortunately but we are at
3473 				 * a very high priority and so, just in case
3474 				 * the waiter is a critical system daemon or
3475 				 * UI thread, let's give up the CPU in case
3476 				 * the system is running a few CPU intensive
3477 				 * tasks.
3478 				 */
3479 				bail_wanted_cseg = false;
3480 				KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_START);
3481 				lck_mtx_unlock_always(c_list_lock);
3482 
3483 				mutex_pause(2); /* 100us yield */
3484 
3485 				lck_mtx_lock_spin_always(c_list_lock);
3486 				KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_END);
3487 			}
3488 
3489 			cur_considered = 0;
3490 		}
3491 	}
3492 
3493 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_SPECIAL) | DBG_FUNC_END,
3494 	    total_considered, total_bailed, total_bytes_freed);
3495 }
3496 
3497 void
3498 vm_compressor_process_special_swapped_in_segments(void)
3499 {
3500 	lck_mtx_lock_spin_always(c_list_lock);
3501 	vm_compressor_process_special_swapped_in_segments_locked();
3502 	lck_mtx_unlock_always(c_list_lock);
3503 }
3504 
3505 #define ENABLE_DYNAMIC_SWAPPED_AGE_LIMIT 1
3506 
3507 /* minimum time that segments stay in the swappedin q as a grace period after they were swapped in,
3508  * before they are added to the age-q */
3509 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT_LOW  1  /* seconds */
3510 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT_NORMAL 10  /* seconds */
3511 #define C_AGE_Q_COUNT_LOW_THRESHOLD 50
3512 
3513 /*
3514  * Processing regular csegs means aging them.
3515  */
3516 static void
3517 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3518 {
3519 	c_segment_t     c_seg;
3520 	clock_sec_t     now;
3521 	clock_nsec_t    nsec;
3522 	unsigned int    num_processed = 0;
3523 
3524 	unsigned long limit = C_SEGMENT_SWAPPEDIN_AGE_LIMIT_NORMAL;
3525 
3526 #ifdef ENABLE_DYNAMIC_SWAPPED_AGE_LIMIT
3527 	/* In normal operation, segments are kept in the swapped-in-q for a grace period of 10 seconds so that whoever
3528 	 * needed to decompress something from a segment that was just swapped-in would have a chance to decompress
3529 	 * more out of it.
3530 	 * If the system is in high memory pressure state, this may cause the age-q to be completely empty so that
3531 	 * there are no candidate segments for swap-out. In this state we use a lower limit of 1 second.
3532 	 * condition 1: the age-q absolute size is too low
3533 	 * condition 2: there are more segments in swapped-in-q than in age-q
3534 	 * each of these represents a bad situation which we want to try to alleviate by moving more segments from
3535 	 * swapped-in-q to age-q so that we have a better selection of who to swap-out
3536 	 */
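	/*
	 * Example (illustrative): with C_AGE_Q_COUNT_LOW_THRESHOLD == 50:
	 *   c_age_count = 40,  c_regular_swappedin_count = 10   -> limit = 1s  (condition 1)
	 *   c_age_count = 100, c_regular_swappedin_count = 150  -> limit = 1s  (condition 2)
	 *   c_age_count = 100, c_regular_swappedin_count = 20   -> limit = 10s (normal)
	 */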
3537 	if (c_age_count < C_AGE_Q_COUNT_LOW_THRESHOLD || c_age_count < c_regular_swappedin_count) {
3538 		limit = C_SEGMENT_SWAPPEDIN_AGE_LIMIT_LOW;
3539 	}
3540 #endif
3541 	KDBG(VM_COMPRESSOR_EVENTID(DBG_PROCESS_SWAPPEDIN) | DBG_FUNC_START,
3542 	    c_regular_swappedin_count, c_age_count, limit, flush_all);
3543 
3544 	clock_get_system_nanotime(&now, &nsec);
3545 
3546 	while (!queue_empty(&c_regular_swappedin_list_head)) {
3547 		c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3548 
3549 		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < limit) {
3550 			/* the swappedin q is sorted by time of addition, so if we reached a seg that's too
3551 			 * young, we know that all the rest after it are also too young */
3552 			break;
3553 		}
3554 
3555 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3556 
3557 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3558 		c_seg->c_agedin_ts = (uint32_t) now;
3559 		num_processed++;
3560 
3561 		lck_mtx_unlock_always(&c_seg->c_lock);
3562 	}
3563 	KDBG(VM_COMPRESSOR_EVENTID(DBG_PROCESS_SWAPPEDIN) | DBG_FUNC_END,
3564 	    num_processed);
3565 }
3566 
3567 
3568 extern  int     vm_num_swap_files;
3569 extern  int     vm_num_pinned_swap_files;
3570 extern  int     vm_swappin_enabled;
3571 
3572 extern  unsigned int    vm_swapfile_total_segs_used;
3573 extern  unsigned int    vm_swapfile_total_segs_alloced;
3574 
3575 
3576 void
3577 vm_compressor_flush(void)
3578 {
3579 	uint64_t        vm_swap_put_failures_at_start;
3580 	wait_result_t   wait_result = 0;
3581 	AbsoluteTime    startTime, endTime;
3582 	clock_sec_t     now_sec;
3583 	clock_nsec_t    now_nsec;
3584 	uint64_t        nsec;
3585 	c_segment_t     c_seg, c_seg_next;
3586 
3587 	HIBLOG("vm_compressor_flush - starting\n");
3588 
3589 	clock_get_uptime(&startTime);
3590 
3591 	lck_mtx_lock_spin_always(c_list_lock);
3592 
3593 	fastwake_warmup = FALSE;
3594 	compaction_swapper_abort = 1;
3595 
3596 	while (compaction_swapper_running) {
3597 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3598 
3599 		lck_mtx_unlock_always(c_list_lock);
3600 
3601 		thread_block(THREAD_CONTINUE_NULL);
3602 
3603 		lck_mtx_lock_spin_always(c_list_lock);
3604 	}
3605 	compaction_swapper_abort = 0;
3606 	compaction_swapper_running = 1;
3607 
3608 	hibernate_flushing = TRUE;
3609 	hibernate_no_swapspace = FALSE;
3610 	hibernate_flush_timed_out = FALSE;
3611 	c_generation_id_flush_barrier = c_generation_id + 1000;
3612 
3613 	clock_get_system_nanotime(&now_sec, &now_nsec);
3614 	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3615 
3616 	vm_swap_put_failures_at_start = vm_swap_put_failures;
3617 
3618 	/*
3619 	 * We are about to hibernate and so we want all segments flushed to disk.
3620 	 * Segments that are on the major compaction queue won't be considered in
3621 	 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3622 	 * the ageQ for consideration.
3623 	 */
3624 	if (!queue_empty(&c_major_list_head)) {
3625 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
3626 
3627 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3628 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3629 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3630 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3631 			lck_mtx_unlock_always(&c_seg->c_lock);
3632 			c_seg = c_seg_next;
3633 		}
3634 	}
3635 	vm_compressor_compact_and_swap(TRUE);
3636 	/* need to wait here since the swap thread may also be running in parallel and handling segments */
3637 	while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3638 		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3639 
3640 		lck_mtx_unlock_always(c_list_lock);
3641 
3642 		wait_result = thread_block(THREAD_CONTINUE_NULL);
3643 
3644 		lck_mtx_lock_spin_always(c_list_lock);
3645 
3646 		if (wait_result == THREAD_TIMED_OUT) {
3647 			break;
3648 		}
3649 	}
3650 	hibernate_flushing = FALSE;
3651 	compaction_swapper_running = 0;
3652 
3653 	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3654 		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3655 		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3656 	}
3657 
3658 	lck_mtx_unlock_always(c_list_lock);
3659 
3660 	thread_wakeup((event_t)&compaction_swapper_running);
3661 
3662 	clock_get_uptime(&endTime);
3663 	SUB_ABSOLUTETIME(&endTime, &startTime);
3664 	absolutetime_to_nanoseconds(endTime, &nsec);
3665 
3666 	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3667 	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3668 }
3669 
3670 
3671 int             compaction_swap_trigger_thread_awakened = 0;
3672 
3673 
3674 static void
3675 vm_compressor_swap_trigger_thread(void)
3676 {
3677 	thread_t        self = current_thread();
3678 
3679 	self->options |= TH_OPT_VMPRIV;
3680 
3681 	/*
3682 	 * compaction_swapper_init_now is set when the first call to
3683 	 * vm_consider_waking_compactor_swapper is made from
3684 	 * vm_pageout_scan... since this function is called upon
3685 	 * thread creation, we want to make sure to delay adjusting
3686 	 * the tuneables until we are awakened via vm_pageout_scan
3687 	 * so that we are at a point where the vm_swapfile_open will
3688 	 * be operating on the correct directory (in case the default
3689 	 * of using the VM volume is overridden by the dynamic_pager)
3690 	 */
3691 	if (compaction_swapper_init_now) {
3692 		vm_compaction_swapper_do_init();
3693 
3694 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3695 			thread_vm_bind_group_add();
3696 		}
3697 #if CONFIG_THREAD_GROUPS
3698 		thread_group_vm_add();
3699 #endif
3700 		thread_set_thread_name(self, "VM_cswap_trigger");
3701 		compaction_swapper_init_now = 0;
3702 	}
3703 	lck_mtx_lock_spin_always(c_list_lock);
3704 
3705 	compaction_swap_trigger_thread_awakened++;
3706 	compaction_swapper_awakened = 0;
3707 
3708 	if (compaction_swapper_running == 0) {
3709 		compaction_swapper_running = 1;
3710 
3711 		vm_compressor_compact_and_swap(FALSE);
3712 
3713 		compaction_swapper_running = 0;
3714 	}
3715 	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3716 
3717 	if (compaction_swapper_running == 0) {
3718 		thread_wakeup((event_t)&compaction_swapper_running);
3719 	}
3720 
3721 	lck_mtx_unlock_always(c_list_lock);
3722 
3723 	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3724 
3725 	/* NOTREACHED */
3726 }
3727 
3728 
3729 void
3730 vm_compressor_record_warmup_start(void)
3731 {
3732 	c_segment_t     c_seg;
3733 
3734 	lck_mtx_lock_spin_always(c_list_lock);
3735 
3736 	if (first_c_segment_to_warm_generation_id == 0) {
3737 		if (!queue_empty(&c_age_list_head)) {
3738 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3739 
3740 			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3741 		} else {
3742 			first_c_segment_to_warm_generation_id = 0;
3743 		}
3744 
3745 		fastwake_recording_in_progress = TRUE;
3746 	}
3747 	lck_mtx_unlock_always(c_list_lock);
3748 }
3749 
3750 
3751 void
3752 vm_compressor_record_warmup_end(void)
3753 {
3754 	c_segment_t     c_seg;
3755 
3756 	lck_mtx_lock_spin_always(c_list_lock);
3757 
3758 	if (fastwake_recording_in_progress == TRUE) {
3759 		if (!queue_empty(&c_age_list_head)) {
3760 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3761 
3762 			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3763 		} else {
3764 			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3765 		}
3766 
3767 		fastwake_recording_in_progress = FALSE;
3768 
3769 		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3770 	}
3771 	lck_mtx_unlock_always(c_list_lock);
3772 }
3773 
3774 
3775 #define DELAY_TRIM_ON_WAKE_NS (25 * NSEC_PER_SEC)
3776 
3777 void
3778 vm_compressor_delay_trim(void)
3779 {
3780 	uint64_t now = mach_absolute_time();
3781 	uint64_t delay_abstime;
3782 	nanoseconds_to_absolutetime(DELAY_TRIM_ON_WAKE_NS, &delay_abstime);
3783 	dont_trim_until_ts = now + delay_abstime;
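	/*
	 * Illustrative timeline: if the wake happens at absolute time T,
	 * trims are deferred until T + 25 seconds (DELAY_TRIM_ON_WAKE_NS
	 * converted to absolute time units above).
	 */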
3784 }
3785 
3786 
3787 void
3788 vm_compressor_do_warmup(void)
3789 {
3790 	lck_mtx_lock_spin_always(c_list_lock);
3791 
3792 	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3793 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3794 
3795 		lck_mtx_unlock_always(c_list_lock);
3796 		return;
3797 	}
3798 
3799 	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3800 		fastwake_warmup = TRUE;
3801 
3802 		compaction_swapper_awakened = 1;
3803 		thread_wakeup((event_t)&c_compressor_swap_trigger);
3804 	}
3805 	lck_mtx_unlock_always(c_list_lock);
3806 }
3807 
3808 void
3809 do_fastwake_warmup_all(void)
3810 {
3811 	lck_mtx_lock_spin_always(c_list_lock);
3812 
3813 	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3814 		lck_mtx_unlock_always(c_list_lock);
3815 		return;
3816 	}
3817 
3818 	fastwake_warmup = TRUE;
3819 
3820 	do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3821 
3822 	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3823 
3824 	fastwake_warmup = FALSE;
3825 
3826 	lck_mtx_unlock_always(c_list_lock);
3827 }
3828 
3829 void
3830 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3831 {
3832 	c_segment_t     c_seg = NULL;
3833 	AbsoluteTime    startTime, endTime;
3834 	uint64_t        nsec;
3835 
3836 
3837 	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3838 
3839 	clock_get_uptime(&startTime);
3840 
3841 	lck_mtx_unlock_always(c_list_lock);
3842 
3843 	proc_set_thread_policy(current_thread(),
3844 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3845 
3846 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
3847 
3848 	lck_mtx_lock_spin_always(c_list_lock);
3849 
3850 	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3851 		c_seg = (c_segment_t) queue_first(c_queue);
3852 
3853 		if (consider_all_cseg == FALSE) {
3854 			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3855 			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3856 				break;
3857 			}
3858 
3859 			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3860 				break;
3861 			}
3862 		}
3863 
3864 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3865 		lck_mtx_unlock_always(c_list_lock);
3866 
3867 		if (c_seg->c_busy) {
3868 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3869 			c_seg_wait_on_busy(c_seg);
3870 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3871 		} else {
3872 			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3873 				lck_mtx_unlock_always(&c_seg->c_lock);
3874 			}
3875 			c_segment_warmup_count++;
3876 
3877 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3878 			vm_pageout_io_throttle();
3879 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3880 		}
3881 		lck_mtx_lock_spin_always(c_list_lock);
3882 	}
3883 	lck_mtx_unlock_always(c_list_lock);
3884 
3885 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3886 
3887 	proc_set_thread_policy(current_thread(),
3888 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3889 
3890 	clock_get_uptime(&endTime);
3891 	SUB_ABSOLUTETIME(&endTime, &startTime);
3892 	absolutetime_to_nanoseconds(endTime, &nsec);
3893 
3894 	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3895 
3896 	lck_mtx_lock_spin_always(c_list_lock);
3897 
3898 	if (consider_all_cseg == FALSE) {
3899 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3900 	}
3901 }
3902 
3903 extern bool     vm_swapout_thread_running;
3904 extern boolean_t        compressor_store_stop_compaction;
3905 
3906 void
3907 vm_compressor_compact_and_swap(boolean_t flush_all)
3908 {
3909 	c_segment_t     c_seg;
3910 	bool            switch_state, bail_wanted_cseg = false;
3911 	clock_sec_t     now;
3912 	clock_nsec_t    nsec;
3913 	mach_timespec_t start_ts, end_ts;
3914 	unsigned int    number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3915 	uint64_t        bytes_freed, delta_usec;
3916 	uint32_t        c_swapout_count = 0;
3917 
3918 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3919 
3920 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_START,
3921 	    vm_compressor_fragmentation_level(),
3922 	    VM_PAGE_COMPRESSOR_COUNT,
3923 	    c_segment_count - c_swappedout_count - c_swappedout_sparse_count,
3924 	    flush_all);
3925 
3926 	if (fastwake_warmup == TRUE) {
3927 		uint64_t        starting_warmup_count;
3928 
3929 		starting_warmup_count = c_segment_warmup_count;
3930 
3931 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3932 		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3933 		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3934 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3935 
3936 		fastwake_warmup = FALSE;
3937 	}
3938 
3939 #if __arm64__
3940 	/*
3941 	 * Re-considering major csegs showed benefits on all platforms by
3942 	 * significantly reducing fragmentation and getting back memory.
3943 	 * However, on smaller devices, e.g. the watch, there was increased power
3944 	 * use for the additional compactions.
3945 	 *
3946 	 * In the normal case, major segments will become queued for minor
3947 	 * compaction (and therefore re-considered for major compaction) when they
3948 	 * become physically fragmented through the decompression path. However,
3949 	 * if the segment already had low utilization when we placed it on the major
3950 	 * queue (e.g. because its neighbor was already fully compacted), then we're
3951 	 * unlikely to reconsider the segment for major compaction. To address this,
3952 	 * always re-process major segments when the system is nearly out of segments
3953 	 * and the overall pool is fragmented.
3954 	 */
3955 #if XNU_TARGET_OS_OSX
3956 	vm_compressor_process_major_segments(false);
3957 #else /* !XNU_TARGET_OS_OSX */
3958 	if (vm_compressor_segments_nearing_limit() &&
3959 	    vm_compressor_needs_to_major_compact()) {
3960 		vm_compressor_process_major_segments(false);
3961 	}
3962 #endif /* XNU_TARGET_OS_OSX */
3963 #endif /* __arm64__ */
3964 
3965 	/*
3966 	 * it's possible for the c_age_list_head to be empty if we
3967 	 * hit our limits for growing the compressor pool and we subsequently
3968 	 * hibernated... on the next hibernation we could see the queue as
3969  * empty and not proceed even though we have a bunch of segments on
3970 	 * the swapped in queue that need to be dealt with.
3971 	 */
3972 	vm_compressor_do_delayed_compactions(flush_all);
3973 	vm_compressor_process_special_swapped_in_segments_locked();
3974 	vm_compressor_process_regular_swapped_in_segments(flush_all);
3975 
3976 	/*
3977 	 * we only need to grab the timestamp once per
3978 	 * invocation of this function since the
3979 	 * timescale we're interested in is measured
3980 	 * in days
3981 	 */
3982 	clock_get_system_nanotime(&now, &nsec);
3983 
3984 	start_ts.tv_sec = (int) now;
3985 	start_ts.tv_nsec = nsec;
3986 	delta_usec = 0;
3987 	number_considered = 0;
3988 	wanted_cseg_found = 0;
3989 	number_yields = 0;
3990 	bytes_freed = 0;
3991 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3992 
3993 	/**
3994 	 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3995 	 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3996 	 */
3997 	vm_log_debug("before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3998 
3999 	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
4000 		if (hibernate_flushing == TRUE) {
4001 			clock_sec_t     sec;
4002 
4003 			if (hibernate_should_abort()) {
4004 				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
4005 				break;
4006 			}
4007 			if (hibernate_no_swapspace == TRUE) {
4008 				HIBLOG("vm_compressor_flush - out of swap space\n");
4009 				break;
4010 			}
4011 			if (vm_swap_files_pinned() == FALSE) {
4012 				HIBLOG("vm_compressor_flush - unpinned swap files\n");
4013 				break;
4014 			}
4015 			if (hibernate_in_progress_with_pinned_swap == TRUE &&
4016 			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
4017 				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
4018 				break;
4019 			}
4020 			clock_get_system_nanotime(&sec, &nsec);
4021 
4022 			if (sec > hibernate_flushing_deadline) {
4023 				hibernate_flush_timed_out = TRUE;
4024 				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
4025 				break;
4026 			}
4027 		}
4028 
4029 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
4030 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
4031 			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
4032 
4033 			if (!vm_swapout_thread_running) {
4034 				thread_wakeup((event_t)&vm_swapout_thread);
4035 			}
4036 
4037 			lck_mtx_unlock_always(c_list_lock);
4038 
4039 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
4040 
4041 			thread_block(THREAD_CONTINUE_NULL);
4042 
4043 			lck_mtx_lock_spin_always(c_list_lock);
4044 		}
4045 		/*
4046 		 * Minor compactions
4047 		 */
4048 		vm_compressor_do_delayed_compactions(flush_all);
4049 
4050 		/*
4051 		 * vm_compressor_process_early_swapped_in_segments()
4052 		 * might be too aggressive. So OFF for now.
4053 		 */
4054 		vm_compressor_process_regular_swapped_in_segments(flush_all);
4055 
4056 		/* Recompute because we dropped the c_list_lock above */
4057 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
4058 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
4059 			/*
4060 			 * we timed out on the above thread_block
4061 			 * let's loop around and try again
4062 			 * the timeout allows us to continue
4063 			 * to do minor compactions to make
4064 			 * more memory available
4065 			 */
4066 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
4067 
4068 			continue;
4069 		}
4070 
4071 		/*
4072 		 * Swap out segments?
4073 		 */
4074 		if (flush_all == FALSE) {
4075 			bool needs_to_swap;
4076 
4077 			lck_mtx_unlock_always(c_list_lock);
4078 
4079 			needs_to_swap = compressor_needs_to_swap();
4080 
4081 			lck_mtx_lock_spin_always(c_list_lock);
4082 
4083 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
4084 
4085 			if (!needs_to_swap) {
4086 				break;
4087 			}
4088 		}
4089 		if (queue_empty(&c_age_list_head)) {
4090 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
4091 			break;
4092 		}
4093 		c_seg = (c_segment_t) queue_first(&c_age_list_head);
4094 
4095 		assert(c_seg->c_state == C_ON_AGE_Q);
4096 
4097 		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
4098 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
4099 			break;
4100 		}
4101 
4102 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4103 
4104 		if (c_seg->c_busy) {
4105 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
4106 
4107 			lck_mtx_unlock_always(c_list_lock);
4108 			c_seg_wait_on_busy(c_seg);
4109 			lck_mtx_lock_spin_always(c_list_lock);
4110 
4111 			continue;
4112 		}
4113 		C_SEG_BUSY(c_seg);
4114 
4115 		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
4116 			/*
4117 			 * found an empty c_segment and freed it
4118 			 * so go grab the next guy in the queue
4119 			 */
4120 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
4121 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
4122 			vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
4123 			continue;
4124 		}
4125 
4126 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
4127 		if (bail_wanted_cseg) {
4128 			wanted_cseg_found++;
4129 			bail_wanted_cseg = false;
4130 		}
4131 
4132 		assert(c_seg->c_busy);
4133 		assert(!c_seg->c_on_minorcompact_q);
4134 
4135 		if (switch_state) {
4136 			if (VM_CONFIG_SWAP_IS_ACTIVE) {
4137 				int new_state = C_ON_SWAPOUT_Q;
4138 #if (XNU_TARGET_OS_OSX && __arm64__)
4139 				if (flush_all == false && compressor_swapout_conditions_met() == false) {
4140 					new_state = C_ON_MAJORCOMPACT_Q;
4141 				}
4142 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
4143 
4144 				if (new_state == C_ON_SWAPOUT_Q) {
4145 					/*
4146 					 * This mode of putting a generic c_seg on the swapout list is
4147 					 * only supported when we have general swapping enabled
4148 					 */
4149 					clock_sec_t lnow;
4150 					clock_nsec_t lnsec;
4151 					clock_get_system_nanotime(&lnow, &lnsec);
4152 					if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
4153 						vmcs_stats.unripe_under_30s++;
4154 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
4155 						vmcs_stats.unripe_under_60s++;
4156 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
4157 						vmcs_stats.unripe_under_300s++;
4158 					}
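					/*
					 * Illustrative bucketing: the branches above are disjoint,
					 * so a c_seg that aged in 45 s ago counts only in
					 * unripe_under_60s and one that aged in 4 minutes ago
					 * counts only in unripe_under_300s; segments older than
					 * 300 s (or with c_agedin_ts == 0) are not counted.
					 */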
4159 				}
4160 
4161 				c_seg_switch_state(c_seg, new_state, FALSE);
4162 			} else {
4163 				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
4164 					assert(VM_CONFIG_SWAP_IS_PRESENT);
4165 					/*
4166 					 * we are running compressor sweeps with swap-behind
4167 					 * make sure the c_seg has aged enough before swapping it
4168 					 * out...
4169 					 */
4170 					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
4171 						c_seg->c_overage_swap = TRUE;
4172 						c_overage_swapped_count++;
4173 						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
4174 					}
4175 				}
4176 			}
4177 			if (c_seg->c_state == C_ON_AGE_Q) {
4178 				/*
4179 				 * this c_seg didn't get moved to the swapout queue
4180 				 * so we need to move it out of the way...
4181 				 * we just did a major compaction on it so put it
4182 				 * on that queue
4183 				 */
4184 				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
4185 			} else {
4186 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
4187 				vm_pageout_vminfo.vm_compactor_swapout_bytes_wasted += c_seg_bufsize - c_seg->c_bytes_used;
4188 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
4189 				vm_pageout_vminfo.vm_compactor_swapouts_queued++;
4190 			}
4191 		}
4192 
4193 		C_SEG_WAKEUP_DONE(c_seg);
4194 
4195 		lck_mtx_unlock_always(&c_seg->c_lock);
4196 
4197 		/*
4198 		 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
4199 		 * On systems _without_ general swap, it's the responsibility of the memorystatus
4200 		 * subsystem to wake up the swapper.
4201 		 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
4202 		 * this policy.
4203 		 */
4204 		if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
4205 			/*
4206 			 * We don't pause/yield here because we will either
4207 			 * yield below or at the top of the loop with the
4208 			 * assert_wait_timeout.
4209 			 */
4210 			if (!vm_swapout_thread_running) {
4211 				thread_wakeup((event_t)&vm_swapout_thread);
4212 			}
4213 		}
4214 
4215 		if (number_considered >= yield_after_considered_per_pass) {
4216 			if (wanted_cseg_found) {
4217 				/*
4218 				 * We stopped major compactions on a c_seg
4219 				 * that is wanted. We don't know the priority
4220 				 * of the waiter unfortunately but we are at
4221 				 * a very high priority and so, just in case
4222 				 * the waiter is a critical system daemon or
4223 				 * UI thread, let's give up the CPU in case
4224 				 * the system is running a few CPU intensive
4225 				 * tasks.
4226 				 */
4227 				KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_START);
4228 				lck_mtx_unlock_always(c_list_lock);
4229 
4230 				mutex_pause(2); /* 100us yield */
4231 
4232 				number_yields++;
4233 
4234 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
4235 
4236 				lck_mtx_lock_spin_always(c_list_lock);
4237 				KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_END);
4238 			}
4239 
4240 			number_considered = 0;
4241 			wanted_cseg_found = 0;
4242 		}
4243 	}
4244 	clock_get_system_nanotime(&now, &nsec);
4245 
4246 	end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4247 
4248 	SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4249 
4250 	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4251 
4252 	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
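	/*
	 * Example (illustrative): a pass spanning 3.5 ms of wall time with 5
	 * yields reports delta_usec = 3500 - (5 * 100) = 3000, i.e. time spent
	 * in mutex_pause(2) is not charged to the compactor.
	 */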
4253 
4254 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed = bytes_freed;
4255 	vm_pageout_vminfo.vm_compactor_major_compaction_bytes_freed += bytes_freed;
4256 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].runtime_us = delta_usec;
4257 
4258 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_NONE,
4259 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission,
4260 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions,
4261 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts,
4262 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts);
4263 	KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_END,
4264 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions,
4265 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots,
4266 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs,
4267 	    c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed);
4268 
4269 	c_seg_major_compact_stats_now = (c_seg_major_compact_stats_now + 1) % C_SEG_MAJOR_COMPACT_STATS_MAX;
4270 
4271 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4272 }
4273 
4274 
4275 static c_segment_t
4276 c_seg_allocate(c_segment_t *current_chead, bool *nearing_limits)
4277 {
4278 	c_segment_t     c_seg;
4279 	int             min_needed;
4280 	int             size_to_populate;
4281 	c_segment_t     *donate_queue_head;
4282 	uint32_t        compressed_pages;
4283 
4284 	*nearing_limits = false;
4285 
4286 	compressed_pages = vm_compressor_pages_compressed();
4287 
4288 	if (compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4289 		*nearing_limits = true;
4290 	}
4291 	if (compressed_pages >= c_segment_pages_compressed_limit) {
4292 		/*
4293 		 * We've reached the compressed pages limit, don't return
4294 		 * a segment to compress into
4295 		 */
4296 		return NULL;
4297 	}
4298 
4299 	if ((c_seg = *current_chead) == NULL) {
4300 		uint32_t        c_segno;
4301 
4302 		lck_mtx_lock_spin_always(c_list_lock);
4303 
4304 		while (c_segments_busy == TRUE) {
4305 			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4306 
4307 			lck_mtx_unlock_always(c_list_lock);
4308 
4309 			thread_block(THREAD_CONTINUE_NULL);
4310 
4311 			lck_mtx_lock_spin_always(c_list_lock);
4312 		}
4313 		if (c_free_segno_head == (uint32_t)-1) {
4314 			uint32_t        c_segments_available_new;
4315 
4316 			/*
4317 			 * We may have dropped the c_list_lock, re-evaluate
4318 			 * the compressed pages count
4319 			 */
4320 			compressed_pages = vm_compressor_pages_compressed();
4321 
4322 			if (c_segments_available >= c_segments_nearing_limit ||
4323 			    compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4324 				*nearing_limits = true;
4325 			}
4326 			if (c_segments_available >= c_segments_limit ||
4327 			    compressed_pages >= c_segment_pages_compressed_limit) {
4328 				lck_mtx_unlock_always(c_list_lock);
4329 
4330 				return NULL;
4331 			}
4332 			c_segments_busy = TRUE;
4333 			lck_mtx_unlock_always(c_list_lock);
4334 
4335 			/* pages for c_segments are never depopulated, c_segments_available never goes down */
4336 			kernel_memory_populate((vm_offset_t)c_segments_next_page,
4337 			    PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4338 			    VM_KERN_MEMORY_COMPRESSOR);
4339 			c_segments_next_page += PAGE_SIZE;
4340 
4341 			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4342 
4343 			if (c_segments_available_new > c_segments_limit) {
4344 				c_segments_available_new = c_segments_limit;
4345 			}
4346 
4347 			/* add the just-added segments to the top of the free-list */
4348 			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4349 				c_segments_get(c_segno - 1)->c_segno = c_segno;  /* next free is the one after you */
4350 			}
4351 
4352 			lck_mtx_lock_spin_always(c_list_lock);
4353 
4354 			c_segments_get(c_segno - 1)->c_segno = c_free_segno_head; /* link to the rest of the existing freelist */
4355 			c_free_segno_head = c_segments_available; /* first one in the page that was just allocated */
4356 			c_segments_available = c_segments_available_new;
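			/*
			 * Freelist sketch (hypothetical numbers): if C_SEGMENTS_PER_PAGE
			 * were 4 and c_segments_available was 8 with no free entries,
			 * the loop above threads entries 8 -> 9 -> 10 -> 11, entry 11
			 * then points at the old head ((uint32_t)-1), and
			 * c_free_segno_head becomes 8.
			 */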
4357 
4358 			c_segments_busy = FALSE;
4359 			thread_wakeup((event_t) (&c_segments_busy));
4360 		}
4361 		c_segno = c_free_segno_head;
4362 		assert(c_segno >= 0 && c_segno < c_segments_limit);
4363 
4364 		c_free_segno_head = (uint32_t)c_segments_get(c_segno)->c_segno;
4365 
4366 		/*
4367 		 * do the rest of the bookkeeping now while we're still behind
4368 		 * the list lock and grab our generation id now into a local
4369 		 * so that we can install it once we have the c_seg allocated
4370 		 */
4371 		c_segment_count++;
4372 		if (c_segment_count > c_segment_count_max) {
4373 			c_segment_count_max = c_segment_count;
4374 		}
4375 
4376 		lck_mtx_unlock_always(c_list_lock);
4377 
4378 		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4379 
4380 		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4381 
4382 		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4383 
4384 		c_seg->c_state = C_IS_EMPTY;
4385 		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4386 		c_seg->c_mysegno = c_segno;
4387 
4388 		lck_mtx_lock_spin_always(c_list_lock);
4389 		c_empty_count++;  /* going to be immediately decremented in the next call */
4390 		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4391 		c_segments_get(c_segno)->c_seg = c_seg;
4392 		assert(c_segments_get(c_segno)->c_segno > c_segments_available);  /* we just assigned a pointer to it so this is an indication that it is occupied */
4393 		lck_mtx_unlock_always(c_list_lock);
4394 
4395 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4396 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4397 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4398 #else /* XNU_TARGET_OS_OSX */
4399 			if (memorystatus_swap_all_apps) {
4400 				donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4401 			} else {
4402 				donate_queue_head = NULL;
4403 			}
4404 #endif /* XNU_TARGET_OS_OSX */
4405 
4406 			if (current_chead == donate_queue_head) {
4407 				c_seg->c_has_donated_pages = 1;
4408 				break;
4409 			}
4410 		}
4411 
4412 		*current_chead = c_seg;
4413 
4414 		C_SEG_MAKE_WRITEABLE(c_seg);
4415 	}
4416 	c_seg_alloc_nextslot(c_seg);
4417 
4418 	size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4419 
4420 	if (size_to_populate) {
4421 		min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4422 
4423 		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4424 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4425 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4426 			}
4427 
4428 			os_atomic_add(&vm_pageout_vminfo.vm_compressor_pages_grabbed, size_to_populate / PAGE_SIZE, relaxed);
4429 
4430 			kernel_memory_populate(
4431 				(vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4432 				size_to_populate,
4433 				KMA_NOFAIL | KMA_COMPRESSOR,
4434 				VM_KERN_MEMORY_COMPRESSOR);
4435 		} else {
4436 			size_to_populate = 0;
4437 		}
4438 	}
4439 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4440 
4441 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4442 
4443 	if (size_to_populate) {
4444 		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4445 	}
4446 
4447 	return c_seg;
4448 }
4449 
4450 #if DEVELOPMENT || DEBUG
4451 #if CONFIG_FREEZE
4452 extern boolean_t memorystatus_freeze_to_memory;
4453 #endif /* CONFIG_FREEZE */
4454 #endif /* DEVELOPMENT || DEBUG */
4455 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4456 
4457 uint64_t c_seg_filled_no_contention = 0;
4458 uint64_t c_seg_filled_contention = 0;
4459 clock_sec_t c_seg_filled_contention_sec_max = 0;
4460 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4461 
4462 static void
4463 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4464 {
4465 	uint32_t        unused_bytes;
4466 	uint32_t        offset_to_depopulate;
4467 	int             new_state = C_ON_AGE_Q;
4468 	clock_sec_t     sec;
4469 	clock_nsec_t    nsec;
4470 	bool            head_insert = false, wakeup_swapout_thread = false;
4471 
4472 	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4473 
4474 	if (unused_bytes) {
4475 		/* if this is a platform that need an extra page at the end of the segment when running compress
4476 		/* if this is a platform that needs an extra page at the end of the segment when running compression,
4477 		 * now is the time to depopulate that extra page. It still takes virtual space but doesn't
4478 		 * actually waste memory */
4479 
4480 		/* release the extra physical page(s) at the end of the segment  */
4481 		lck_mtx_unlock_always(&c_seg->c_lock);
4482 
4483 		kernel_memory_depopulate(
4484 			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4485 			unused_bytes,
4486 			KMA_COMPRESSOR,
4487 			VM_KERN_MEMORY_COMPRESSOR);
4488 
4489 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4490 
4491 		c_seg->c_populated_offset = offset_to_depopulate;
4492 	}
4493 	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
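	/*
	 * Example (illustrative, 4K pages): if c_populated_offset maps to 64KB
	 * and c_nextoffset maps to 30KB, unused_bytes = trunc_page_32(34KB) =
	 * 32KB and offset_to_depopulate corresponds to round_page_32(30KB) =
	 * 32KB, so the top eight pages are returned while the partially-filled
	 * page at 28KB..32KB stays resident.
	 */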
4494 
4495 #if CONFIG_CSEG_MPROTECT
4496 	if (write_protect_c_segs) {
4497 		boolean_t       c_seg_was_busy = FALSE;
4498 
4499 		if (!c_seg->c_busy) {
4500 			C_SEG_BUSY(c_seg);
4501 		} else {
4502 			c_seg_was_busy = TRUE;
4503 		}
4504 
4505 		lck_mtx_unlock_always(&c_seg->c_lock);
4506 
4507 		C_SEG_WRITE_PROTECT(c_seg);
4508 
4509 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4510 
4511 		if (c_seg_was_busy == FALSE) {
4512 			C_SEG_WAKEUP_DONE(c_seg);
4513 		}
4514 	}
4515 #endif /* CONFIG_CSEG_MPROTECT */
4516 
4517 #if CONFIG_FREEZE
4518 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4519 	    VM_CONFIG_SWAP_IS_PRESENT &&
4520 	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4521 #if DEVELOPMENT || DEBUG
4522 	    && !memorystatus_freeze_to_memory
4523 #endif /* DEVELOPMENT || DEBUG */
4524 	    ) {
4525 		new_state = C_ON_SWAPOUT_Q;
4526 		wakeup_swapout_thread = true;
4527 	}
4528 #endif /* CONFIG_FREEZE */
4529 
4530 	if (vm_darkwake_mode == TRUE) {
4531 		new_state = C_ON_SWAPOUT_Q;
4532 		head_insert = true;
4533 		wakeup_swapout_thread = true;
4534 	} else {
4535 		c_segment_t *donate_queue_head;
4536 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4537 #if XNU_TARGET_OS_OSX  /* tag:DONATE */
4538 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4539 #else /* XNU_TARGET_OS_OSX */
4540 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4541 #endif /* XNU_TARGET_OS_OSX */
4542 			if (current_chead == donate_queue_head) {
4543 				/* This is the place where the "donating" task actually does the so-called donation.
4544 				 * Instead of continuing to reside in memory in the compressor, the segment goes directly
4545 				 * to swap-out instead of going to the AGE_Q */
4546 				assert(c_seg->c_has_donated_pages);
4547 				new_state = C_ON_SWAPOUT_Q;
4548 				c_seg_total_donated_bytes += c_seg->c_bytes_used;
4549 				break;
4550 			}
4551 		}
4552 	}
4553 
4554 	clock_get_system_nanotime(&sec, &nsec);
4555 	c_seg->c_creation_ts = (uint32_t)sec;
4556 
4557 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4558 		clock_sec_t     sec2;
4559 		clock_nsec_t    nsec2;
4560 
4561 		lck_mtx_lock_spin_always(c_list_lock);
4562 		clock_get_system_nanotime(&sec2, &nsec2);
4563 		TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4564 		/* keep track of how much time we've waited for c_list_lock */
4565 		if (sec2 > c_seg_filled_contention_sec_max) {
4566 			c_seg_filled_contention_sec_max = sec2;
4567 			c_seg_filled_contention_nsec_max = nsec2;
4568 		} else if (sec2 == c_seg_filled_contention_sec_max && nsec2 > c_seg_filled_contention_nsec_max) {
4569 			c_seg_filled_contention_nsec_max = nsec2;
4570 		}
4571 		c_seg_filled_contention++;
4572 	} else {
4573 		c_seg_filled_no_contention++;
4574 	}
4575 
4576 #if CONFIG_FREEZE
4577 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4578 		if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4579 			assert(!c_seg->c_has_donated_pages);
4580 			c_seg->c_has_donated_pages = 1;
4581 			os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4582 		}
4583 		c_seg->c_has_freezer_pages = 1;
4584 	}
4585 #endif /* CONFIG_FREEZE */
4586 
4587 	c_seg->c_generation_id = c_generation_id++;
4588 	c_seg_switch_state(c_seg, new_state, head_insert);
4589 
4590 #if CONFIG_FREEZE
4591 	/*
4592 	 * Donated segments count as frozen to swap if we go through the freezer.
4593 	 * TODO: What we need is a new ledger and cseg state that can describe
4594 	 * a frozen cseg from a donated task so we can accurately decrement it on
4595 	 * swapins.
4596 	 */
4597 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4598 		/*
4599 		 * darkwake and freezer can't co-exist
4600 		 * We'll need to fix this accounting as a start.
4601 		 * And early donation c_segs are separate from frozen c_segs.
4602 		 */
4603 		assert(vm_darkwake_mode == FALSE);
4604 		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4605 		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4606 	}
4607 #endif /* CONFIG_FREEZE */
4608 
4609 	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4610 		/* this is possible if we decompressed a page from the segment before it finished filling */
4611 #if CONFIG_FREEZE
4612 		assert(c_seg->c_task_owner == NULL);
4613 #endif /* CONFIG_FREEZE */
4614 		c_seg_need_delayed_compaction(c_seg, TRUE);
4615 	}
4616 
4617 	lck_mtx_unlock_always(c_list_lock);
4618 
4619 	if (wakeup_swapout_thread) {
4620 		/*
4621 		 * Darkwake and Freeze configs always
4622 		 * wake up the swapout thread because
4623 		 * the compactor thread that normally handles
4624 		 * it may not be running as much in these
4625 		 * configs.
4626 		 */
4627 		thread_wakeup((event_t)&vm_swapout_thread);
4628 	}
4629 
4630 	*current_chead = NULL;
4631 }
4632 
4633 /*
4634  * returns with c_seg locked
4635  */
4636 void
4637 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4638 {
4639 	clock_sec_t     sec;
4640 	clock_nsec_t    nsec;
4641 
4642 	clock_get_system_nanotime(&sec, &nsec);
4643 
4644 	lck_mtx_lock_spin_always(c_list_lock);
4645 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4646 
4647 	assert(c_seg->c_busy_swapping);
4648 	assert(c_seg->c_busy);
4649 
4650 	c_seg->c_busy_swapping = 0;
4651 
4652 	if (c_seg->c_overage_swap == TRUE) {
4653 		c_overage_swapped_count--;
4654 		c_seg->c_overage_swap = FALSE;
4655 	}
4656 	if (has_data == TRUE) {
4657 		if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4658 #if CONFIG_FREEZE
4659 			/*
4660 			 * If a segment has both identities (frozen and donated bits set), the donated
4661 			 * bit wins on the swapin path. This is because the segment is being swapped back
4662 			 * in and so is in demand and should be given more time to spend in memory before
4663 			 * being swapped back out under pressure.
4664 			 */
4665 			if (c_seg->c_has_donated_pages) {
4666 				c_seg->c_has_freezer_pages = 0;
4667 			}
4668 #endif /* CONFIG_FREEZE */
4669 			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4670 		} else {
4671 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4672 		}
4673 
4674 		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4675 			c_seg_need_delayed_compaction(c_seg, TRUE);
4676 		}
4677 	} else {
4678 		c_seg->c_store.c_buffer = (int32_t*) NULL;
4679 		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4680 
4681 		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4682 	}
4683 	c_seg->c_swappedin_ts = (uint32_t)sec;
4684 	c_seg->c_swappedin = true;
4685 #if TRACK_C_SEGMENT_UTILIZATION
4686 	c_seg->c_decompressions_since_swapin = 0;
4687 #endif /* TRACK_C_SEGMENT_UTILIZATION */
4688 
4689 	lck_mtx_unlock_always(c_list_lock);
4690 }
4691 
4692 
4693 
4694 /*
4695  * c_seg has to be locked and is returned locked if the c_seg isn't freed
4696  * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4697  * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4698  */
4699 
4700 int
4701 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4702 {
4703 	vm_offset_t     addr = 0;
4704 	uint32_t        io_size = 0;
4705 	uint64_t        f_offset;
4706 	thread_pri_floor_t token;
4707 
4708 	assert(C_SEG_IS_ONDISK(c_seg));
4709 
4710 #if !CHECKSUM_THE_SWAP
4711 	c_seg_trim_tail(c_seg);
4712 #endif
4713 	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4714 	f_offset = c_seg->c_store.c_swap_handle;
4715 
4716 	C_SEG_BUSY(c_seg);
4717 	c_seg->c_busy_swapping = 1;
4718 
4719 	/*
4720 	 * This thread is likely going to block for I/O.
4721 	 * Make sure it is ready to run when the I/O completes because
4722 	 * it needs to clear the busy bit on the c_seg so that other
4723 	 * waiting threads can make progress too.
4724 	 */
4725 	token = thread_priority_floor_start();
4726 	lck_mtx_unlock_always(&c_seg->c_lock);
4727 
4728 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4729 
4730 	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4731 	c_seg->c_store.c_buffer = (int32_t*) addr;
4732 
4733 	kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4734 	    VM_KERN_MEMORY_COMPRESSOR);
4735 
4736 	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4737 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4738 
4739 		kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4740 		    VM_KERN_MEMORY_COMPRESSOR);
4741 
4742 		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4743 	} else {
4744 #if ENCRYPTED_SWAP
4745 		vm_swap_decrypt(c_seg, true);
4746 #endif /* ENCRYPTED_SWAP */
4747 
4748 #if CHECKSUM_THE_SWAP
4749 		if (c_seg->cseg_swap_size != io_size) {
4750 			panic("swapin size doesn't match swapout size");
4751 		}
4752 
4753 		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4754 			panic("c_seg_swapin - Swap hash mismatch");
4755 		}
4756 #endif /* CHECKSUM_THE_SWAP */
4757 
4758 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4759 
4760 		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4761 
4762 #if CONFIG_FREEZE
4763 		/*
4764 		 * c_seg_swapin_requeue() returns with the c_seg lock held.
4765 		 */
4766 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4767 			assert(c_seg->c_busy);
4768 
4769 			lck_mtx_unlock_always(&c_seg->c_lock);
4770 			lck_mtx_lock_spin_always(c_list_lock);
4771 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4772 		}
4773 
4774 		if (c_seg->c_task_owner) {
4775 			c_seg_update_task_owner(c_seg, NULL);
4776 		}
4777 
4778 		lck_mtx_unlock_always(c_list_lock);
4779 
4780 		os_atomic_add(&c_segment_pages_compressed_incore, c_seg->c_slots_used, relaxed);
4781 		if (c_seg->c_has_donated_pages) {
4782 			os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4783 		}
4784 #endif /* CONFIG_FREEZE */
4785 
4786 		__assert_only unsigned int prev_swapped_count = os_atomic_sub_orig(
4787 			&vm_page_swapped_count, c_seg->c_slots_used, relaxed);
4788 		assert3u(prev_swapped_count, >=, c_seg->c_slots_used);
4789 		os_atomic_add(&compressor_bytes_used, c_seg->c_bytes_used, relaxed);
4790 
4791 		if (force_minor_compaction == TRUE) {
4792 			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4793 				/*
4794 				 * c_seg was completely empty so it was freed,
4795 				 * so be careful not to reference it again
4796 				 *
4797 				 * Drop the boost so that the thread priority
4798 				 * is returned back to where it is supposed to be.
4799 				 */
4800 				thread_priority_floor_end(&token);
4801 				return 1;
4802 			}
4803 
4804 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4805 		}
4806 	}
4807 	C_SEG_WAKEUP_DONE(c_seg);
4808 
4809 	/*
4810 	 * Drop the boost so that the thread priority
4811 	 * is returned back to where it is supposed to be.
4812 	 */
4813 	thread_priority_floor_end(&token);
4814 
4815 	return 0;
4816 }
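
/*
 * Illustrative caller sketch (not part of this file's logic; assumes an
 * ondisk c_seg): per the contract above, the caller holds the c_seg lock
 * and PAGE_REPLACEMENT_DISALLOWED(TRUE) on entry, and must not touch the
 * c_seg again if 1 is returned:
 *
 *	PAGE_REPLACEMENT_DISALLOWED(TRUE);
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	if (C_SEG_IS_ONDISK(c_seg)) {
 *		if (c_seg_swapin(c_seg, TRUE, TRUE) == 0) {
 *			... c_seg was swapped in and is still locked here ...
 *		} else {
 *			... c_seg was freed by the minor compaction; don't touch it ...
 *		}
 *	}
 */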
4817 
4818 /*
4819  * TODO: refactor the CAS loops in c_segment_sv_hash_drop_ref() and c_segment_sv_hash_insert()
4820  * to os_atomic_rmw_loop() [rdar://139546215]
4821  */
4822 
4823 static void
4824 c_segment_sv_hash_drop_ref(int hash_indx)
4825 {
4826 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4827 
4828 	while (1) {
4829 		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4830 
4831 		n_sv_he.he_ref = o_sv_he.he_ref - 1;
4832 		n_sv_he.he_data = o_sv_he.he_data;
4833 
4834 		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4835 			if (n_sv_he.he_ref == 0) {
4836 				os_atomic_dec(&c_segment_svp_in_hash, relaxed);
4837 			}
4838 			break;
4839 		}
4840 	}
4841 }
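
/*
 * Sketch of the os_atomic_rmw_loop() refactor mentioned in the TODO above
 * (illustrative only, not compiled; see rdar://139546215); the manual CAS
 * loop in c_segment_sv_hash_drop_ref() could be expressed as:
 *
 *	struct c_sv_hash_entry o_sv_he, n_sv_he;
 *
 *	os_atomic_rmw_loop(&c_segment_sv_hash_table[hash_indx].he_record,
 *	    o_sv_he.he_record, n_sv_he.he_record, relaxed, {
 *		n_sv_he.he_ref = o_sv_he.he_ref - 1;
 *		n_sv_he.he_data = o_sv_he.he_data;
 *	});
 *	if (n_sv_he.he_ref == 0) {
 *		os_atomic_dec(&c_segment_svp_in_hash, relaxed);
 *	}
 */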
4842 
4843 
4844 static int
4845 c_segment_sv_hash_insert(uint32_t data)
4846 {
4847 	int             hash_sindx;
4848 	int             misses;
4849 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4850 	boolean_t       got_ref = FALSE;
4851 
4852 	if (data == 0) {
4853 		os_atomic_inc(&c_segment_svp_zero_compressions, relaxed);
4854 	} else {
4855 		os_atomic_inc(&c_segment_svp_nonzero_compressions, relaxed);
4856 	}
4857 
4858 	hash_sindx = data & C_SV_HASH_MASK;
4859 
4860 	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4861 		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4862 
4863 		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4864 			n_sv_he.he_ref = o_sv_he.he_ref + 1;
4865 			n_sv_he.he_data = data;
4866 
4867 			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4868 				if (n_sv_he.he_ref == 1) {
4869 					os_atomic_inc(&c_segment_svp_in_hash, relaxed);
4870 				}
4871 				got_ref = TRUE;
4872 				break;
4873 			}
4874 			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4875 		}
4876 		if (got_ref == TRUE) {
4877 			break;
4878 		}
4879 		hash_sindx++;
4880 
4881 		if (hash_sindx == C_SV_HASH_SIZE) {
4882 			hash_sindx = 0;
4883 		}
4884 	}
4885 	if (got_ref == FALSE) {
4886 		return -1;
4887 	}
4888 
4889 	return hash_sindx;
4890 }
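
/*
 * Usage sketch (mirrors the single-value path in c_compress_page() below):
 * a page filled entirely with one 32-bit value is never stored in a
 * c_segment; the value is refcounted here and the slot mapping points at
 * the pseudo-segment C_SV_CSEG_ID instead:
 *
 *	int hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
 *	if (hash_index != -1) {
 *		slot_ptr->s_cindx = hash_index;
 *		slot_ptr->s_cseg = C_SV_CSEG_ID;
 *	}
 */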
4891 
4892 
4893 #if RECORD_THE_COMPRESSED_DATA
4894 
4895 static void
4896 c_compressed_record_data(char *src, int c_size)
4897 {
4898 	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4899 		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4900 	}
4901 
4902 	*(int *)((void *)c_compressed_record_cptr) = c_size;
4903 
4904 	c_compressed_record_cptr += 4;
4905 
4906 	memcpy(c_compressed_record_cptr, src, c_size);
4907 	c_compressed_record_cptr += c_size;
4908 }
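
/*
 * Record layout sketch: the capture buffer is a stream of native-endian,
 * length-prefixed blobs, one per compressed page:
 *
 *	[int32 c_size][c_size bytes of compressed data][int32 c_size]...
 *
 * c_compressed_record_write() flushes the buffer once it fills up (see the
 * check against c_seg_allocsize in c_compress_page()).
 */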
4909 #endif
4910 
4911 #if HAS_MTE
4912 
4913 /* with KASAN we panic unconditionally in the next MTE compression functions */
4914 #pragma clang diagnostic push
4915 #pragma clang diagnostic ignored "-Wmissing-noreturn"
4916 
4917 /*
4918  * Compress the MTE tags for a page that starts in va.
4919  */
4920 static uint32_t
4921 compress_mte_tags(void *va, char *buffer_out, uint32_t size_out)
4922 {
4923 #if defined(KASAN)
4924 #pragma unused(va)
4925 #pragma unused(buffer_out)
4926 #pragma unused(size_out)
4927 	panic("KASAN with MTE pages is not supported (%s)", __func__);
4928 #endif /* KASAN */
4929 
4930 	MTE_BULK_DECLARE_TAGLIST(temp_tags, PAGE_SIZE);
4931 
4932 	/* copy tags to temp buffer */
4933 	mte_bulk_read_tags(va, PAGE_SIZE, temp_tags, sizeof(temp_tags));
4934 
4935 	uint32_t size_written = vm_mte_rle_compress_tags((uint8_t*)temp_tags, C_MTE_SIZE, (uint8_t*)buffer_out, size_out);
4936 	assert(size_written > 0);
4937 	/* size_written can be > 512 which indicates single-tag optimization,
4938 	 * in which case nothing is written to the out buffer */
4939 	vm_mte_tags_stats_compressed(size_written);
4940 
4941 	return size_written;
4942 }
4943 
4944 /*
4945  * Decompress the MTE tags for the page that starts in va.
4946  */
4947 static bool
4948 decompress_mte_tags(void *va, uint32_t size_in, char *buffer_in)
4949 {
4950 #if defined(KASAN)
4951 #pragma unused(va)
4952 #pragma unused(buffer_in)
4953 #pragma unused(size_in)
4954 	panic("KASAN with MTE pages is not supported (%s)", __func__);
4955 #endif /* KASAN */
4956 	assert(size_in > 0);
4957 
4958 	MTE_BULK_DECLARE_TAGLIST(temp_tags, PAGE_SIZE);
4959 
4960 	bool ok = vm_mte_rle_decompress_tags((uint8_t*)buffer_in, size_in, (uint8_t*)temp_tags, C_MTE_SIZE);
4961 	/* returns false if the compression encoding was somehow corrupted */
4962 	assertf(ok, "corrupt tags encoding in:%p, %u out:%p", buffer_in, size_in, temp_tags);
4963 
4964 	if (ok) {
4965 		mte_bulk_write_tags(va, PAGE_SIZE, temp_tags, sizeof(temp_tags));
4966 	}
4967 
4968 	return ok;
4969 }
4970 
4971 #pragma clang diagnostic pop
4972 
4973 #endif /* HAS_MTE */
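
/*
 * Slot layout sketch when MTE tags are present (see c_compress_page() and
 * c_decompress_page() below): the RLE-compressed tags are appended directly
 * after the compressed payload within the same slot, and the rounded slot
 * size covers both:
 *
 *	c_buffer:  ...[payload: c_size bytes][tags: cs->c_mte_size bytes]...
 *	              ^ C_SEG_OFFSET_TO_BYTES(cs->c_offset)
 *	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
 */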
4974 
4975 /**
4976  * Do the actual compression of the given page
4977  * @param src [IN] address in the physical aperture of the page to compress.
4978  * @param slot_ptr [OUT] fill the slot-mapping of the c_seg+slot where the page ends up being stored
4979  * @param current_chead [IN-OUT] current filling c_seg; the pointer comes from the current compression thread state.
4980  *          On the very first call this points to NULL and this function fills it with a new filling c_seg.
4981  *          If the current filling c_seg doesn't have enough space, it will be replaced in this location
4982  *          with a new filling c_seg.
4983  * @param scratch_buf [IN] pointer from the current thread state, used by the compression codec
4984  * @return KERN_RESOURCE_SHORTAGE if the compressor has been exhausted
4985  */
4986 static kern_return_t
4987 c_compress_page(
4988 	char             *src,
4989 	c_slot_mapping_t slot_ptr,
4990 	c_segment_t      *current_chead,
4991 	char             *scratch_buf,
4992 	__unused vm_compressor_options_t flags)
4993 {
4994 	int              c_size = -1;
4995 	int              c_rounded_size = 0;
4996 	int              max_csize;
4997 	bool             nearing_limits;
4998 	c_slot_t         cs;
4999 	c_segment_t      c_seg;
5000 
5001 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
5002 retry:  /* may need to retry if the currently filling c_seg will not have enough space */
5003 	c_seg = c_seg_allocate(current_chead, &nearing_limits);
5004 	if (c_seg == NULL) {
5005 		if (nearing_limits) {
5006 			memorystatus_respond_to_compressor_exhaustion();
5007 		}
5008 		return KERN_RESOURCE_SHORTAGE;
5009 	}
5010 
5011 	/*
5012 	 * c_seg_allocate() returns with c_seg lock held
5013 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5014 	 * c_nextslot has been allocated and
5015 	 * c_store.c_buffer populated
5016 	 */
5017 	assert(c_seg->c_state == C_IS_FILLING);
5018 
5019 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
5020 
5021 	C_SLOT_ASSERT_PACKABLE(slot_ptr);
5022 	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
5023 
5024 	cs->c_offset = c_seg->c_nextoffset;
5025 
5026 	unsigned int avail_space = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
5027 
5028 #if HAS_MTE
5029 	/* Hold back room for the MTE tags, which can be as long as C_MTE_SIZE in the worst case */
5030 	/* possible optimization: radr://133756934 */
5031 	if (flags & C_MTE) {
5032 		if (avail_space > C_MTE_SIZE) {
5033 			avail_space -= C_MTE_SIZE;
5034 		} else {
5035 			avail_space = 0;
5036 		}
5037 	}
5038 #endif /* HAS_MTE */
5039 
5040 	max_csize = avail_space;
5041 	if (max_csize > PAGE_SIZE) {
5042 		max_csize = PAGE_SIZE;
5043 	}
5044 
5045 #if CHECKSUM_THE_DATA
5046 	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
5047 #endif
5048 	boolean_t incomp_copy = FALSE; /* codec indicates it already copied an incompressible page */
5049 	/* The SW codec case needs 4 bytes for its header and these are not accounted for in the bytes_budget argument.
5050 	 * Also, the SV-not-in-hash case needs 4 bytes. */
5051 	int max_csize_adj = (max_csize - 4);
5052 	if (__improbable(max_csize_adj < 0)) {
5053 		max_csize_adj = 0;
5054 	}
5055 
5056 	if (max_csize > 0 && max_csize_adj > 0) {
5057 		if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5058 #if defined(__arm64__)
5059 			uint16_t ccodec = CINVALID;
5060 			uint32_t inline_popcount;
5061 			if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
5062 				vm_memtag_disable_checking();
5063 				c_size = metacompressor((const uint8_t *) src,
5064 				    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5065 				    max_csize_adj, &ccodec,
5066 				    scratch_buf, &incomp_copy, &inline_popcount);
5067 				vm_memtag_enable_checking();
5068 				assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5069 
5070 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
5071 				/* The HW codec case doesn't detect overflow on its own; instead it spills into the next page
5072 				 * and we need to detect that this happened */
5073 				if (c_size > max_csize_adj) {
5074 					c_size = -1;
5075 				}
5076 #endif
5077 			} else {
5078 				c_size = -1;
5079 			}
5080 			assert(ccodec == CCWK || ccodec == CCLZ4);
5081 			cs->c_codec = ccodec;
5082 #endif
5083 		} else {
5084 #if defined(__arm64__)
5085 			vm_memtag_disable_checking();
5086 			cs->c_codec = CCWK;
5087 			__unreachable_ok_push
5088 			if (PAGE_SIZE == 4096) {
5089 				c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5090 				    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5091 			} else {
5092 				c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5093 				    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5094 			}
5095 			__unreachable_ok_pop
5096 			vm_memtag_enable_checking();
5097 #else
5098 			vm_memtag_disable_checking();
5099 			c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5100 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5101 			vm_memtag_enable_checking();
5102 #endif
5103 		}
5104 	} else { /* max_csize == 0 or max_csize_adj == 0 */
5105 		c_size = -1;
5106 	}
5107 	/* c_size is the size written by the codec; 0 if the page is a uniform 32-bit value;
5108 	 * -1 if there was not enough space or the page was incompressible */
5109 	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
5110 	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
5111 
5112 	if (c_size == -1) {
5113 		if (max_csize < PAGE_SIZE) {
5114 			c_current_seg_filled(c_seg, current_chead);
5115 			assert(*current_chead == NULL);
5116 
5117 			lck_mtx_unlock_always(&c_seg->c_lock);
5118 			/* TODO: it may be worth requiring codecs to distinguish
5119 			 * between incompressible inputs and failures due to budget exhaustion.
5120 			 * right now this assumes that if we had a full PAGE_SIZE of space, then the codec failed due to incompressible input */
5121 
5122 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
5123 			goto retry;  /* previous c_seg didn't have enough space, we finalized it and can try again with a fresh c_seg */
5124 		}
5125 		c_size = PAGE_SIZE; /* tag:WK-INCOMPRESSIBLE */
5126 
5127 		if (incomp_copy == FALSE) { /* codec did not copy the incompressible input */
5128 			vm_memtag_disable_checking();
5129 			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
5130 			vm_memtag_enable_checking();
5131 		}
5132 
5133 		os_atomic_inc(&c_segment_noncompressible_pages, relaxed);
5134 	} else if (c_size == 0) {
5135 #if HAS_MTE
5136 		/* don't try to query the hash if we need to save the MTE tags since we have nowhere to put the tags
5137 		 * (also, reading the uint32 at src for the hash query would be an MTE violation) tag:NO-SV-AND-MTE */
5138 		if (!(flags & C_MTE))
5139 #endif /* HAS_MTE */
5140 		{
5141 			/*
5142 			 * Special case - this is a page completely full of a single 32 bit value.
5143 			 * We store some values directly in the c_slot_mapping, if not there, the
5144 			 * 4 byte value goes in the compressor segment.
5145 			 */
5146 			int hash_index = c_segment_sv_hash_insert(*(uint32_t *) (uintptr_t) src);
5147 
5148 			if (hash_index != -1) {
5149 				slot_ptr->s_cindx = hash_index;
5150 				slot_ptr->s_cseg = C_SV_CSEG_ID;
5151 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5152 				slot_ptr->s_uncompressed = 0;
5153 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5154 
5155 				os_atomic_inc(&c_segment_svp_hash_succeeded, relaxed);
5156 #if RECORD_THE_COMPRESSED_DATA
5157 				c_compressed_record_data(src, 4);
5158 #endif
5159 				/* we didn't write anything to c_buffer and didn't end up using the slot in the c_seg at all, so skip all
5160 				 * the book-keeping for the case where we did */
5161 				goto sv_compression;
5162 			}
5163 		}
5164 		os_atomic_inc(&c_segment_svp_hash_failed, relaxed);
5165 
5166 		c_size = 4;
5167 		vm_memtag_disable_checking();
5168 		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
5169 		vm_memtag_enable_checking();
5170 	}
5171 
5172 #if RECORD_THE_COMPRESSED_DATA
5173 	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
5174 #endif
5175 #if CHECKSUM_THE_COMPRESSED_DATA
5176 	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
5177 #endif
5178 #if POPCOUNT_THE_COMPRESSED_DATA
5179 	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
5180 #endif
5181 
5182 	PACK_C_SIZE(cs, c_size);
5183 
5184 #if HAS_MTE
5185 	/* For bring up, just copy the tags into the segment */
5186 	if (c_size && (flags & C_MTE)) {
5187 		/* current data we filled started at c_offset and had size c_size */
5188 		int space_left = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset) - c_size;
5189 		assert(space_left >= C_MTE_SIZE); /* This is guaranteed by the avail_space modification above */
5190 		cs->c_mte_size = compress_mte_tags(src, ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + c_size, (uint32_t)space_left);
5191 	} else {
5192 		cs->c_mte_size = 0;
5193 	}
5194 	/* next invocation of WKDMc expects to be writing at a 64 byte alignment */
5195 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
5196 #else /* HAS_MTE */
5197 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size);
5198 #endif /* HAS_MTE */
5199 
5200 	c_seg->c_bytes_used += c_rounded_size;
5201 	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5202 	c_seg->c_slots_used++;
5203 
5204 #if CONFIG_FREEZE
5205 	/* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
5206 	os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
5207 	if (c_seg->c_has_donated_pages) {
5208 		os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5209 	}
5210 #endif /* CONFIG_FREEZE */
5211 
5212 	slot_ptr->s_cindx = c_seg->c_nextslot++;
5213 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1, see other usages of s_cseg where it's decremented */
5214 	slot_ptr->s_cseg = c_seg->c_mysegno + 1;
5215 
5216 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5217 	slot_ptr->s_uncompressed = 0;
5218 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5219 
5220 sv_compression:
5221 	/* can we say this c_seg is full? */
5222 	if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
5223 		/* condition 1: segment buffer is almost full, don't bother trying to fill it further.
5224 		 * condition 2: we can't have any more slots in this c_segment even if we had buffer space */
5225 		c_current_seg_filled(c_seg, current_chead);
5226 		assert(*current_chead == NULL);
5227 	}
5228 
5229 	lck_mtx_unlock_always(&c_seg->c_lock);
5230 
5231 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5232 
5233 #if RECORD_THE_COMPRESSED_DATA
5234 	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
5235 		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
5236 		c_compressed_record_cptr = c_compressed_record_sbuf;
5237 	}
5238 #endif
5239 	if (c_size) {
5240 		os_atomic_add(&c_segment_compressed_bytes, c_size, relaxed);
5241 		os_atomic_add(&compressor_bytes_used, c_rounded_size, relaxed);
5242 	}
5243 	os_atomic_add(&c_segment_input_bytes, PAGE_SIZE, relaxed);
5244 
5245 	os_atomic_inc(&c_segment_pages_compressed, relaxed);
5246 #if DEVELOPMENT || DEBUG
5247 	if (!compressor_running_perf_test) {
5248 		/*
5249 		 * The perf_compressor benchmark should not be able to trigger
5250 		 * compressor thrashing jetsams.
5251 		 */
5252 		os_atomic_inc(&sample_period_compression_count, relaxed);
5253 	}
5254 #else /* DEVELOPMENT || DEBUG */
5255 	os_atomic_inc(&sample_period_compression_count, relaxed);
5256 #endif /* DEVELOPMENT || DEBUG */
5257 
5258 	if (nearing_limits) {
5259 		memorystatus_respond_to_compressor_exhaustion();
5260 	}
5261 
5262 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
5263 
5264 	return KERN_SUCCESS;
5265 }
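
/*
 * Slot-mapping sketch: c_compress_page() records <segno + 1, slot index> and
 * c_decompress_page() reverses it. For example, a page stored in segment 12,
 * slot 3 round-trips as:
 *
 *	slot_ptr->s_cseg  = 12 + 1;	(0 is reserved to mean "empty slot")
 *	slot_ptr->s_cindx = 3;
 *	...
 *	c_segno = slot_ptr->s_cseg - 1;	(back to 12 on the decompress side)
 *	cs = C_SEG_SLOT_FROM_INDEX(c_seg, slot_ptr->s_cindx);
 */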
5266 
5267 static inline void
5268 sv_decompress(int32_t *ddst, int32_t pattern)
5269 {
5270 //	assert(__builtin_constant_p(PAGE_SIZE) != 0);
5271 #if defined(__x86_64__)
5272 	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
5273 #elif defined(__arm64__)
5274 	assert((PAGE_SIZE % 128) == 0);
5275 	if (pattern == 0) {
5276 		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
5277 	} else {
5278 		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
5279 	}
5280 #else
5281 	size_t          i;
5282 
5283 	/* Unroll the pattern fill loop 4x to encourage the
5284 	 * compiler to emit NEON stores, cf.
5285 	 * <rdar://problem/25839866> Loop autovectorization
5286 	 * anomalies.
5287 	 */
5288 	/* We use separate loops for each PAGE_SIZE
5289 	 * to allow the autovectorizer to engage, as PAGE_SIZE
5290 	 * may not be a constant.
5291 	 */
5292 
5293 	__unreachable_ok_push
5294 	if (PAGE_SIZE == 4096) {
5295 		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
5296 			*ddst++ = pattern;
5297 			*ddst++ = pattern;
5298 			*ddst++ = pattern;
5299 			*ddst++ = pattern;
5300 		}
5301 	} else {
5302 		assert(PAGE_SIZE == 16384);
5303 		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
5304 			*ddst++ = pattern;
5305 			*ddst++ = pattern;
5306 			*ddst++ = pattern;
5307 			*ddst++ = pattern;
5308 		}
5309 	}
5310 	__unreachable_ok_pop
5311 #endif
5312 }
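
/*
 * e.g. a page that compressed down to the single value 0xDEADBEEF is
 * reconstituted with (illustrative):
 *
 *	sv_decompress((int32_t *)(uintptr_t)dst, (int32_t)0xDEADBEEF);
 *
 * which fills all PAGE_SIZE / sizeof(int32_t) words of dst with the pattern.
 */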
5313 
5314 static vm_decompress_result_t
5315 c_decompress_page(
5316 	char            *dst,
5317 	volatile c_slot_mapping_t slot_ptr,    /* why volatile? perhaps due to changes across hibernation */
5318 	vm_compressor_options_t flags,
5319 	int             *zeroslot)
5320 {
5321 	c_slot_t        cs;
5322 	c_segment_t     c_seg;
5323 	uint32_t        c_segno;
5324 	uint16_t        c_indx;
5325 	int             c_rounded_size;
5326 	uint32_t        c_size;
5327 	vm_decompress_result_t retval = 0;
5328 	boolean_t       need_unlock = TRUE;
5329 	boolean_t       consider_defragmenting = FALSE;
5330 	boolean_t       kdp_mode = FALSE;
5331 
5332 #if HAS_MTE
5333 	vm_mte_c_tags_removal_reason_t        mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5334 #endif
5335 	if (__improbable(flags & C_KDP)) {
5336 		if (not_in_kdp) {
5337 			panic("C_KDP passed to decompress page from outside of debugger context");
5338 		}
5339 
5340 		assert((flags & C_KEEP) == C_KEEP);
5341 		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
5342 
5343 		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
5344 			return DECOMPRESS_NEED_BLOCK;
5345 		}
5346 
5347 		kdp_mode = TRUE;
5348 		*zeroslot = 0;
5349 	}
5350 
5351 ReTry:
5352 	if (__probable(!kdp_mode)) {
5353 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5354 	} else {
5355 		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
5356 			return DECOMPRESS_NEED_BLOCK;
5357 		}
5358 	}
5359 
5360 #if HIBERNATION
5361 	/*
5362 	 * if hibernation is enabled, it indicates (via a call
5363 	 * to 'vm_decompressor_lock') that no further
5364 	 * decompressions are allowed once it reaches
5365 	 * the point of flushing all of the currently dirty
5366 	 * anonymous memory through the compressor and out
5367 	 * to disk... in this state we allow freeing of compressed
5368 	 * pages and must honor the C_DONT_BLOCK case
5369 	 */
5370 	if (__improbable(dst && decompressions_blocked == TRUE)) {
5371 		if (flags & C_DONT_BLOCK) {
5372 			if (__probable(!kdp_mode)) {
5373 				PAGE_REPLACEMENT_DISALLOWED(FALSE);
5374 			}
5375 
5376 			*zeroslot = 0;
5377 			return DECOMPRESS_NEED_BLOCK;
5378 		}
5379 		/*
5380 		 * it's safe to atomically assert and block behind the
5381 		 * lock held in shared mode because "decompressions_blocked" is
5382 		 * only set and cleared and the thread_wakeup done when the lock
5383 		 * is held exclusively
5384 		 */
5385 		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
5386 
5387 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5388 
5389 		thread_block(THREAD_CONTINUE_NULL);
5390 
5391 		goto ReTry;
5392 	}
5393 #endif
5394 	/* s_cseg is actually "segno+1" */
5395 	c_segno = slot_ptr->s_cseg - 1;
5396 
5397 	if (__improbable(c_segno >= c_segments_available)) {
5398 		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
5399 		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
5400 	}
5401 
5402 	if (__improbable(c_segments_get(c_segno)->c_segno < c_segments_available)) {
5403 		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
5404 		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
5405 	}
5406 
5407 	c_seg = c_segments_get(c_segno)->c_seg;
5408 
5409 	if (__probable(!kdp_mode)) {
5410 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5411 	} else {
5412 		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
5413 			return DECOMPRESS_NEED_BLOCK;
5414 		}
5415 	}
5416 
5417 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5418 
5419 	if (dst == NULL && c_seg->c_busy_swapping) {
5420 		assert(c_seg->c_busy);
5421 
5422 		goto bypass_busy_check;
5423 	}
5424 	if (flags & C_DONT_BLOCK) {
5425 		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
5426 			*zeroslot = 0;
5427 
5428 			retval = DECOMPRESS_NEED_BLOCK;
5429 			goto done;
5430 		}
5431 	}
5432 	if (c_seg->c_busy) {
5433 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5434 
5435 		c_seg_wait_on_busy(c_seg);
5436 
5437 		goto ReTry;
5438 	}
5439 bypass_busy_check:
5440 
5441 	c_indx = slot_ptr->s_cindx;
5442 
5443 	if (__improbable(c_indx >= c_seg->c_nextslot)) {
5444 		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5445 		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5446 	}
5447 
5448 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5449 
5450 	c_size = UNPACK_C_SIZE(cs);
5451 
5452 #if HAS_MTE
5453 	if (dst) { /* if we're coming from vm_compressor_free() we're not going to have flags,
5454 		    * see rdar://133837861 to make this more generic */
5455 		if (cs->c_mte_size != 0) {
5456 			assertf(flags & C_MTE,
5457 			    "decompress page with mte_size=%d but no C_MTE in flags=%x", (int) cs->c_mte_size, flags);
5458 		} else {
5459 			assertf(!(flags & C_MTE),
5460 			    "decompress page without mte (mte_size=%d) and with C_MTE in flags=%x", (int) cs->c_mte_size, flags);
5461 		}
5462 	}
5463 #endif /* HAS_MTE */
5464 
5465 	if (__improbable(c_size == 0)) { /* sanity check it's not an empty slot */
5466 		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5467 		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5468 	}
5469 
5470 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
5471 	/* c_rounded_size should not change after this point so that it remains consistent on all branches */
5472 
5473 	if (dst) {  /* would be NULL if we don't want the page content, from free */
5474 		uint32_t        age_of_cseg;
5475 		clock_sec_t     cur_ts_sec;
5476 		clock_nsec_t    cur_ts_nsec;
5477 
5478 		if (C_SEG_IS_ONDISK(c_seg)) {
5479 #if CONFIG_FREEZE
5480 			if (freezer_incore_cseg_acct) {
5481 				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5482 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5483 					lck_mtx_unlock_always(&c_seg->c_lock);
5484 
5485 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5486 
5487 					goto ReTry;
5488 				}
5489 
5490 				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5491 				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5492 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5493 					lck_mtx_unlock_always(&c_seg->c_lock);
5494 
5495 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5496 
5497 					goto ReTry;
5498 				}
5499 			}
5500 #endif /* CONFIG_FREEZE */
5501 			assert(kdp_mode == FALSE);
5502 			retval = c_seg_swapin(c_seg, FALSE, TRUE);
5503 			assert(retval == 0);
5504 
5505 			retval = DECOMPRESS_SUCCESS_SWAPPEDIN;
5506 		}
5507 		if (c_seg->c_state == C_ON_BAD_Q) {
5508 			assert(c_seg->c_store.c_buffer == NULL);
5509 			*zeroslot = 0;
5510 
5511 			retval = DECOMPRESS_FAILED_BAD_Q;
5512 			goto done;
5513 		}
5514 
5515 #if POPCOUNT_THE_COMPRESSED_DATA
5516 		unsigned csvpop;
5517 		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5518 		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5519 			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5520 		}
5521 #endif
5522 
5523 #if CHECKSUM_THE_COMPRESSED_DATA
5524 		unsigned csvhash;
5525 		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5526 			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5527 		}
5528 #endif
5529 		if (c_size == PAGE_SIZE) { /* tag:WK-INCOMPRESSIBLE */
5530 			/* page wasn't compressible... just copy it out */
5531 			vm_memtag_disable_checking();
5532 			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5533 			vm_memtag_enable_checking();
5534 		} else if (c_size == 4) {
5535 			int32_t         data;
5536 			int32_t         *dptr;
5537 
5538 			/*
5539 			 * page was populated with a single value
5540 			 * that didn't fit into our fast hash
5541 			 * so we packed it in as a single non-compressed value
5542 			 * that we need to populate the page with
5543 			 */
5544 			dptr = (int32_t *)(uintptr_t)dst;
5545 			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5546 			vm_memtag_disable_checking();
5547 			sv_decompress(dptr, data);
5548 			vm_memtag_enable_checking();
5549 		} else {  /* normal segment decompress */
5550 			uint32_t        my_cpu_no;
5551 			char            *scratch_buf;
5552 
5553 			my_cpu_no = cpu_number();
5554 
5555 			assert(my_cpu_no < compressor_cpus);
5556 
5557 			if (__probable(!kdp_mode)) {
5558 				/*
5559 				 * we're behind the c_seg lock held in spin mode
5560 				 * which means pre-emption is disabled... therefore
5561 				 * the following sequence is atomic and safe
5562 				 */
5563 				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5564 			} else if (flags & C_KDP_MULTICPU) {
5565 				assert(vm_compressor_kdp_state.kc_scratch_bufs != NULL);
5566 				scratch_buf = &vm_compressor_kdp_state.kc_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5567 			} else {
5568 				scratch_buf = vm_compressor_kdp_state.kc_panic_scratch_buf;
5569 			}
5570 
5571 			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5572 #if defined(__arm64__)
5573 				uint16_t c_codec = cs->c_codec;
5574 				uint32_t inline_popcount;
5575 				vm_memtag_disable_checking();
5576 				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5577 				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5578 					vm_memtag_enable_checking();
5579 					retval = DECOMPRESS_FAILED_ALGO_ERROR;
5580 				} else {
5581 					vm_memtag_enable_checking();
5582 					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5583 				}
5584 #endif
5585 			} else {  /* algorithm == VM_COMPRESSOR_DEFAULT_CODEC */
5586 				vm_memtag_disable_checking();
5587 #if defined(__arm64__)
5588 				__unreachable_ok_push
5589 				if (PAGE_SIZE == 4096) {
5590 					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5591 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5592 				} else {
5593 					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5594 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5595 				}
5596 				__unreachable_ok_pop
5597 #else
5598 				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5599 				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5600 #endif
5601 				vm_memtag_enable_checking();
5602 			}
5603 		} /* normal segment decompress */
5604 
5605 #if CHECKSUM_THE_DATA
5606 		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5607 #if defined(__arm64__)
5608 			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5609 			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5610 #else /* defined(__arm64__) */
5611 			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5612 #endif /* defined(__arm64__) */
5613 		}
5614 #endif /* CHECKSUM_THE_DATA */
5615 		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5616 			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5617 
5618 			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5619 			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5620 				os_atomic_inc(&age_of_decompressions_during_sample_period[age_of_cseg], relaxed);
5621 			} else {
5622 				os_atomic_inc(&overage_decompressions_during_sample_period, relaxed);
5623 			}
5624 
5625 			os_atomic_inc(&sample_period_decompression_count, relaxed);
5626 		}
5627 
5628 #if HAS_MTE
5629 		/*
5630 		 * Only decompress tags if there are tags to decompress and the
5631 		 * out page is actually going to use tagging.
5632 		 */
5633 		if (cs->c_mte_size != 0 && (flags & C_MTE_DROP_TAGS) == 0) {
5634 			if (!decompress_mte_tags(dst, cs->c_mte_size, ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + c_size)) {
5635 				retval = DECOMPRESS_FAILED_TAGS;
5636 				mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_CORRUPT;
5637 			} else {
5638 				mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_DECOMPRESSED;
5639 			}
5640 		} else {
5641 			mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5642 		}
5643 #endif /* HAS_MTE */
5644 
5645 #if TRACK_C_SEGMENT_UTILIZATION
5646 		if (c_seg->c_swappedin) {
5647 			c_seg->c_decompressions_since_swapin++;
5648 		}
5649 #endif /* TRACK_C_SEGMENT_UTILIZATION */
5650 	} /* dst */
5651 	else {
5652 		/*
5653 		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
5654 		 */
5655 		if (C_SEG_IS_ONDISK(c_seg)) {
5656 			__assert_only unsigned int prev_swapped_count =
5657 			    os_atomic_dec_orig(&vm_page_swapped_count, relaxed);
5658 			assert3u(prev_swapped_count, >, 0);
5659 #if CONFIG_FREEZE
5660 			/*
5661 			 * The compression sweep feature will push out anonymous pages to disk
5662 			 * without going through the freezer path and so those c_segs, while
5663 			 * swapped out, won't have an owner.
5664 			 */
5665 			if (c_seg->c_task_owner) {
5666 				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5667 			}
5668 
5669 			/*
5670 			 * We are freeing a page in swap without swapping it in. We bump the in-core
5671 			 * count here to simulate a swapin of a page so that we can accurately
5672 			 * decrement it below.
5673 			 */
5674 			os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
5675 			if (c_seg->c_has_donated_pages) {
5676 				os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5677 			}
5678 		} else if (c_seg->c_state == C_ON_BAD_Q) {
5679 			assert(c_seg->c_store.c_buffer == NULL);
5680 			*zeroslot = 0;
5681 
5682 			retval = DECOMPRESS_FAILED_BAD_Q_FREEZE;
5683 			goto done; /* this is intended to avoid the decrement of c_segment_pages_compressed_incore below */
5684 #endif /* CONFIG_FREEZE */
5685 		}
5686 #if HAS_MTE
5687 		mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5688 #endif /* HAS_MTE */
5689 	}
5690 
5691 	if (flags & C_KEEP) {
5692 		*zeroslot = 0;
5693 		goto done;
5694 	}
5695 
5696 #if HAS_MTE
5697 	if (cs->c_mte_size != 0) {
5698 		vm_mte_tags_stats_removed(cs->c_mte_size, mte_tags_removal_reason);
5699 	}
5700 #endif /* HAS_MTE */
5701 
5702 	/* now perform needed bookkeeping for the removal of the slot from the segment */
5703 	assert(kdp_mode == FALSE);
5704 
5705 	c_seg->c_bytes_unused += c_rounded_size;
5706 	c_seg->c_bytes_used -= c_rounded_size;
5707 
5708 	assert(c_seg->c_slots_used);
5709 	c_seg->c_slots_used--;
5710 	if (dst && c_seg->c_swappedin) {
5711 		task_t task = current_task();
5712 		if (task) {
5713 			ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5714 		}
5715 	}
5716 
5717 	PACK_C_SIZE(cs, 0); /* mark slot as empty */
5718 #if HAS_MTE
5719 	cs->c_mte_size = 0;
5720 #endif /* HAS_MTE */
5721 
5722 	if (c_indx < c_seg->c_firstemptyslot) {
5723 		c_seg->c_firstemptyslot = c_indx;
5724 	}
5725 
5726 	os_atomic_dec(&c_segment_pages_compressed, relaxed);
5727 #if CONFIG_FREEZE
5728 	os_atomic_dec(&c_segment_pages_compressed_incore, relaxed);
5729 	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5730 	if (c_seg->c_has_donated_pages) {
5731 		os_atomic_dec(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5732 		assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5733 	}
5734 #endif /* CONFIG_FREEZE */
5735 
5736 	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5737 		/*
5738 		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5739 		 * free of a compressed page (i.e. dst == NULL)
5740 		 */
5741 		os_atomic_sub(&compressor_bytes_used, c_rounded_size, relaxed);
5742 	}
5743 	if (c_seg->c_busy_swapping) {
5744 		/*
5745 		 * bypass case for c_busy_swapping...
5746 		 * let the swapin/swapout paths deal with putting
5747 		 * the c_seg on the minor compaction queue if needed
5748 		 */
5749 		assert(c_seg->c_busy);
5750 		goto done;
5751 	}
5752 	assert(!c_seg->c_busy);
5753 
5754 	if (c_seg->c_state != C_IS_FILLING) {
5755 		/* did we just remove the last slot from the segment? */
5756 		if (c_seg->c_bytes_used == 0) {
5757 			if (!(C_SEG_IS_ONDISK(c_seg))) {
5758 				/* it was compressed resident in memory */
5759 				int     pages_populated;
5760 
5761 				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5762 				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5763 
5764 				if (pages_populated) {
5765 					assert(c_seg->c_state != C_ON_BAD_Q);
5766 					assert(c_seg->c_store.c_buffer != NULL);
5767 
5768 					C_SEG_BUSY(c_seg);
5769 					lck_mtx_unlock_always(&c_seg->c_lock);
5770 
5771 					kernel_memory_depopulate(
5772 						(vm_offset_t) c_seg->c_store.c_buffer,
5773 						ptoa(pages_populated),
5774 						KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5775 
5776 					lck_mtx_lock_spin_always(&c_seg->c_lock);
5777 					C_SEG_WAKEUP_DONE(c_seg);
5778 				}
5779 				/* minor compaction will free it */
5780 				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5781 					if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5782 						/* If we're on the swapout q, we want to get out of it since there's no reason to swap out
5783 						 * anymore, so put it on the AGE Q until minor compaction frees it */
5784 						bool clear_busy = false;
5785 						if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5786 							C_SEG_BUSY(c_seg);
5787 
5788 							lck_mtx_unlock_always(&c_seg->c_lock);
5789 							lck_mtx_lock_spin_always(c_list_lock);
5790 							lck_mtx_lock_spin_always(&c_seg->c_lock);
5791 							clear_busy = true;
5792 						}
5793 						c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5794 						if (clear_busy) {
5795 							C_SEG_WAKEUP_DONE(c_seg);
5796 							clear_busy = false;
5797 						}
5798 						lck_mtx_unlock_always(c_list_lock);
5799 					}
5800 					c_seg_need_delayed_compaction(c_seg, FALSE);
5801 				}
5802 			} else { /* C_SEG_IS_ONDISK(c_seg) */
5803 				/* it's empty and on-disk, make sure it's marked as sparse */
5804 				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5805 					c_seg_move_to_sparse_list(c_seg);
5806 					consider_defragmenting = TRUE;
5807 				}
5808 			}
5809 		} else if (c_seg->c_on_minorcompact_q) {
5810 			assert(c_seg->c_state != C_ON_BAD_Q);
5811 			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5812 
5813 			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5814 				c_seg_try_minor_compaction_and_unlock(c_seg);
5815 				need_unlock = FALSE;
5816 			}
5817 		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
5818 			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5819 			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5820 				c_seg_need_delayed_compaction(c_seg, FALSE);
5821 			}
5822 		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5823 			c_seg_move_to_sparse_list(c_seg);
5824 			consider_defragmenting = TRUE;
5825 		}
5826 	} /* c_state != C_IS_FILLING */
5827 done:
5828 	if (__improbable(kdp_mode)) {
5829 		return retval;
5830 	}
5831 
5832 	if (need_unlock == TRUE) {
5833 		lck_mtx_unlock_always(&c_seg->c_lock);
5834 	}
5835 
5836 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5837 
5838 	if (consider_defragmenting == TRUE) {
5839 		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5840 	}
5841 
5842 #if !XNU_TARGET_OS_OSX
5843 	/*
5844 	 * Decompressions will generate fragmentation in the compressor pool
5845 	 * over time. Consider waking the compactor thread if any of the
5846 	 * fragmentation thresholds have been crossed as a result of this
5847 	 * decompression.
5848 	 */
5849 	vm_consider_waking_compactor_swapper();
5850 #endif /* !XNU_TARGET_OS_OSX */
5851 
5852 	return retval;
5853 }
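
/*
 * zeroslot sketch: c_decompress_page() tells its caller whether the slot
 * mapping should be cleared, e.g. in vm_compressor_get() below:
 *
 *	int zeroslot = 1;
 *
 *	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
 *	if (zeroslot) {
 *		*slot = 0;	(slot was consumed; drop the mapping)
 *	}
 */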
5854 
5855 
5856 inline bool
5857 vm_compressor_is_slot_compressed(int *slot)
5858 {
5859 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5860 #pragma unused(slot)
5861 	return true;
5862 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5863 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5864 	return !slot_ptr->s_uncompressed;
5865 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5866 }
5867 
5868 vm_decompress_result_t
5869 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5870 {
5871 	c_slot_mapping_t  slot_ptr;
5872 	char    *dst;
5873 	int     zeroslot = 1;
5874 	vm_decompress_result_t retval;
5875 
5876 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5877 	if (flags & C_PAGE_UNMODIFIED) {
5878 		int iretval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5879 		if (iretval == 0) {
5880 			os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5881 			return DECOMPRESS_SUCCESS;
5882 		}
5883 
5884 		return DECOMPRESS_FAILED_UNMODIFIED;
5885 	}
5886 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5887 
5888 	/* get the address in the physical aperture of this page to decompress into */
5889 	dst = pmap_map_compressor_page(pn);
5890 	slot_ptr = (c_slot_mapping_t)slot;
5891 
5892 	assert(dst != NULL);
5893 
5894 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5895 #if HAS_MTE
5896 		/* single value page can't be an MTE page (since there's no place to put the tags) see tag:NO-SV-AND-MTE */
5897 		assert(!(flags & C_MTE));
5898 #endif
5899 		int32_t         data;
5900 		int32_t         *dptr;
5901 
5902 		/*
5903 		 * page was populated with a single value
5904 		 * that found a home in our hash table;
5905 		 * grab that value from the hash and
5906 		 * populate the page with it
5907 		 */
5908 		dptr = (int32_t *)(uintptr_t)dst;
5909 		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5910 		sv_decompress(dptr, data);
5911 
5912 		if (!(flags & C_KEEP)) {
5913 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5914 
5915 			os_atomic_dec(&c_segment_pages_compressed, relaxed);
5916 			*slot = 0;
5917 		}
5918 		if (data) {
5919 			os_atomic_inc(&c_segment_svp_nonzero_decompressions, relaxed);
5920 		} else {
5921 			os_atomic_inc(&c_segment_svp_zero_decompressions, relaxed);
5922 		}
5923 
5924 		pmap_unmap_compressor_page(pn, dst);
5925 		return DECOMPRESS_SUCCESS;
5926 	}
5927 	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5928 
5929 	/*
5930 	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5931 	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5932 	 */
5933 	if (zeroslot) {
5934 		*slot = 0;
5935 	}
5936 
5937 	pmap_unmap_compressor_page(pn, dst);
5938 
5939 	/*
5940 	 * returns DECOMPRESS_SUCCESS (0) if we successfully decompressed a page from a segment already in memory
5941 	 * returns DECOMPRESS_SUCCESS_SWAPPEDIN (1) if we had to first swap in the segment before successfully decompressing the page
5942 	 * returns a negative DECOMPRESS_FAILED_* value if we encountered an error swapping in the segment - decompression failed
5943 	 * returns DECOMPRESS_NEED_BLOCK (-2) if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5944 	 */
5945 	return retval;
5946 }
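
/*
 * Illustrative caller sketch (hypothetical; real callers live in the
 * vm_fault/vm_pageout paths): attempt a non-blocking decompress first and
 * fall back to a blocking retry:
 *
 *	vm_decompress_result_t r;
 *
 *	r = vm_compressor_get(pn, &slot, C_DONT_BLOCK);
 *	if (r == DECOMPRESS_NEED_BLOCK) {
 *		r = vm_compressor_get(pn, &slot, 0);
 *	}
 *	if (r >= 0 && slot == 0) {
 *		... the compressed copy was consumed (no C_KEEP) ...
 *	}
 */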
5947 
5948 vm_decompress_result_t
5949 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5950 {
5951 	bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5952 
5953 	if (slot_is_compressed) {
5954 		c_slot_mapping_t  slot_ptr;
5955 		int     zeroslot = 1;
5956 		vm_decompress_result_t retval = DECOMPRESS_SUCCESS;
5957 
5958 		assert(flags == 0 || flags == C_DONT_BLOCK);
5959 
5960 		slot_ptr = (c_slot_mapping_t)slot;
5961 
5962 		if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5963 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5964 			os_atomic_dec(&c_segment_pages_compressed, relaxed);
5965 
5966 			*slot = 0;
5967 			return DECOMPRESS_SUCCESS;
5968 		}
5969 
5970 #if HAS_MTE
5971 		/* Don't need to worry about C_MTE flag when just freeing */
5972 #endif
5973 		retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5974 		/*
5975 		 * returns DECOMPRESS_SUCCESS (0) if we successfully freed the specified compressed page
5976 		 * returns a negative DECOMPRESS_FAILED_* value if we encountered an error - the free failed
5977 		 * returns DECOMPRESS_NEED_BLOCK (-2) if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5978 		 */
5979 
5980 		if (retval == DECOMPRESS_SUCCESS) {
5981 			*slot = 0;
5982 		}
5983 
5984 		return retval;
5985 	}
5986 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5987 	else {
5988 		if ((flags & C_PAGE_UNMODIFIED) == 0) {
5989 			/* moving from uncompressed state to compressed. Free it.*/
5990 			vm_uncompressed_free(slot, 0);
5991 			assert(*slot == 0);
5992 		}
5993 	}
5994 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5995 	return DECOMPRESS_SUCCESS;
5996 }
5997 
5998 kern_return_t
5999 vm_compressor_put(ppnum_t pn, int *slot, void  **current_chead, char *scratch_buf, vm_compressor_options_t flags)
6000 {
6001 	char *src;
6002 	kern_return_t kr;
6003 
6004 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
6005 	if (flags & C_PAGE_UNMODIFIED) {
6006 		if (*slot) {
6007 			os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
6008 			return KERN_SUCCESS;
6009 		} else {
6010 			kr = vm_uncompressed_put(pn, slot);
6011 			if (kr == KERN_SUCCESS) {
6012 				os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
6013 				return kr;
6014 			}
6015 		}
6016 	}
6017 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
6018 
6019 	/* get the address of the page in the physical aperture in the kernel task's virtual memory */
6020 #if HAS_MTE
6021 	/* By the time we get here the physical aperture page should already have tags enabled in pmap,
6022 	 * see pmap_[un]make_tagged_page() */
6023 #endif
6024 	src = pmap_map_compressor_page(pn);
6025 	assert(src != NULL);
6026 
6027 	kr = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf, flags);
6028 	pmap_unmap_compressor_page(pn, src);
6029 
6030 	return kr;
6031 }
6032 
6033 void
6034 vm_compressor_transfer(
6035 	int     *dst_slot_p,
6036 	int     *src_slot_p)
6037 {
6038 	c_slot_mapping_t        dst_slot, src_slot;
6039 	c_segment_t             c_seg;
6040 	uint16_t                c_indx;
6041 	c_slot_t                cs;
6042 
6043 	src_slot = (c_slot_mapping_t) src_slot_p;
6044 
6045 	if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
6046 		*dst_slot_p = *src_slot_p;
6047 		*src_slot_p = 0;
6048 		return;
6049 	}
6050 	dst_slot = (c_slot_mapping_t) dst_slot_p;
6051 Retry:
6052 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
6053 	/* get segment for src_slot */
6054 	c_seg = c_segments_get(src_slot->s_cseg - 1)->c_seg;
6055 	/* lock segment */
6056 	lck_mtx_lock_spin_always(&c_seg->c_lock);
6057 	/* wait if it's busy */
6058 	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
6059 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6060 		c_seg_wait_on_busy(c_seg);
6061 		goto Retry;
6062 	}
6063 	/* find the c_slot */
6064 	c_indx = src_slot->s_cindx;
6065 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6066 	/* point the c_slot back to dst_slot instead of src_slot */
6067 	C_SLOT_ASSERT_PACKABLE(dst_slot);
6068 	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
6069 	/* transfer */
6070 	*dst_slot_p = *src_slot_p;
6071 	*src_slot_p = 0;
6072 	lck_mtx_unlock_always(&c_seg->c_lock);
6073 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
6074 }
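
/*
 * e.g. when a pager's slot array is reallocated or a slot moves between
 * objects, the compressor must learn the new address of the mapping so the
 * c_slot backpointer stays valid (illustrative):
 *
 *	int new_slot = 0;
 *
 *	vm_compressor_transfer(&new_slot, &old_slot);
 *	(new_slot now holds the mapping, old_slot == 0, and the c_slot's
 *	 packed backpointer targets &new_slot)
 */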
6075 
6076 #if defined(__arm64__)
6077 extern uint64_t vm_swapfile_last_failed_to_create_ts;
6078 __attribute__((noreturn))
6079 void
6080 vm_panic_hibernate_write_image_failed(
6081 	int err,
6082 	uint64_t file_size_min,
6083 	uint64_t file_size_max,
6084 	uint64_t file_size)
6085 {
6086 	panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d, %llu, %llu, %llu\n",
6087 	    err,
6088 	    VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
6089 	    c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
6090 	    vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
6091 	    (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out,
6092 	    file_size_min, file_size_max, file_size);
6093 }
6094 #endif /*(__arm64__)*/
6095 
6096 #if CONFIG_FREEZE
6097 
6098 int     freezer_finished_filling = 0;
6099 
6100 void
6101 vm_compressor_finished_filling(
6102 	void    **current_chead)
6103 {
6104 	c_segment_t     c_seg;
6105 
6106 	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
6107 		return;
6108 	}
6109 
6110 	assert(c_seg->c_state == C_IS_FILLING);
6111 
6112 	lck_mtx_lock_spin_always(&c_seg->c_lock);
6113 
6114 	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
6115 
6116 	lck_mtx_unlock_always(&c_seg->c_lock);
6117 
6118 	freezer_finished_filling++;
6119 }
6120 
6121 
6122 /*
6123  * This routine is used to transfer the compressed chunks from
6124  * the c_seg/cindx pointed to by slot_p into a new c_seg headed
6125  * by the current_chead and a new cindx within that c_seg.
6126  *
6127  * Currently, this routine is only used by the "freezer backed by
6128  * compressor with swap" mode to create a series of c_segs that
6129  * only contain compressed data belonging to one task. So, we
6130  * move a task's previously compressed data into a set of new
6131  * c_segs which will also hold the task's yet to be compressed data.
6132  */
6133 
6134 kern_return_t
6135 vm_compressor_relocate(
6136 	void            **current_chead,
6137 	int             *slot_p)
6138 {
6139 	c_slot_mapping_t        slot_ptr;
6140 	c_slot_mapping_t        src_slot;
6141 	uint32_t                c_rounded_size;
6142 	uint32_t                c_size;
6143 	uint16_t                dst_slot;
6144 	c_slot_t                c_dst;
6145 	c_slot_t                c_src;
6146 	uint16_t                c_indx;
6147 	c_segment_t             c_seg_dst = NULL;
6148 	c_segment_t             c_seg_src = NULL;
6149 	kern_return_t           kr = KERN_SUCCESS;
6150 	bool                    nearing_limits;
6151 
6152 
6153 	src_slot = (c_slot_mapping_t) slot_p;
6154 
6155 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
6156 		/*
6157 		 * no need to relocate... this is a page full of a single
6158 		 * value which is hashed to a single entry not contained
6159 		 * in a c_segment_t
6160 		 */
6161 		return kr;
6162 	}
6163 
6164 	if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
6165 		/*
6166 		 * Unmodified anonymous pages are sitting uncompressed on disk.
6167 		 * So don't pull them back in again.
6168 		 */
6169 		return kr;
6170 	}
6171 
6172 Relookup_dst:
6173 	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead, &nearing_limits);
6174 	/*
6175 	 * returns with c_seg lock held
6176 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
6177 	 * c_nextslot has been allocated and
6178 	 * c_store.c_buffer populated
6179 	 */
6180 	if (c_seg_dst == NULL) {
6181 		/*
6182 		 * Out of compression segments?
6183 		 */
6184 		if (nearing_limits) {
6185 			memorystatus_respond_to_compressor_exhaustion();
6186 		}
6187 		kr = KERN_RESOURCE_SHORTAGE;
6188 		goto out;
6189 	}
6190 
6191 	assert(c_seg_dst->c_busy == 0);
6192 
6193 	C_SEG_BUSY(c_seg_dst);
6194 
6195 	dst_slot = c_seg_dst->c_nextslot;
6196 
6197 	lck_mtx_unlock_always(&c_seg_dst->c_lock);
6198 	if (nearing_limits) {
6199 		memorystatus_respond_to_compressor_exhaustion();
6200 	}
6201 
6202 Relookup_src:
6203 	c_seg_src = c_segments_get(src_slot->s_cseg - 1)->c_seg;
6204 
6205 	assert(c_seg_dst != c_seg_src);
6206 
6207 	lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6208 
6209 	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
6210 	    c_seg_src->c_state == C_IS_FILLING) {
6211 		/*
6212 		 * Skip this page if:
6213 		 * a) the src c_seg is already on disk (or on its way there).
6214 		 *    A "thaw" can mark a process as eligible for
6215 		 *    another freeze cycle without bringing any of
6216 		 *    its swapped-out c_segs back from disk (because
6217 		 *    that is done on demand).
6218 		 *    Alternatively, this page may be mapped elsewhere
6219 		 *    in the task's map, and we may have marked it for
6220 		 *    swap already.
6221 		 *
6222 		 * b) the src c_seg is being filled by the compressor
6223 		 *    thread. We don't want the added latency of waiting
6224 		 *    on this c_seg in the freeze path, so we skip it.
6224 		 */
6225 
6226 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6227 
6228 		lck_mtx_unlock_always(&c_seg_src->c_lock);
6229 
6230 		c_seg_src = NULL;
6231 
6232 		goto out;
6233 	}
6234 
6235 	if (c_seg_src->c_busy) {
6236 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6237 		c_seg_wait_on_busy(c_seg_src);
6238 
6239 		c_seg_src = NULL;
6240 
6241 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
6242 
6243 		goto Relookup_src;
6244 	}
6245 
6246 	C_SEG_BUSY(c_seg_src);
6247 
6248 	lck_mtx_unlock_always(&c_seg_src->c_lock);
6249 
6250 	/* find the c_slot */
6251 	c_indx = src_slot->s_cindx;
6252 
6253 	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
6254 
6255 	c_size = UNPACK_C_SIZE(c_src);
6256 
6257 	assert(c_size);
6258 	int combined_size = c_size + c_slot_extra_size(c_src);
6259 
6260 	if (combined_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
6261 		/*
6262 		 * This segment is full. We need a new one.
6263 		 */
6264 
6265 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6266 		C_SEG_WAKEUP_DONE(c_seg_src);
6267 		lck_mtx_unlock_always(&c_seg_src->c_lock);
6268 
6269 		c_seg_src = NULL;
6270 
6271 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6272 
6273 		assert(c_seg_dst->c_busy);
6274 		assert(c_seg_dst->c_state == C_IS_FILLING);
6275 		assert(!c_seg_dst->c_on_minorcompact_q);
6276 
6277 		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6278 		assert(*current_chead == NULL);
6279 
6280 		C_SEG_WAKEUP_DONE(c_seg_dst);
6281 
6282 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
6283 
6284 		c_seg_dst = NULL;
6285 
6286 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6287 
6288 		goto Relookup_dst;
6289 	}
6290 
6291 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
6292 
6293 	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
6294 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
6295 	/*
6296 	 * Is platform alignment actually necessary since wkdm aligns its output?
6297 	 */
6298 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);
6299 
6300 	cslot_copy(c_dst, c_src);
6301 	c_dst->c_offset = c_seg_dst->c_nextoffset;
6302 
6303 	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
6304 		c_seg_dst->c_firstemptyslot++;
6305 	}
6306 
6307 	c_seg_dst->c_slots_used++;
6308 	c_seg_dst->c_nextslot++;
6309 	c_seg_dst->c_bytes_used += c_rounded_size;
6310 	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
6311 
6312 
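	/*
	 * Invalidate the source slot: a packed size of 0 marks it empty. The
	 * bytes it occupied remain in c_seg_src until minor compaction reclaims
	 * them (accounted just below via c_bytes_used/c_bytes_unused).
	 */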
6313 	PACK_C_SIZE(c_src, 0);
6314 #if HAS_MTE
6315 	c_src->c_mte_size = 0;
6316 #endif
6317 
6318 	c_seg_src->c_bytes_used -= c_rounded_size;
6319 	c_seg_src->c_bytes_unused += c_rounded_size;
6320 
6321 	assert(c_seg_src->c_slots_used);
6322 	c_seg_src->c_slots_used--;
6323 
6324 	if (!c_seg_src->c_swappedin) {
6325 		/* Pessimistically lose swappedin status when non-swappedin pages are added. */
6326 		c_seg_dst->c_swappedin = false;
6327 	}
6328 
6329 	if (c_indx < c_seg_src->c_firstemptyslot) {
6330 		c_seg_src->c_firstemptyslot = c_indx;
6331 	}
6332 
6333 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
6334 
6335 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6336 	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
6337 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
6338 	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
6339 	slot_ptr->s_cindx = dst_slot;
6340 
6341 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6342 
6343 out:
6344 	if (c_seg_src) {
6345 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6346 
6347 		C_SEG_WAKEUP_DONE(c_seg_src);
6348 
6349 		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
6350 			if (!c_seg_src->c_on_minorcompact_q) {
6351 				c_seg_need_delayed_compaction(c_seg_src, FALSE);
6352 			}
6353 		}
6354 
6355 		lck_mtx_unlock_always(&c_seg_src->c_lock);
6356 	}
6357 
6358 	if (c_seg_dst) {
6359 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
6360 
6361 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6362 
6363 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
6364 			/*
6365 			 * Nearing or exceeded maximum slot and offset capacity.
6366 			 */
6367 			assert(c_seg_dst->c_busy);
6368 			assert(c_seg_dst->c_state == C_IS_FILLING);
6369 			assert(!c_seg_dst->c_on_minorcompact_q);
6370 
6371 			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6372 			assert(*current_chead == NULL);
6373 		}
6374 
6375 		C_SEG_WAKEUP_DONE(c_seg_dst);
6376 
6377 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
6378 
6379 		c_seg_dst = NULL;
6380 
6381 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6382 	}
6383 
6384 	return kr;
6385 }
6386 #endif /* CONFIG_FREEZE */
6387 
6388 #if DEVELOPMENT || DEBUG
6389 
6390 void
6391 vm_compressor_inject_error(int *slot)
6392 {
6393 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
6394 
6395 	/* No error detection for single-value compression. */
6396 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
6397 		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__);
6398 		return;
6399 	}
6400 
6401 	/* s_cseg is actually "segno+1" */
6402 	const uint32_t c_segno = slot_ptr->s_cseg - 1;
6403 
6404 	assert(c_segno < c_segments_available);
6405 	assert(c_segments_get(c_segno)->c_segno >= c_segments_available);
6406 
6407 	const c_segment_t c_seg = c_segments_get(c_segno)->c_seg;
6408 
6409 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
6410 
6411 	lck_mtx_lock_spin_always(&c_seg->c_lock);
6412 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
6413 
6414 	const uint16_t c_indx = slot_ptr->s_cindx;
6415 	assert(c_indx < c_seg->c_nextslot);
6416 
6417 	/*
6418 	 * To safely make this segment temporarily writable, we need to mark
6419 	 * the segment busy, which allows us to release the segment lock.
6420 	 */
6421 	while (c_seg->c_busy) {
6422 		c_seg_wait_on_busy(c_seg);
6423 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6424 	}
6425 	C_SEG_BUSY(c_seg);
6426 
6427 	bool already_writable = (c_seg->c_state == C_IS_FILLING);
6428 	if (!already_writable) {
6429 		/*
6430 		 * Protection update must be performed preemptibly, so temporarily drop
6431 		 * the lock. Having set c_busy will prevent most other concurrent
6432 		 * operations.
6433 		 */
6434 		lck_mtx_unlock_always(&c_seg->c_lock);
6435 		C_SEG_MAKE_WRITEABLE(c_seg);
6436 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6437 	}
6438 
6439 	/*
6440 	 * Once we've released the lock following our c_state == C_IS_FILLING check,
6441 	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
6442 	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
6443 	 * can detect this by re-checking after we've reobtained the lock.
6444 	 */
6445 	if (already_writable && c_seg->c_state != C_IS_FILLING) {
6446 		lck_mtx_unlock_always(&c_seg->c_lock);
6447 		C_SEG_MAKE_WRITEABLE(c_seg);
6448 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6449 		already_writable = false;
6450 		/* Segment can't be freed while c_busy is set. */
6451 		assert(c_seg->c_state != C_IS_FILLING);
6452 	}
6453 
6454 	/*
6455 	 * Skip if the segment is on disk. This check can only be performed after
6456 	 * the final acquisition of the segment lock before we attempt to write to
6457 	 * the segment.
6458 	 */
6459 	if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
6460 		c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6461 		int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
6462 		/* assume that the compressed data holds at least one int32_t */
6463 		assert(UNPACK_C_SIZE(cs) > sizeof(*data));
6464 		/*
6465 		 * This bit is known to be in the payload of a MISS packet resulting from
6466 		 * the bit pattern used by the test in decompression_failure.c.
6467 		 * Flipping it should result in many corrupted bits in the test page.
6468 		 */
6469 		data[0] ^= 0x00000100;
6470 	}
6471 
6472 	if (!already_writable) {
6473 		lck_mtx_unlock_always(&c_seg->c_lock);
6474 		C_SEG_WRITE_PROTECT(c_seg);
6475 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6476 	}
6477 
6478 	C_SEG_WAKEUP_DONE(c_seg);
6479 	lck_mtx_unlock_always(&c_seg->c_lock);
6480 
6481 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
6482 }
6483 
6484 /*
6485  * Serialize information about a specific segment.
6486  * Returns KERN_SUCCESS if the segment was written or there is nothing to
6487  *         write for this segno; KERN_NO_SPACE if the buffer is too small.
6488  * Argument 'size': input - size of the provided buffer; output - bytes written, set to 0 on failure.
6489  */
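/*
 * A plausible caller pattern (illustrative sketch only; the real caller lives
 * outside this file, and 'buf'/'sz' are hypothetical names):
 *
 *   size_t sz = sizeof(buf);
 *   kern_return_t kr = vm_compressor_serialize_segment_debug_info(segno, buf, &sz, with_data);
 *   // on KERN_NO_SPACE: drain 'buf' to the consumer, then retry this segno
 */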
6490 kern_return_t
6491 vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size, vm_c_serialize_add_data_t with_data)
6492 {
6493 	size_t insize = *size;
6494 	size_t offset = 0;
6495 	*size = 0;
6496 	if (c_segments_get(segno)->c_segno < c_segments_available) {
6497 		/* This entry holds no pointer, so it must be an index into the free list:
6498 		 * if this were an active c_segment, .c_seg would hold a pointer, which,
6499 		 * interpreted as an integer, would be larger than c_segments_available.
6500 		 * See also the assert to this effect right after c_seg is assigned in c_seg_allocate().
6501 		 */
6502 		return KERN_SUCCESS;
6503 	}
6504 	if (c_segments_get(segno)->c_segno == (uint32_t)-1) {
6505 		/* (uint32_t)-1 is the sentinel c_segno marking the end of the free list */
6506 		return KERN_SUCCESS;
6507 	}
6508 
6509 	const struct c_segment* c_seg = c_segments_get(segno)->c_seg;
6510 	if (c_seg->c_state == C_IS_FREE) {
6511 		return KERN_SUCCESS; /* nothing needs to be done */
6512 	}
6513 
6514 	int nslots = c_seg->c_nextslot;
6515 	/* do we have enough space for slots (without data)? */
6516 	if (sizeof(struct c_segment_info) + (nslots * sizeof(struct c_slot_info)) > insize) {
6517 		return KERN_NO_SPACE; /* not enough space, please call me again */
6518 	}
6519 
6520 	struct c_segment_info* csi = (struct c_segment_info*)buf;
6521 	offset += sizeof(struct c_segment_info);
6522 
6523 	csi->csi_mysegno = c_seg->c_mysegno;
6524 	csi->csi_creation_ts = c_seg->c_creation_ts;
6525 	csi->csi_swappedin_ts = c_seg->c_swappedin_ts;
6526 	csi->csi_bytes_unused = c_seg->c_bytes_unused;
6527 	csi->csi_bytes_used = c_seg->c_bytes_used;
6528 	csi->csi_populated_offset = c_seg->c_populated_offset;
6529 	csi->csi_state = c_seg->c_state;
6530 	csi->csi_swappedin = c_seg->c_swappedin;
6531 	csi->csi_on_minor_compact_q = c_seg->c_on_minorcompact_q;
6532 	csi->csi_has_donated_pages = c_seg->c_has_donated_pages;
6533 	csi->csi_slots_used = (uint16_t)c_seg->c_slots_used;
6534 	csi->csi_slot_var_array_len = c_seg->c_slot_var_array_len;
6535 	csi->csi_slots_len = (uint16_t)nslots;
6536 #if TRACK_C_SEGMENT_UTILIZATION
6537 	csi->csi_decompressions_since_swapin = c_seg->c_decompressions_since_swapin;
6538 #else
6539 	csi->csi_decompressions_since_swapin = 0;
6540 #endif /* TRACK_C_SEGMENT_UTILIZATION */
6541 #if HAS_MTE
6542 	bool cseg_in_mem = !C_SEG_IS_ON_DISK_OR_SOQ(c_seg);
6543 #endif /* HAS_MTE */
6544 	/* This entire data collection races with the compressor threads, which can
6545 	 * change any of these data members and, in particular, can drop the data
6546 	 * buffer to swap. We don't take the segment lock, since that would slow the
6547 	 * iteration over the segments and hurt the "snapshot-ness" of the data. The
6548 	 * race risk is acceptable since this is used only by a tester in development. */
6549 
6550 	for (int si = 0; si < nslots; ++si) {
6551 		if (offset + sizeof(struct c_slot_info) > insize) {
6552 			return KERN_NO_SPACE;
6553 		}
6554 		/* see also c_seg_validate() for some of the details */
6555 		const struct c_slot* cs = C_SEG_SLOT_FROM_INDEX(c_seg, si);
6556 		struct c_slot_info* ssi = (struct c_slot_info*)(buf + offset);
6557 		offset += sizeof(struct c_slot_info);
6558 		ssi->csi_size = (uint16_t)UNPACK_C_SIZE(cs);
6559 #if HAS_MTE
6560 		ssi->csi_mte_size = cs->c_mte_size;
6561 		ssi->csi_mte_has_data = 0;
6562 		uint32_t actual_mte_size = vm_mte_compressed_tags_actual_size(ssi->csi_mte_size);
6563 		if (with_data == VM_C_SERIALIZE_DATA_TAGS && actual_mte_size > 0 && cseg_in_mem) {
6564 			if (offset + actual_mte_size > insize) {
6565 				return KERN_NO_SPACE;
6566 			}
6567 			char* tags_buf = ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + ssi->csi_size;
6568 			memcpy(buf + offset, tags_buf, actual_mte_size);
6569 			offset += actual_mte_size;
6570 			ssi->csi_mte_has_data = 1;
6571 		}
6572 #else /* HAS_MTE */
6573 #pragma unused(with_data)
6574 		ssi->csi_unused = 0;
6575 #endif /* HAS_MTE */
6576 	}
6577 	*size = offset;
6578 	return KERN_SUCCESS;
6579 }
6580 
6581 #endif /* DEVELOPMENT || DEBUG */
6582 
6583 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
6584 
6585 struct vnode;
6586 extern void vm_swapfile_open(const char *path, struct vnode **vp);
6587 extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);
6588 
6589 struct vnode *uncompressed_vp0 = NULL;
6590 struct vnode *uncompressed_vp1 = NULL;
6591 uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
6592 uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;
6593 
6594 uint64_t compressor_ro_uncompressed = 0;
6595 uint64_t compressor_ro_uncompressed_total_returned = 0;
6596 uint64_t compressor_ro_uncompressed_skip_returned = 0;
6597 uint64_t compressor_ro_uncompressed_get = 0;
6598 uint64_t compressor_ro_uncompressed_put = 0;
6599 uint64_t compressor_ro_uncompressed_swap_usage = 0;
6600 
6601 extern void vnode_put(struct vnode* vp);
6602 extern int vnode_getwithref(struct vnode* vp);
6603 extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);
6604 
6605 #define MAX_OFFSET_PAGES        (255)
6606 uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6607 uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];
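
/*
 * Note (inferred from the code, not stated in the original): each uint64_t
 * word tracks 64 pages, so each bitmap covers MAX_OFFSET_PAGES * 64 = 16320
 * pages per file. That matches the 255MB preallocation performed in
 * vm_uncompressed_put() only when PAGE_SIZE is 16KB.
 */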
6608 
6609 #define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1<<31ull) - 1)
6610 #define UNCOMPRESSED_FILEIDX_SHIFT (29)
6611 #define UNCOMPRESSED_FILEIDX_MASK (3)
6612 #define UNCOMPRESSED_OFFSET_SHIFT (29)
6613 #define UNCOMPRESSED_OFFSET_MASK (7)
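
/*
 * Slot encoding implied by the extract routines below (inferred from the
 * masks and shifts above; not documented elsewhere in this file):
 *
 *   bit  31     : apparently the s_uncompressed flag (masked off by
 *                 UNCOMPRESSED_FILEIDX_OFFSET_MASK, set in vm_uncompressed_put())
 *   bits 30..29 : swap file index (1 => uncompressed_vp0, 2 => uncompressed_vp1)
 *   bits 28..0  : byte offset of the page within that swap file
 */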
6614 
6615 static uint32_t
6616 vm_uncompressed_extract_swap_file(int slot)
6617 {
6618 	uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
6619 	return fileidx;
6620 }
6621 
6622 static uint32_t
6623 vm_uncompressed_extract_swap_offset(int slot)
6624 {
6625 	return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
6626 }
6627 
6628 static void
6629 vm_uncompressed_return_space_to_swap(int slot)
6630 {
6631 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6632 	uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
6633 	if (fileidx == 1) {
6634 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6635 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6636 		uint64_t chunkidx = pgidx / 64;
6637 		uint64_t chunkoffset = pgidx % 64;
6638 #if DEVELOPMENT || DEBUG
6639 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6640 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6641 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6642 #endif /*DEVELOPMENT || DEBUG*/
6643 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6644 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6645 		uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6646 		assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6647 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6648 
6649 		uncompressed_file0_free_pages++;
6650 	} else {
6651 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6652 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6653 		uint64_t chunkidx = pgidx / 64;
6654 		uint64_t chunkoffset = pgidx % 64;
6655 		assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6656 		    "%llu %llu", chunkidx, chunkoffset);
6657 		uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6658 
6659 		uncompressed_file1_free_pages++;
6660 	}
6661 	compressor_ro_uncompressed_swap_usage--;
6662 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6663 }
6664 
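/*
 * First-fit allocator over the per-file bitmaps: find the first word with a
 * clear bit, claim that bit, and encode <file index, byte offset> into a
 * slot value. File0 is preferred; file1 is used once file0 has no free pages.
 */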
6665 static int
6666 vm_uncompressed_reserve_space_in_swap()
6667 {
6668 	int slot = 0;
6669 	if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
6670 		return -1;
6671 	}
6672 
6673 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6674 	if (uncompressed_file0_free_pages) {
6675 		uint64_t chunkidx = 0;
6676 		uint64_t chunkoffset = 0;
6677 		while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6678 			chunkidx++;
6679 		}
6680 		while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6681 			chunkoffset++;
6682 		}
6683 
6684 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
6685 		    "%llu %llu", chunkidx, chunkoffset);
6686 #if DEVELOPMENT || DEBUG
6687 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6688 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6689 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6690 #endif /*DEVELOPMENT || DEBUG*/
6691 		uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6692 		uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6693 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6694 		    "%llu %llu", chunkidx, chunkoffset);
6695 
6696 		assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
6697 		slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
6698 		uncompressed_file0_free_pages--;
6699 	} else {
6700 		uint64_t chunkidx = 0;
6701 		uint64_t chunkoffset = 0;
6702 		while (uncompressed_file1_space_bitmap[chunkidx] == 0xFFFFFFFFFFFFFFFF) {
6703 			chunkidx++;
6704 		}
6705 		while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6706 			chunkoffset++;
6707 		}
6708 		assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
6709 		uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6710 		uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6711 		slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
6712 		uncompressed_file1_free_pages--;
6713 	}
6714 	compressor_ro_uncompressed_swap_usage++;
6715 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6716 	return slot;
6717 }
6718 
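/*
 * Small fixed pool of preallocated kernel bounce buffers for swapfile I/O.
 * Callers claim a free entry under PAGE_REPLACEMENT_ALLOWED() and block on
 * the pool when all MAX_IO_REQ entries are in use.
 */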
6719 #define MAX_IO_REQ (16)
6720 struct _uncompressor_io_req {
6721 	uint64_t addr;
6722 	bool inuse;
6723 } uncompressor_io_req[MAX_IO_REQ];
6724 
6725 int
6726 vm_uncompressed_put(ppnum_t pn, int *slot)
6727 {
6728 	int retval = 0;
6729 	struct vnode *uncompressed_vp = NULL;
6730 	uint64_t uncompress_offset = 0;
6731 
6732 again:
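	/*
	 * Lazily create both backing files on first use. The re-check of
	 * uncompressed_vp0 under PAGE_REPLACEMENT_ALLOWED(TRUE) makes this a
	 * double-checked init; a losing racer backs off, delays, and retries.
	 */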
6733 	if (uncompressed_vp0 == NULL) {
6734 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6735 		if (uncompressed_vp0 == NULL) {
6736 			uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
6737 			vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
6738 			if (uncompressed_vp0 == NULL) {
6739 				PAGE_REPLACEMENT_ALLOWED(FALSE);
6740 				return KERN_NO_ACCESS;
6741 			}
6742 			vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
6743 			uncompressed_file0_free_pages = (uint32_t)atop(size);
6744 			bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6745 
6746 			int i = 0;
6747 			for (; i < MAX_IO_REQ; i++) {
6748 				kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
6749 				uncompressor_io_req[i].inuse = false;
6750 			}
6751 
6752 			vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
6753 			assert(uncompressed_vp1);
6754 			vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
6755 			uncompressed_file1_free_pages = (uint32_t)atop(size);
6756 			bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6757 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6758 		} else {
6759 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6760 			delay(100);
6761 			goto again;
6762 		}
6763 	}
6764 
6765 	int swapinfo = vm_uncompressed_reserve_space_in_swap();
6766 	if (swapinfo == -1) {
6767 		*slot = 0;
6768 		return KERN_RESOURCE_SHORTAGE;
6769 	}
6770 
6771 	if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
6772 		uncompressed_vp = uncompressed_vp0;
6773 	} else {
6774 		uncompressed_vp = uncompressed_vp1;
6775 	}
6776 	uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
6777 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6778 		vm_log_error("vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
6779 	} else {
6780 		int i = 0;
6781 retry:
6782 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6783 		for (i = 0; i < MAX_IO_REQ; i++) {
6784 			if (uncompressor_io_req[i].inuse == false) {
6785 				uncompressor_io_req[i].inuse = true;
6786 				break;
6787 			}
6788 		}
6789 		if (i == MAX_IO_REQ) {
6790 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6791 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6792 			thread_block(THREAD_CONTINUE_NULL);
6793 			goto retry;
6794 		}
6795 		PAGE_REPLACEMENT_ALLOWED(FALSE);
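		/*
		 * Copy the page through a bounce buffer: map the physical page into
		 * the compressor's mapping window, copy it into the claimed kernel
		 * buffer, then write it to the reserved swapfile offset.
		 */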
6796 		void *addr = pmap_map_compressor_page(pn);
6797 		memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
6798 		pmap_unmap_compressor_page(pn, addr);
6799 
6800 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
6801 		if (retval) {
6802 			*slot = 0;
6803 		} else {
6804 			*slot = (int)swapinfo;
6805 			((c_slot_mapping_t)(slot))->s_uncompressed = 1;
6806 		}
6807 		vnode_put(uncompressed_vp);
6808 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6809 		uncompressor_io_req[i].inuse = false;
6810 		thread_wakeup((event_t)&uncompressor_io_req);
6811 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6812 	}
6813 	return retval;
6814 }
6815 
6816 int
6817 vm_uncompressed_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
6818 {
6819 	int retval = 0;
6820 	struct vnode *uncompressed_vp = NULL;
6821 	uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
6822 	uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);
6823 
6824 	if (__improbable(flags & C_KDP)) {
6825 		return -2;
6826 	}
6827 
6828 	if (fileidx == 1) {
6829 		uncompressed_vp = uncompressed_vp0;
6830 	} else {
6831 		uncompressed_vp = uncompressed_vp1;
6832 	}
6833 
6834 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6835 		vm_log_error("vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
6836 	} else {
6837 		int i = 0;
6838 retry:
6839 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6840 		for (i = 0; i < MAX_IO_REQ; i++) {
6841 			if (uncompressor_io_req[i].inuse == false) {
6842 				uncompressor_io_req[i].inuse = true;
6843 				break;
6844 			}
6845 		}
6846 		if (i == MAX_IO_REQ) {
6847 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6848 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6849 			thread_block(THREAD_CONTINUE_NULL);
6850 			goto retry;
6851 		}
6852 		PAGE_REPLACEMENT_ALLOWED(FALSE);
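		/*
		 * Read the page into the bounce buffer, then copy it out into the
		 * target physical page via the compressor mapping window.
		 */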
6853 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
6854 		vnode_put(uncompressed_vp);
6855 		void *addr = pmap_map_compressor_page(pn);
6856 		memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
6857 		pmap_unmap_compressor_page(pn, addr);
6858 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6859 		uncompressor_io_req[i].inuse = false;
6860 		thread_wakeup((event_t)&uncompressor_io_req);
6861 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6862 	}
6863 	return retval;
6864 }
6865 
6866 int
6867 vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
6868 {
6869 	vm_uncompressed_return_space_to_swap(*slot);
6870 	*slot = 0;
6871 	return 0;
6872 }
6873 
6874 #endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
6875