/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_compressor_internal.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache_internal.h>
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_map_internal.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_algorithms_internal.h>
#include <vm/vm_compressor_backing_store_internal.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_compressor_pager_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_far.h>
#include <mach/mach_host.h>             /* for host_info() */
#if DEVELOPMENT || DEBUG
#include <kern/hvg_hypercall.h>
#include <vm/vm_compressor_info.h>         /* for c_segment_info */
#endif
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>
#include <san/kasan.h>
#include <sys/kern_memorystatus_xnu.h>
#include <os/atomic_private.h>
#include <os/log.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>

#if defined(__x86_64__)
#include <i386/misc_protos.h>
#endif
#if defined(__arm64__)
#include <arm/machine_routines.h>
#endif

#include <IOKit/IOHibernatePrivate.h>
/*
 * The segment buffer size is a tradeoff.
 * A larger buffer leads to faster I/O throughput, better compression ratios
 * (since fewer bytes are wasted at the end of the segment),
 * and less overhead (both in time and space).
 * However, a smaller buffer causes less swap when the system is overcommitted,
 * because a higher percentage of the swapped-in segment is definitely accessed
 * before it goes back out to storage.
 *
 * So on systems without swap, a larger segment is a clear win.
 * On systems with swap, the choice is murkier. Empirically, we've
 * found that a 64KB segment provides a better tradeoff both in terms of
 * performance and swap writes than a 256KB segment on systems with fast SSDs
 * and a HW compression block.
 */
#define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
#if XNU_TARGET_OS_OSX && defined(__arm64__)
#define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
#else
#define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
uint32_t c_seg_bufsize;

uint32_t c_seg_max_pages; /* maximum number of pages the compressed data of a segment can take */
uint32_t c_seg_off_limit; /* if we've reached this size while filling the segment, don't bother trying to fill it
                           * any further because it's unlikely to succeed; in units of uint32_t, same as c_nextoffset */
uint32_t c_seg_allocsize, c_seg_slot_var_array_min_len;

extern boolean_t vm_darkwake_mode;
extern zone_t vm_page_zone;

#if DEVELOPMENT || DEBUG
/* sysctl defined in bsd/dev/arm64/sysctl.c */
static event_t debug_cseg_wait_event = NULL;
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
#endif /* CONFIG_FREEZE */

#if POPCOUNT_THE_COMPRESSED_DATA
boolean_t popcount_c_segs = TRUE;

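/*
 * Population count over a buffer, used to cross-check the stored popcount
 * of compressed data (see c_seg_validate()): walks the buffer in 16-byte,
 * then 4-byte, then single-byte strides, and returns a sentinel value when
 * the check is disabled.
 */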
static inline uint32_t
vmc_pop(uintptr_t ins, int sz)
{
	uint32_t rv = 0;

	if (__probable(popcount_c_segs == FALSE)) {
		return 0xDEAD707C;
	}

	while (sz >= 16) {
		uint32_t rv1, rv2;
		uint64_t *ins64 = (uint64_t *) ins;
		uint64_t *ins642 = (uint64_t *) (ins + 8);
		rv1 = __builtin_popcountll(*ins64);
		rv2 = __builtin_popcountll(*ins642);
		rv += rv1 + rv2;
		sz -= 16;
		ins += 16;
	}

	while (sz >= 4) {
		uint32_t *ins32 = (uint32_t *) ins;
		rv += __builtin_popcount(*ins32);
		sz -= 4;
		ins += 4;
	}

	while (sz > 0) {
		char *ins8 = (char *)ins;
		rv += __builtin_popcount(*ins8);
		sz--;
		ins++;
	}
	return rv;
}
#endif

#if VALIDATE_C_SEGMENTS
boolean_t validate_c_segs = TRUE;
#endif
/*
 * vm_compressor_mode is set from a hierarchy of sources:
 * boot-args are checked first, then the device tree, and finally
 * the default value defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */

#if !XNU_TARGET_OS_OSX

#if CONFIG_FREEZE
int     vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
struct  freezer_context freezer_context_global;
#else /* CONFIG_FREEZE */
int     vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
#endif /* CONFIG_FREEZE */

#else /* !XNU_TARGET_OS_OSX */
int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;

#endif /* !XNU_TARGET_OS_OSX */

TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
int             vm_compressor_is_active = 0;
int             vm_compressor_available = 0;

extern uint64_t vm_swap_get_max_configured_space(void);
extern void     vm_pageout_io_throttle(void);

#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
static unsigned int vmc_hash(char *, int);
boolean_t checksum_c_segs = TRUE;

unsigned int
vmc_hash(char *cp, int len)
{
	unsigned int result;
	if (__probable(checksum_c_segs == FALSE)) {
		return 0xDEAD7A37;
	}
	vm_memtag_disable_checking();
	result = hash_string(cp, len);
	vm_memtag_enable_checking();
	return result;
}
#endif

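/*
 * The c_slot size field is too narrow to hold PAGE_SIZE itself, so a stored
 * size of PAGE_SIZE - 1 is used as a sentinel meaning "a full page, kept
 * uncompressed".
 */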
#define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))

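/*
 * Hash table entry for "single value" (SV) pages: pages filled with one
 * repeating 32-bit value (most commonly all zeroes). Such pages are not
 * stored in a segment; the value is recorded here along with a refcount.
 */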
struct c_sv_hash_entry {
	union {
		struct  {
			uint32_t        c_sv_he_ref;
			uint32_t        c_sv_he_data;
		} c_sv_he;
		uint64_t        c_sv_he_record;
	} c_sv_he_un;
};

#define he_ref  c_sv_he_un.c_sv_he.c_sv_he_ref
#define he_data c_sv_he_un.c_sv_he.c_sv_he_data
#define he_record c_sv_he_un.c_sv_he_record

#define C_SV_HASH_MAX_MISS      32
#define C_SV_HASH_SIZE          ((1 << 10))
#define C_SV_HASH_MASK          ((1 << 10) - 1)

#if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
#define C_SV_CSEG_ID            ((1 << 21) - 1)
#else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
#define C_SV_CSEG_ID            ((1 << 22) - 1)
#endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */

/* elements of c_segments array */
union c_segu {
	c_segment_t     c_seg;
	uintptr_t       c_segno;  /* index of the next element in the segments free-list, c_free_segno_head is the head */
};

#define C_SLOT_ASSERT_PACKABLE(ptr) \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);

#define C_SLOT_PACK_PTR(ptr) \
	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)

#define C_SLOT_UNPACK_PTR(cslot) \
	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
    VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);

uint32_t        c_segment_count = 0;       /* count of all allocated c_segments across all queues */
uint32_t        c_segment_count_max = 0;   /* maximum value c_segment_count has ever reached */

uint64_t        c_generation_id = 0;
uint64_t        c_generation_id_flush_barrier;

boolean_t       hibernate_no_swapspace = FALSE;
boolean_t       hibernate_flush_timed_out = FALSE;
clock_sec_t     hibernate_flushing_deadline = 0;

#if RECORD_THE_COMPRESSED_DATA
/* buffer used as an intermediate stage before writing to file */
char    *c_compressed_record_sbuf;  /* start */
char    *c_compressed_record_ebuf;  /* end */
char    *c_compressed_record_cptr;  /* next buffered write */
#endif

/* the different queues a c_segment can be in via c_age_list */
queue_head_t    c_age_list_head;
queue_head_t    c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
queue_head_t    c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
queue_head_t    c_swapio_list_head;
queue_head_t    c_swappedout_list_head;
queue_head_t    c_swappedout_sparse_list_head;
queue_head_t    c_major_list_head;
queue_head_t    c_filling_list_head;
queue_head_t    c_bad_list_head;

/* count of each of the queues above */
uint32_t        c_age_count = 0;
uint32_t        c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
uint32_t        c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
uint32_t        c_swapio_count = 0;
uint32_t        c_swappedout_count = 0;
uint32_t        c_swappedout_sparse_count = 0;
uint32_t        c_major_count = 0;
uint32_t        c_filling_count = 0;
uint32_t        c_empty_count = 0;
uint32_t        c_bad_count = 0;

/* a c_segment can be on the minor-compact queue (linked via c_list) at the same time as one of the queues above (linked via c_age_list) */
queue_head_t    c_minor_list_head;
uint32_t        c_minor_count = 0;

int             c_overage_swapped_count = 0;
int             c_overage_swapped_limit = 0;

int             c_seg_fixed_array_len;   /* number of slots in the c_segment inline slots array */
union  c_segu   *c_segments;             /* array of all c_segments; not all of it may be populated */
vm_offset_t     c_buffers;               /* starting address of all compressed data pointed to by c_segment.c_store.c_buffer */
vm_size_t       c_buffers_size;          /* total size allocated in c_buffers */
caddr_t         c_segments_next_page;    /* next page to populate when extending c_segments */
boolean_t       c_segments_busy;
uint32_t        c_segments_available;    /* how many segments have populated backing memory (used or free), i.e. the populated size of the c_segments array */
uint32_t        c_segments_limit;        /* max size of c_segments array */
uint32_t        c_segments_nearing_limit;

uint32_t        c_segment_svp_in_hash;
uint32_t        c_segment_svp_hash_succeeded;
uint32_t        c_segment_svp_hash_failed;
uint32_t        c_segment_svp_zero_compressions;
uint32_t        c_segment_svp_nonzero_compressions;
uint32_t        c_segment_svp_zero_decompressions;
uint32_t        c_segment_svp_nonzero_decompressions;

uint32_t        c_segment_noncompressible_pages;

uint32_t        c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor, including SV (single value) pages */
#if CONFIG_FREEZE
int32_t         c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
int32_t         c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
uint32_t        c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
#endif /* CONFIG_FREEZE */

uint32_t        c_segment_pages_compressed_limit;
uint32_t        c_segment_pages_compressed_nearing_limit;
uint32_t        c_free_segno_head = (uint32_t)-1;   /* head of free list of c_segment pointers in c_segments */

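/*
 * Divisors used to derive the compaction/unthrottle/catchup thresholds
 * from the amount of available memory (see the VM_PAGE_COMPRESSOR_*_THRESHOLD
 * macros); a larger divisor yields a smaller threshold. The *_overridden
 * flags record that a sysctl has overridden the default.
 */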
uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t        vm_compressor_catchup_threshold_divisor = 10;

uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;

#define         C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))

LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);

boolean_t       decompressions_blocked = FALSE;

zone_t          compressor_segment_zone;
int             c_compressor_swap_trigger = 0;

uint32_t        compressor_cpus;
char            *compressor_scratch_bufs;

struct vm_compressor_kdp_state vm_compressor_kdp_state;

clock_sec_t     start_of_sample_period_sec = 0;
clock_nsec_t    start_of_sample_period_nsec = 0;
clock_sec_t     start_of_eval_period_sec = 0;
clock_nsec_t    start_of_eval_period_nsec = 0;
uint32_t        sample_period_decompression_count = 0;
uint32_t        sample_period_compression_count = 0;
uint32_t        last_eval_decompression_count = 0;
uint32_t        last_eval_compression_count = 0;

#define         DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)

boolean_t       vm_swapout_ripe_segments = FALSE;
uint32_t        vm_ripe_target_age = (60 * 60 * 48);

uint32_t        swapout_target_age = 0;
uint32_t        age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t        overage_decompressions_during_sample_period = 0;


void            do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t       fastwake_warmup = FALSE;
boolean_t       fastwake_recording_in_progress = FALSE;
uint64_t        dont_trim_until_ts = 0;

uint64_t        c_segment_warmup_count;
uint64_t        first_c_segment_to_warm_generation_id = 0;
uint64_t        last_c_segment_to_warm_generation_id = 0;
boolean_t       hibernate_flushing = FALSE;

_Atomic uint64_t c_segment_input_bytes = 0;
_Atomic uint64_t c_segment_compressed_bytes = 0;
_Atomic uint64_t compressor_bytes_used = 0;

/* Keeps track of the most recent timestamp for when major compaction finished. */
mach_timespec_t major_compact_ts;

struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE]  __attribute__ ((aligned(8)));

static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
static void vm_compressor_process_special_swapped_in_segments_locked(void);

struct vm_compressor_swapper_stats vmcs_stats;

static void vm_compressor_process_major_segments(bool);

void compute_swapout_target_age(void);

boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

uint64_t vm_available_memory(void);

/*
 * Get the address of a given entry in the c_segments array
 */
static inline union c_segu *
c_segments_get(uint32_t segno)
{
	return VM_FAR_ADD_PTR_UNBOUNDED(c_segments, segno);
}

/*
 * indicate the need to do a major compaction if
 * the overall set of in-use compression segments
 * becomes sparse... on systems that support pressure
 * driven swapping, this will also cause swapouts to
 * be initiated.
 */
static bool
vm_compressor_needs_to_major_compact(void)
{
	uint32_t        incore_seg_count;

	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;

	/*
	 * first condition: compare the number of segments in use against
	 *   the number of segments that can ever be allocated... if we don't
	 *   have much data in the compressor, we don't need to care about
	 *   wasted space in holes.
	 * second condition: is the ratio of wasted space greater than 1/8?
	 *   - (incore_seg_count * c_seg_max_pages) is the maximum number of
	 *     pages all resident segments could hold in their buffers
	 *   - VM_PAGE_COMPRESSOR_COUNT is the number of pages the buffers
	 *     actually hold
	 *   - their difference is the number of pages wasted as holes, due
	 *     to segments not being full
	 *   - the right-hand side is 1/8 of the maximum capacity of this
	 *     many segments
	 */

	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
	    ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
	    ((incore_seg_count / 8) * c_seg_max_pages)) {
		return true;
	}
	return false;
}

uint32_t
vm_compressor_incore_fragmentation_wasted_pages(void)
{
	/* return one of the components of the calculation in vm_compressor_needs_to_major_compact() */
	uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
	return (incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT;
}

TUNABLE_WRITEABLE(uint64_t, vm_compressor_minor_fragmentation_threshold_pct, "vm_compressor_minor_frag_threshold_pct", 10);

static bool
vm_compressor_needs_to_minor_compact(void)
{
	uint32_t compactible_seg_count = os_atomic_load(&c_minor_count, relaxed);
	if (compactible_seg_count == 0) {
		return false;
	}

	bool is_pressured = AVAILABLE_NON_COMPRESSED_MEMORY <
	    VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD;
	if (!is_pressured) {
		return false;
	}

	uint64_t bytes_used = os_atomic_load(&compressor_bytes_used, relaxed);
	uint64_t bytes_total = VM_PAGE_COMPRESSOR_COUNT * PAGE_SIZE_64;
	uint64_t bytes_frag = bytes_total - bytes_used;
	bool is_fragmented = bytes_frag >
	    bytes_total * vm_compressor_minor_fragmentation_threshold_pct / 100;

	return is_fragmented;
}


uint64_t
vm_available_memory(void)
{
	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
}


uint32_t
vm_compressor_pool_size(void)
{
	return VM_PAGE_COMPRESSOR_COUNT;
}

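/*
 * Percentage of the in-core segments' page capacity that is currently
 * unused (i.e. holes); returns 0 when nothing is resident.
 */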
uint32_t
vm_compressor_fragmentation_level(void)
{
	const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
	if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
		return 0;
	}
	return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
}

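/*
 * Whole ratio of pages fed into the compressor to the pages of compressed
 * storage backing them; e.g. a return value of 3 means the data occupies
 * roughly a third of its original footprint.
 */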
uint32_t
vm_compression_ratio(void)
{
	if (vm_compressor_pool_size() == 0) {
		return UINT32_MAX;
	}
	return c_segment_pages_compressed / vm_compressor_pool_size();
}

uint32_t
vm_compressor_pages_compressed(void)
{
#if CONFIG_FREEZE
	if (freezer_incore_cseg_acct) {
		return os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	}
#endif /* CONFIG_FREEZE */
	return os_atomic_load(&c_segment_pages_compressed, relaxed);
}

bool
vm_compressor_compressed_pages_nearing_limit(void)
{
	return vm_compressor_pages_compressed() > c_segment_pages_compressed_nearing_limit;
}

static bool
vm_compressor_segments_nearing_limit(void)
{
	uint64_t segments;

#if CONFIG_FREEZE
	if (freezer_incore_cseg_acct) {
		if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
			segments = 0;
		}
		if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
			segments = 0;
		}
	} else {
		segments = os_atomic_load(&c_segment_count, relaxed);
	}
#else /* CONFIG_FREEZE */
	segments = c_segment_count;
#endif /* CONFIG_FREEZE */

	return segments > c_segments_nearing_limit;
}

bool
vm_compressor_low_on_space(void)
{
	return vm_compressor_compressed_pages_nearing_limit() ||
	       vm_compressor_segments_nearing_limit();
}


bool
vm_compressor_out_of_space(void)
{
#if CONFIG_FREEZE
	uint64_t incore_seg_count;
	uint32_t incore_compressed_pages;
	if (freezer_incore_cseg_acct) {
		if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
			incore_seg_count = 0;
		}
		if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
			incore_seg_count = 0;
		}
		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	} else {
		incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
	}

	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
	    (incore_seg_count > c_segments_incore_limit)) {
		return true;
	}
#else /* CONFIG_FREEZE */
	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
	    (c_segment_count >= c_segments_limit)) {
		return true;
	}
#endif /* CONFIG_FREEZE */
	return false;
}

bool
vm_compressor_is_thrashing(void)
{
	compute_swapout_target_age();

	if (swapout_target_age) {
		c_segment_t     c_seg;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			if (c_seg->c_creation_ts > swapout_target_age) {
				swapout_target_age = 0;
			}
		}
		lck_mtx_unlock_always(c_list_lock);
	}

	return swapout_target_age != 0;
}


int
vm_wants_task_throttled(task_t task)
{
	ledger_amount_t compressed;
	if (task == kernel_task) {
		return 0;
	}

	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
			ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
			compressed >>= VM_MAP_PAGE_SHIFT(task->map);
			if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
				return 1;
			}
		}
	}
	return 0;
}

#if CONFIG_JETSAM
bool            memorystatus_disable_swap(void);
#if CONFIG_PHANTOM_CACHE
extern bool memorystatus_phantom_cache_pressure;
#endif /* CONFIG_PHANTOM_CACHE */
int             compressor_thrashing_induced_jetsam = 0;
int             filecache_thrashing_induced_jetsam = 0;
static boolean_t        vm_compressor_thrashing_detected = FALSE;
#endif /* CONFIG_JETSAM */

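/*
 * decompressions_blocked is toggled while holding the c_master_lock
 * exclusively (via PAGE_REPLACEMENT_ALLOWED()), which lets in-flight
 * decompressions drain first; blocked decompressors are woken on unlock.
 */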
void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}

static inline void
cslot_copy(c_slot_t cdst, c_slot_t csrc)
{
#if CHECKSUM_THE_DATA
	cdst->c_hash_data = csrc->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cdst->c_pop_cdata = csrc->c_pop_cdata;
#endif
	cdst->c_size = csrc->c_size;
	cdst->c_packed_ptr = csrc->c_packed_ptr;
#if defined(__arm64__)
	cdst->c_codec = csrc->c_codec;
#endif
}

698 
699 #if XNU_TARGET_OS_OSX
700 #define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
701 #else
702 #define VM_COMPRESSOR_MAX_POOL_SIZE (0)
703 #endif
704 
705 static vm_map_size_t compressor_size;
706 static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
707 vm_map_t compressor_map;
708 uint64_t compressor_pool_max_size;
709 uint64_t compressor_pool_size;
710 uint32_t compressor_pool_multiplier;
711 
712 #if DEVELOPMENT || DEBUG
713 /*
714  * Compressor segments are write-protected in development/debug
715  * kernels to help debug memory corruption.
716  * In cases where performance is a concern, this can be disabled
717  * via the boot-arg "-disable_cseg_write_protection".
718  */
719 boolean_t write_protect_c_segs = TRUE;
720 int vm_compressor_test_seg_wp;
721 uint32_t vm_ktrace_enabled;
722 #endif /* DEVELOPMENT || DEBUG */
723 
#if (XNU_TARGET_OS_OSX && __arm64__)

#include <IOKit/IOPlatformExpert.h>
#include <sys/random.h>

static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
static thread_call_t csegbufsz_experiment_thread_call;

extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
static void
erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
{
	// Wait for NVRAM to be writable
	if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
		printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.\n");
	}

	if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
		printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.\n", csegbufsizeExperimentProperty);
	}
	thread_call_free(csegbufsz_experiment_thread_call);
}

static void
erase_csegbufsz_experiment_property_async(void)
{
	csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
		erase_csegbufsz_experiment_property,
		NULL,
		THREAD_CALL_PRIORITY_LOW
		);
	if (csegbufsz_experiment_thread_call == NULL) {
		printf("csegbufsize_experiment_property: Unable to allocate thread call.\n");
	} else {
		thread_call_enter(csegbufsz_experiment_thread_call);
	}
}

static void
cleanup_csegbufsz_experiment(__unused void *arg0)
{
	char nvram = 0;
	unsigned int len = sizeof(nvram);
	if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
		erase_csegbufsz_experiment_property_async();
	}
}

STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
#endif /* XNU_TARGET_OS_OSX && __arm64__ */

#if CONFIG_JETSAM
extern unsigned int memorystatus_swap_all_apps;
#endif /* CONFIG_JETSAM */

TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);

static void
vm_compressor_set_size(void)
{
	/*
	 * Note that this function may be called multiple times on systems with app swap
	 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
	 * can change based on the size of the swap volume. On these systems, we'll call
	 * this function once early in boot to reserve the maximum amount of VA required
	 * for the compressor submap and then one more time in vm_compressor_init after
	 * determining the swap volume size. The size we compute the second time around
	 * must not be larger than the first.
	 */
	vm_size_t       c_segments_arr_size = 0;
	struct c_slot_mapping tmp_slot_ptr;

	/* The segment size can be overridden by a boot-arg */
	if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
#if CONFIG_JETSAM
		if (memorystatus_swap_all_apps) {
			c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
		} else {
			c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
		}
#else
		c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
#endif /* CONFIG_JETSAM */
	}

	vm_compressor_swap_init_swap_file_limit();
	if (vm_compression_limit) {
		compressor_pool_size = ptoa_64(vm_compression_limit);
	}

	compressor_pool_max_size = C_SEG_MAX_LIMIT;
	compressor_pool_max_size *= c_seg_bufsize;

#if XNU_TARGET_OS_OSX

	if (vm_compression_limit == 0) {
		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 16ULL * max_mem;
		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 8ULL * max_mem;
		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 4ULL * max_mem;
		} else {
			compressor_pool_size = 2ULL * max_mem;
		}
	}
	/*
	 * Cap the compressor pool size to a max of 192G
	 */
	if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
		compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
	}
	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 1;
	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 2;
	} else {
		compressor_pool_multiplier = 4;
	}

#else

	if (compressor_pool_max_size > max_mem) {
		compressor_pool_max_size = max_mem;
	}

	if (vm_compression_limit == 0) {
		compressor_pool_size = max_mem;
	}

#if XNU_TARGET_OS_WATCH
	compressor_pool_multiplier = 2;
#elif XNU_TARGET_OS_IOS
	if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 2;
	} else {
		compressor_pool_multiplier = 1;
	}
#else
	compressor_pool_multiplier = 1;
#endif /* XNU_TARGET_OS_WATCH / XNU_TARGET_OS_IOS */

#endif /* XNU_TARGET_OS_OSX */

	PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
	if (compressor_pool_multiplier < 1) {
		compressor_pool_multiplier = 1;
	}

	if (compressor_pool_size > compressor_pool_max_size) {
		compressor_pool_size = compressor_pool_max_size;
	}

	c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
	c_seg_slot_var_array_min_len = c_seg_max_pages;

#if !defined(__x86_64__)
	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
	c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
#else
	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
	c_seg_allocsize = c_seg_bufsize;
#endif /* !defined(__x86_64__) */

	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
	tmp_slot_ptr.s_cseg = c_segments_limit;
	/* Panic on internal configs */
	assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);

	if (tmp_slot_ptr.s_cseg != c_segments_limit) {
		tmp_slot_ptr.s_cseg = -1;
		c_segments_limit = tmp_slot_ptr.s_cseg - 1; /* limited by the segment index bits in c_slot_mapping */
		compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
	}

	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);

	/* an upper limit on how many input pages the compressor can hold */
	c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);

	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
#if defined(XNU_TARGET_OS_WATCH)
		c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
#else
		if (!vm_compression_limit) {
			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
		}
#endif
	}

	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);

#if CONFIG_FREEZE
	/*
	 * Our in-core limits are based on the size of the compressor pool.
	 * The c_segments_nearing_limit is also based on the compressor pool
	 * size and calculated above.
	 */
	c_segments_incore_limit = c_segments_limit;

	if (freezer_incore_cseg_acct) {
		/*
		 * Add enough segments to track all frozen c_segs that can be stored in swap.
		 */
		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
		tmp_slot_ptr.s_cseg = c_segments_limit;
		/* Panic on internal configs */
		assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
	}
#endif
	/*
	 * Submap needs space for:
	 * - c_segments
	 * - c_buffers
	 * - swap reclamations -- c_seg_bufsize
	 */
	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
	c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));

	compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
	compressor_size += c_compressed_record_sbuf_size;
#endif /* RECORD_THE_COMPRESSED_DATA */
}
STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);

KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
	return compressor_size;
});

bool
osenvironment_is_diagnostics(void)
{
	DTEntry chosen;
	const char *osenvironment;
	unsigned int size;
	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
			return strcmp(osenvironment, "diagnostics") == 0;
		}
	}
	return false;
}

void
vm_compressor_init(void)
{
	thread_t        thread;
#if RECORD_THE_COMPRESSED_DATA
	vm_size_t       c_compressed_record_sbuf_size = 0;
#endif /* RECORD_THE_COMPRESSED_DATA */

#if DEVELOPMENT || DEBUG || CONFIG_FREEZE
	char bootarg_name[32];
#endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
	__unused uint64_t early_boot_compressor_size = compressor_size;

#if CONFIG_JETSAM
	if (memorystatus_swap_all_apps && osenvironment_is_diagnostics()) {
		printf("osenvironment == \"diagnostics\". Disabling app swap.\n");
		memorystatus_disable_swap();
	}

	if (memorystatus_swap_all_apps) {
		/*
		 * App swap is disabled on devices with small NANDs.
		 * Now that we're no longer in early boot, we can get
		 * the NAND size and re-run vm_compressor_set_size.
		 */
		int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
#if DEVELOPMENT || DEBUG
		if (error != 0) {
			panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
		}
#else
		if (error != 0) {
			os_log_with_startup_serial(OS_LOG_DEFAULT, "vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
		}
#endif /* DEVELOPMENT || DEBUG */
		if (vm_swap_volume_capacity < swap_vol_min_capacity) {
			memorystatus_disable_swap();
		}
		/*
		 * Resize the compressor and swap now that we know the capacity
		 * of the swap volume.
		 */
		vm_compressor_set_size();
		/*
		 * We reserved a chunk of VA early in boot for the compressor submap.
		 * We can't allocate more than that.
		 */
		assert(compressor_size <= early_boot_compressor_size);
	}
#endif /* CONFIG_JETSAM */

#if DEVELOPMENT || DEBUG
	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
		write_protect_c_segs = FALSE;
	}

	int vmcval = 1;
#if defined(XNU_TARGET_OS_WATCH)
	vmcval = 0;
#endif /* XNU_TARGET_OS_WATCH */
	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));

	if (kern_feature_override(KF_COMPRSV_OVRD)) {
		vmcval = 0;
	}

	if (vmcval == 0) {
#if POPCOUNT_THE_COMPRESSED_DATA
		popcount_c_segs = FALSE;
#endif
#if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
		checksum_c_segs = FALSE;
#endif
#if VALIDATE_C_SEGMENTS
		validate_c_segs = FALSE;
#endif
		write_protect_c_segs = FALSE;
	}
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
		freezer_incore_cseg_acct = FALSE;
	}
#endif /* CONFIG_FREEZE */

	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);

#if !XNU_TARGET_OS_OSX
	vm_compressor_minorcompact_threshold_divisor = 20;
	vm_compressor_majorcompact_threshold_divisor = 30;
	vm_compressor_unthrottle_threshold_divisor = 40;
	vm_compressor_catchup_threshold_divisor = 60;
#else /* !XNU_TARGET_OS_OSX */
	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
		vm_compressor_minorcompact_threshold_divisor = 11;
		vm_compressor_majorcompact_threshold_divisor = 13;
		vm_compressor_unthrottle_threshold_divisor = 20;
		vm_compressor_catchup_threshold_divisor = 35;
	} else {
		vm_compressor_minorcompact_threshold_divisor = 20;
		vm_compressor_majorcompact_threshold_divisor = 25;
		vm_compressor_unthrottle_threshold_divisor = 35;
		vm_compressor_catchup_threshold_divisor = 50;
	}
#endif /* !XNU_TARGET_OS_OSX */

	queue_init(&c_bad_list_head);
	queue_init(&c_age_list_head);
	queue_init(&c_minor_list_head);
	queue_init(&c_major_list_head);
	queue_init(&c_filling_list_head);
	queue_init(&c_early_swapout_list_head);
	queue_init(&c_regular_swapout_list_head);
	queue_init(&c_late_swapout_list_head);
	queue_init(&c_swapio_list_head);
	queue_init(&c_early_swappedin_list_head);
	queue_init(&c_regular_swappedin_list_head);
	queue_init(&c_late_swappedin_list_head);
	queue_init(&c_swappedout_list_head);
	queue_init(&c_swappedout_sparse_list_head);

	c_free_segno_head = -1;
	c_segments_available = 0;

	compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
	    compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    KMS_NOFAIL | KMS_PERMANENT | KMS_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR).kmr_submap;

	kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
	    (sizeof(union c_segu) * c_segments_limit),
	    KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR);
	kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
	    KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
	    VM_KERN_MEMORY_COMPRESSOR);

#if DEVELOPMENT || DEBUG
	if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
		hvg_hcall_set_coredump_data();
	}
#endif

	/*
	 * Pick a struct size that minimizes fragmentation in zalloc
	 * by minimizing the waste in a 16k allocation run.
	 *
	 * c_seg_slot_var_array_min_len is larger on 4k systems than on 16k
	 * ones, which makes the fragmentation within a 4k page terrible.
	 * Sizing against a 16k run on all systems matches zalloc() and
	 * minimizes fragmentation everywhere.
	 */
	uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
	uint32_t cnt  = (16 << 10) / c_segment_size;
	uint32_t frag = (16 << 10) % c_segment_size;

	c_seg_fixed_array_len = c_seg_slot_var_array_min_len;

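	/*
	 * Grow every segment's inline slot array one slot at a time for as
	 * long as adding a slot to each of the run's `cnt` segments still
	 * fits within the run's leftover `frag` bytes.
	 */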
	while (cnt * sizeof(struct c_slot) < frag) {
		c_segment_size += sizeof(struct c_slot);
		c_seg_fixed_array_len++;
		frag -= cnt * sizeof(struct c_slot);
	}

	compressor_segment_zone = zone_create("compressor_segment",
	    c_segment_size, ZC_PGZ_USE_GUARDS | ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);

	c_segments_busy = FALSE;

	c_segments_next_page = (caddr_t)c_segments;
	vm_compressor_algorithm_init();

	{
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
		size_t bufsize;
		char *buf;

#define BSD_HOST 1
		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		compressor_cpus = hinfo.max_cpus;

		/* allocate the various scratch buffers in one contiguous allocation */
		bufsize = PAGE_SIZE;
		bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
		/* For the panic path */
		bufsize += vm_compressor_get_decode_scratch_size();
#if CONFIG_FREEZE
		bufsize += vm_compressor_get_encode_scratch_size();
#endif
#if RECORD_THE_COMPRESSED_DATA
		bufsize += c_compressed_record_sbuf_size;
#endif

		kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
		    KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
		    VM_KERN_MEMORY_COMPRESSOR);

		/*
		 * vm_compressor_kdp_state.kc_panic_decompressed_page must be page aligned
		 * because we access it through the physical aperture by page number.
		 */
		vm_compressor_kdp_state.kc_panic_decompressed_page = buf;
		vm_compressor_kdp_state.kc_panic_decompressed_page_paddr = kvtophys((vm_offset_t)vm_compressor_kdp_state.kc_panic_decompressed_page);
		vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum = (ppnum_t) atop(vm_compressor_kdp_state.kc_panic_decompressed_page_paddr);
		buf += PAGE_SIZE;
		bufsize -= PAGE_SIZE;

		compressor_scratch_bufs = buf;
		buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
		bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();

		vm_compressor_kdp_state.kc_panic_scratch_buf = buf;
		buf += vm_compressor_get_decode_scratch_size();
		bufsize -= vm_compressor_get_decode_scratch_size();

		/* This is set up before each stackshot in vm_compressor_kdp_init */
		vm_compressor_kdp_state.kc_scratch_bufs = NULL;

#if CONFIG_FREEZE
		freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
		buf += vm_compressor_get_encode_scratch_size();
		bufsize -= vm_compressor_get_encode_scratch_size();
#endif

#if RECORD_THE_COMPRESSED_DATA
		c_compressed_record_sbuf = buf;
		c_compressed_record_cptr = buf;
		c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
		buf += c_compressed_record_sbuf_size;
		bufsize -= c_compressed_record_sbuf_size;
#endif
		assert(bufsize == 0);
	}

	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_compressor_swap_trigger_thread: create failed");
	}
	thread_deallocate(thread);

	if (vm_pageout_internal_start() != KERN_SUCCESS) {
		panic("vm_compressor_init: Failed to start the internal pageout thread.");
	}
	if (VM_CONFIG_SWAP_IS_PRESENT) {
		vm_compressor_swap_init();
	}

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		vm_compressor_is_active = 1;
	}

	vm_compressor_available = 1;

	vm_page_reactivate_all_throttled();

	bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
}

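/*
 * Size of the per-stackshot KDP buffer: one decode scratch buffer and one
 * decompressed page per CPU, followed by the parallel arrays holding each
 * page's physical address and page number.
 */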
#define COMPRESSOR_KDP_BUFSIZE (\
	(vm_compressor_get_decode_scratch_size() * compressor_cpus) + \
	(PAGE_SIZE * compressor_cpus) + \
	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus) + \
	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus))


/**
 * Initializes the VM compressor in preparation for a stackshot.
 * Stackshot mutex must be held.
 */
kern_return_t
vm_compressor_kdp_init(void)
{
	char *buf;
	kern_return_t err;
	size_t bufsize;
	size_t total_decode_size;

#if DEVELOPMENT || DEBUG
	extern lck_mtx_t stackshot_subsys_mutex;
	lck_mtx_assert(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
#endif /* DEVELOPMENT || DEBUG */

	if (!vm_compressor_available) {
		return KERN_SUCCESS;
	}

	bufsize = COMPRESSOR_KDP_BUFSIZE;

	/* Allocate the per-cpu decompression pages. */
	err = kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
	    KMA_DATA | KMA_NOFAIL | KMA_KOBJECT,
	    VM_KERN_MEMORY_COMPRESSOR);

	if (err != KERN_SUCCESS) {
		return err;
	}

	assert(vm_compressor_kdp_state.kc_scratch_bufs == NULL);
	vm_compressor_kdp_state.kc_scratch_bufs = buf;
	total_decode_size = vm_compressor_get_decode_scratch_size() * compressor_cpus;
	buf += total_decode_size;
	bufsize -= total_decode_size;

	/*
	 * vm_compressor_kdp_state.kc_decompressed_pages must be page aligned
	 * because we access them through the physical aperture by page number.
	 */
	assert(vm_compressor_kdp_state.kc_decompressed_pages == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages = buf;
	buf += PAGE_SIZE * compressor_cpus;
	bufsize -= PAGE_SIZE * compressor_cpus;

	/* Scary! This will be aligned, I promise :) */
	assert(((vm_address_t) buf) % _Alignof(addr64_t) == 0);
	assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages_paddr = (addr64_t*) (void*) buf;
	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;

	assert(((vm_address_t) buf) % _Alignof(ppnum_t) == 0);
	assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum == NULL);
	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = (ppnum_t*) (void*) buf;
	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;

	assert(bufsize == 0);

	for (size_t i = 0; i < compressor_cpus; i++) {
		vm_offset_t offset = (vm_offset_t) &vm_compressor_kdp_state.kc_decompressed_pages[i * PAGE_SIZE];
		vm_compressor_kdp_state.kc_decompressed_pages_paddr[i] = kvtophys(offset);
		vm_compressor_kdp_state.kc_decompressed_pages_ppnum[i] = (ppnum_t) atop(vm_compressor_kdp_state.kc_decompressed_pages_paddr[i]);
	}

	return KERN_SUCCESS;
}

/*
 * Frees the compressor buffers used by stackshot.
 * Stackshot mutex must be held.
 */
void
vm_compressor_kdp_teardown(void)
{
	extern lck_mtx_t stackshot_subsys_mutex;
	LCK_MTX_ASSERT(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);

	if (vm_compressor_kdp_state.kc_scratch_bufs == NULL) {
		return;
	}

	/* Deallocate the per-cpu decompression pages. */
	kmem_free(kernel_map, (vm_offset_t) vm_compressor_kdp_state.kc_scratch_bufs, COMPRESSOR_KDP_BUFSIZE);

	vm_compressor_kdp_state.kc_scratch_bufs = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages_paddr = NULL;
	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = NULL;
}

static uint32_t
c_slot_extra_size(c_slot_t cs)
{
#pragma unused(cs)
	return 0;
}

#if VALIDATE_C_SEGMENTS

static void
c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
{
	uint16_t        c_indx;
	int32_t         bytes_used;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	c_slot_t        cs;

	if (__probable(validate_c_segs == FALSE)) {
		return;
	}
	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
		c_indx = c_seg->c_firstemptyslot;
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		if (cs == NULL) {
			panic("c_seg_validate:  no slot backing c_firstemptyslot");
		}

		if (cs->c_size) {
			panic("c_seg_validate:  c_firstemptyslot has non-zero size (%d)", cs->c_size);
		}
	}
	bytes_used = 0;

	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		c_size = UNPACK_C_SIZE(cs);

		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));

		bytes_used += c_rounded_size;

#if CHECKSUM_THE_COMPRESSED_DATA
		unsigned csvhash;
		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
		}
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
		unsigned csvpop;
		if (c_size) {
			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
			}
		}
#endif
	}

	if (bytes_used != c_seg->c_bytes_used) {
		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
	}

	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
	}

	if (must_be_compact) {
		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
		}
	}
}

#endif


void
c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
{
	boolean_t       clear_busy = FALSE;

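	/*
	 * Lock order is c_list_lock, then the segment lock. If the trylock
	 * fails, mark the segment busy so it can't disappear, drop its lock,
	 * and re-take both locks in the proper order.
	 */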
	if (c_list_lock_held == FALSE) {
		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
			C_SEG_BUSY(c_seg);

			lck_mtx_unlock_always(&c_seg->c_lock);
			lck_mtx_lock_spin_always(c_list_lock);
			lck_mtx_lock_spin_always(&c_seg->c_lock);

			clear_busy = TRUE;
		}
	}
	assert(c_seg->c_state != C_IS_FILLING);

	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 1;
		os_atomic_inc(&c_minor_count, relaxed);
	}
	if (c_list_lock_held == FALSE) {
		lck_mtx_unlock_always(c_list_lock);
	}

	if (clear_busy == TRUE) {
		C_SEG_WAKEUP_DONE(c_seg);
	}
}


unsigned int c_seg_moved_to_sparse_list = 0;

void
c_seg_move_to_sparse_list(c_segment_t c_seg)
{
	boolean_t       clear_busy = FALSE;

	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);

	c_seg_moved_to_sparse_list++;

	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE) {
		C_SEG_WAKEUP_DONE(c_seg);
	}
}


int try_minor_compaction_failed = 0;
int try_minor_compaction_succeeded = 0;

void
c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
{
	assert(c_seg->c_on_minorcompact_q);
	/*
	 * c_seg is currently on the delayed minor compaction
	 * queue and we have c_seg locked... if we can get the
	 * c_list_lock w/o blocking (if we blocked we could deadlock
	 * because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the delayed list and free it directly
	 */
	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_minor_compaction_failed++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	} else {
		try_minor_compaction_succeeded++;

		C_SEG_BUSY(c_seg);
		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
	}
}


int
c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
{
	int     c_seg_freed;

	assert(c_seg->c_busy);
	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));

	/*
	 * check for the case that can occur when we are not swapping
	 * and this segment has been major compacted in the past
	 * and moved to the majorcompact q to remove it from further
	 * consideration... if the occupancy falls too low we need
	 * to put it back on the age_q so that it will be considered
	 * in the next major compaction sweep... if we don't do this
	 * we will eventually run into the c_segments_limit
	 */
	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
	}
	if (!c_seg->c_on_minorcompact_q) {
		if (clear_busy == TRUE) {
			C_SEG_WAKEUP_DONE(c_seg);
		}

		lck_mtx_unlock_always(&c_seg->c_lock);

		return 0;
	}
	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
	c_seg->c_on_minorcompact_q = 0;
	os_atomic_dec(&c_minor_count, relaxed);

	lck_mtx_unlock_always(c_list_lock);

	if (disallow_page_replacement == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);

	if (disallow_page_replacement == TRUE) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}

	if (need_list_lock == TRUE) {
		lck_mtx_lock_spin_always(c_list_lock);
	}

	return c_seg_freed;
}
1561 
1562 void
1563 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1564 {
1565 	c_segment_t c_seg = (c_segment_t) wait_event;
1566 
1567 	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1568 	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1569 }
1570 
1571 #if DEVELOPMENT || DEBUG
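/*
 * Debug-only helpers: do_cseg_wedge_thread() parks the calling thread on a
 * stack-local c_segment marked busy for that thread, exercising the
 * kdp_compressor_busy_find_owner() reporting above;
 * do_cseg_unwedge_thread() wakes it back up.
 */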
1572 int
1573 do_cseg_wedge_thread(void)
1574 {
1575 	struct c_segment c_seg;
1576 	c_seg.c_busy_for_thread = current_thread();
1577 
1578 	debug_cseg_wait_event = (event_t) &c_seg;
1579 
1580 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1581 	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1582 
1583 	thread_block(THREAD_CONTINUE_NULL);
1584 
1585 	return 0;
1586 }
1587 
1588 int
1589 do_cseg_unwedge_thread(void)
1590 {
1591 	thread_wakeup(debug_cseg_wait_event);
1592 	debug_cseg_wait_event = NULL;
1593 
1594 	return 0;
1595 }
1596 #endif /* DEVELOPMENT || DEBUG */
1597 
1598 void
1599 c_seg_wait_on_busy(c_segment_t c_seg)
1600 {
1601 	c_seg->c_wanted = 1;
1602 
1603 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1604 	assert_wait((event_t) (c_seg), THREAD_UNINT);
1605 
1606 	lck_mtx_unlock_always(&c_seg->c_lock);
1607 	thread_block(THREAD_CONTINUE_NULL);
1608 }
1609 
1610 #if CONFIG_FREEZE
1611 /*
1612  * We don't have the task lock held while updating the task's
1613  * c_seg queues. We can do that because of the following restrictions:
1614  *
1615  * - SINGLE FREEZER CONTEXT:
1616  *   We 'insert' c_segs into the task list on the task_freeze path.
1617  *   There can only be one such freeze in progress and the task
1618  *   isn't disappearing because we have the VM map lock held throughout
1619  *   and we have a reference on the proc too.
1620  *
1621  * - SINGLE TASK DISOWN CONTEXT:
1622  *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
1623  *   we don't need the task lock but we need the c_list_lock and the
1624  *   compressor master lock (shared). We also hold the individual
1625  *   c_seg locks (exclusive).
1626  *
1627  *   If we either:
1628  *   - can't get the c_seg lock on a try, then we start again because maybe
1629  *   the c_seg is part of a compaction and might get freed. So we can't trust
1630  *   that linkage and need to restart our queue traversal.
1631  *   - OR, we run into a busy c_seg (say being swapped in or freeing), we
1632  *   drop all locks again and wait and restart our queue traversal.
1633  *
1634  * - The new_owner_task below is currently only the kernel or NULL.
1635  *
1636  */
1637 void
1638 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1639 {
1640 	task_t          owner_task = c_seg->c_task_owner;
1641 	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1642 
1643 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1644 	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1645 
1646 	if (owner_task) {
1647 		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1648 		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1649 		    c_segment_t, c_task_list_next_cseg);
1650 	}
1651 
1652 	if (new_owner_task) {
1653 		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1654 		    c_segment_t, c_task_list_next_cseg);
1655 		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1656 	}
1657 
1658 	c_seg->c_task_owner = new_owner_task;
1659 }
1660 
1661 void
1662 task_disown_frozen_csegs(task_t owner_task)
1663 {
1664 	c_segment_t c_seg = NULL, next_cseg = NULL;
1665 
1666 again:
1667 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
1668 	lck_mtx_lock_spin_always(c_list_lock);
1669 
1670 	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1671 	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1672 	    c_seg = next_cseg) {
1673 		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1674 
1675 		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1676 			lck_mtx_unlock(c_list_lock);
1677 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1678 			goto again;
1679 		}
1680 
1681 		if (c_seg->c_busy) {
1682 			lck_mtx_unlock(c_list_lock);
1683 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1684 
1685 			c_seg_wait_on_busy(c_seg);
1686 
1687 			goto again;
1688 		}
1689 		assert(c_seg->c_task_owner == owner_task);
1690 		c_seg_update_task_owner(c_seg, kernel_task);
1691 		lck_mtx_unlock_always(&c_seg->c_lock);
1692 	}
1693 
1694 	lck_mtx_unlock(c_list_lock);
1695 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
1696 }
1697 #endif /* CONFIG_FREEZE */
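/*
 * c_seg_switch_state() is the segment lifecycle state machine: the first
 * switch below dequeues c_seg from the list matching its current c_state
 * (decrementing that list's count), the second enqueues it on the list for
 * new_state, and the asserts document which transitions are legal.
 */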
1698 
1699 void
1700 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1701 {
1702 	int     old_state = c_seg->c_state;
1703 	queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
1704 	uint32_t     *donate_swapout_count, *donate_swappedin_count;
1705 
1706 	/*
1707 	 * On macOS the donate queue is swapped first, i.e. the c_early_swapout queue.
1708 	 * On other swap-capable platforms, we want to swap those out last. So we
1709 	 * use the c_late_swapout queue.
1710 	 */
1711 #if XNU_TARGET_OS_OSX  /* tag:DONATE */
1712 #if (DEVELOPMENT || DEBUG)
1713 	if (new_state != C_IS_FILLING) {
1714 		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1715 	}
1716 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1717 #endif /* DEVELOPMENT || DEBUG */
1718 
1719 	donate_swapout_list_head = &c_early_swapout_list_head;
1720 	donate_swapout_count = &c_early_swapout_count;
1721 	donate_swappedin_list_head = &c_early_swappedin_list_head;
1722 	donate_swappedin_count = &c_early_swappedin_count;
1723 #else /* XNU_TARGET_OS_OSX */
1724 	donate_swapout_list_head = &c_late_swapout_list_head;
1725 	donate_swapout_count = &c_late_swapout_count;
1726 	donate_swappedin_list_head = &c_late_swappedin_list_head;
1727 	donate_swappedin_count = &c_late_swappedin_count;
1728 #endif /* XNU_TARGET_OS_OSX */
1729 
1730 	switch (old_state) {
1731 	case C_IS_EMPTY:
1732 		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1733 
1734 		c_empty_count--;
1735 		break;
1736 
1737 	case C_IS_FILLING:
1738 		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1739 
1740 		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1741 		c_filling_count--;
1742 		break;
1743 
1744 	case C_ON_AGE_Q:
1745 		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1746 		    new_state == C_IS_FREE);
1747 
1748 		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1749 		c_age_count--;
1750 		break;
1751 
1752 	case C_ON_SWAPPEDIN_Q:
1753 		if (c_seg->c_has_donated_pages) {
1754 			assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
1755 			queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1756 			*donate_swappedin_count -= 1;
1757 		} else {
1758 			assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1759 #if CONFIG_FREEZE
1760 			assert(c_seg->c_has_freezer_pages);
1761 			queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1762 			c_early_swappedin_count--;
1763 #else /* CONFIG_FREEZE */
1764 			queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1765 			c_regular_swappedin_count--;
1766 #endif /* CONFIG_FREEZE */
1767 		}
1768 		break;
1769 
1770 	case C_ON_SWAPOUT_Q:
1771 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1772 
1773 #if CONFIG_FREEZE
1774 		if (c_seg->c_has_freezer_pages) {
1775 			if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1776 				c_seg_update_task_owner(c_seg, NULL);
1777 			}
1778 			queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
1779 			c_early_swapout_count--;
1780 		} else
1781 #endif /* CONFIG_FREEZE */
1782 		{
1783 			if (c_seg->c_has_donated_pages) {
1784 				queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
1785 				*donate_swapout_count -= 1;
1786 			} else {
1787 				queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
1788 				c_regular_swapout_count--;
1789 			}
1790 		}
1791 
1792 		if (new_state == C_ON_AGE_Q) {
1793 			c_seg->c_has_donated_pages = 0;
1794 		}
1795 		thread_wakeup((event_t)&compaction_swapper_running);
1796 		break;
1797 
1798 	case C_ON_SWAPIO_Q:
1799 #if CONFIG_FREEZE
1800 		if (c_seg->c_has_freezer_pages) {
1801 			assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1802 		} else
1803 #endif /* CONFIG_FREEZE */
1804 		{
1805 			if (c_seg->c_has_donated_pages) {
1806 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
1807 			} else {
1808 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1809 			}
1810 		}
1811 
1812 		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1813 		c_swapio_count--;
1814 		break;
1815 
1816 	case C_ON_SWAPPEDOUT_Q:
1817 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1818 		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1819 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1820 
1821 		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1822 		c_swappedout_count--;
1823 		break;
1824 
1825 	case C_ON_SWAPPEDOUTSPARSE_Q:
1826 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1827 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1828 
1829 		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1830 		c_swappedout_sparse_count--;
1831 		break;
1832 
1833 	case C_ON_MAJORCOMPACT_Q:
1834 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1835 
1836 		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1837 		c_major_count--;
1838 		break;
1839 
1840 	case C_ON_BAD_Q:
1841 		assert(new_state == C_IS_FREE);
1842 
1843 		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1844 		c_bad_count--;
1845 		break;
1846 
1847 	default:
1848 		panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1849 	}
1850 
1851 	switch (new_state) {
1852 	case C_IS_FREE:
1853 		assert(old_state != C_IS_FILLING);
1854 
1855 		break;
1856 
1857 	case C_IS_EMPTY:
1858 		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1859 
1860 		c_empty_count++;
1861 		break;
1862 
1863 	case C_IS_FILLING:
1864 		assert(old_state == C_IS_EMPTY);
1865 
1866 		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1867 		c_filling_count++;
1868 		break;
1869 
1870 	case C_ON_AGE_Q:
1871 		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1872 		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1873 		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1874 
1875 		assert(!c_seg->c_has_donated_pages);
1876 		if (old_state == C_IS_FILLING) {
1877 			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1878 		} else {
1879 			if (!queue_empty(&c_age_list_head)) {
1880 				c_segment_t     c_first;
1881 
1882 				c_first = (c_segment_t)queue_first(&c_age_list_head);
1883 				c_seg->c_creation_ts = c_first->c_creation_ts;
1884 			}
1885 			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1886 		}
1887 		c_age_count++;
1888 		break;
1889 
1890 	case C_ON_SWAPPEDIN_Q:
1891 	{
1892 		queue_head_t *list_head;
1893 
1894 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
1895 		if (c_seg->c_has_donated_pages) {
1896 			/* An error during swapout can happen while the c_seg is still on the swapio queue */
1897 			list_head = donate_swappedin_list_head;
1898 			*donate_swappedin_count += 1;
1899 		} else {
1900 #if CONFIG_FREEZE
1901 			assert(c_seg->c_has_freezer_pages);
1902 			list_head = &c_early_swappedin_list_head;
1903 			c_early_swappedin_count++;
1904 #else /* CONFIG_FREEZE */
1905 			list_head = &c_regular_swappedin_list_head;
1906 			c_regular_swappedin_count++;
1907 #endif /* CONFIG_FREEZE */
1908 		}
1909 
1910 		if (insert_head == TRUE) {
1911 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1912 		} else {
1913 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1914 		}
1915 		break;
1916 	}
1917 
1918 	case C_ON_SWAPOUT_Q:
1919 	{
1920 		queue_head_t *list_head;
1921 
1922 #if CONFIG_FREEZE
1923 		/*
1924 		 * A segment with both identities (frozen + donated pages)
1925 		 * will be put on the early swapout Q, i.e. the frozen identity wins.
1926 		 * This is because when both identities are set, the donation bit
1927 		 * is set afterwards, in the c_current_seg_filled path, for
1928 		 * accounting purposes.
1929 		 */
1930 		if (c_seg->c_has_freezer_pages) {
1931 			assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1932 			list_head = &c_early_swapout_list_head;
1933 			c_early_swapout_count++;
1934 		} else
1935 #endif
1936 		{
1937 			if (c_seg->c_has_donated_pages) {
1938 				assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
1939 				list_head = donate_swapout_list_head;
1940 				*donate_swapout_count += 1;
1941 			} else {
1942 				assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1943 				list_head = &c_regular_swapout_list_head;
1944 				c_regular_swapout_count++;
1945 			}
1946 		}
1947 
1948 		if (insert_head == TRUE) {
1949 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1950 		} else {
1951 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1952 		}
1953 		break;
1954 	}
1955 
1956 	case C_ON_SWAPIO_Q:
1957 		assert(old_state == C_ON_SWAPOUT_Q);
1958 
1959 		if (insert_head == TRUE) {
1960 			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1961 		} else {
1962 			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1963 		}
1964 		c_swapio_count++;
1965 		break;
1966 
1967 	case C_ON_SWAPPEDOUT_Q:
1968 		assert(old_state == C_ON_SWAPIO_Q);
1969 
1970 		if (insert_head == TRUE) {
1971 			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1972 		} else {
1973 			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1974 		}
1975 		c_swappedout_count++;
1976 		break;
1977 
1978 	case C_ON_SWAPPEDOUTSPARSE_Q:
1979 		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
1980 
1981 		if (insert_head == TRUE) {
1982 			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1983 		} else {
1984 			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1985 		}
1986 
1987 		c_swappedout_sparse_count++;
1988 		break;
1989 
1990 	case C_ON_MAJORCOMPACT_Q:
1991 		assert(old_state == C_ON_AGE_Q);
1992 		assert(!c_seg->c_has_donated_pages);
1993 
1994 		if (insert_head == TRUE) {
1995 			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1996 		} else {
1997 			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1998 		}
1999 		c_major_count++;
2000 		break;
2001 
2002 	case C_ON_BAD_Q:
2003 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
2004 
2005 		if (insert_head == TRUE) {
2006 			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2007 		} else {
2008 			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2009 		}
2010 		c_bad_count++;
2011 		break;
2012 
2013 	default:
2014 		panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
2015 	}
2016 	c_seg->c_state = new_state;
2017 }
2018 
2019 
2020 
2021 void
2022 c_seg_free(c_segment_t c_seg)
2023 {
2024 	assert(c_seg->c_busy);
2025 
2026 	lck_mtx_unlock_always(&c_seg->c_lock);
2027 	lck_mtx_lock_spin_always(c_list_lock);
2028 	lck_mtx_lock_spin_always(&c_seg->c_lock);
2029 
2030 	c_seg_free_locked(c_seg);
2031 }
2032 
2033 
2034 void
2035 c_seg_free_locked(c_segment_t c_seg)
2036 {
2037 	int             segno;
2038 	int             pages_populated = 0;
2039 	int32_t         *c_buffer = NULL;
2040 	uint64_t        c_swap_handle = 0;
2041 
2042 	assert(c_seg->c_busy);
2043 	assert(c_seg->c_slots_used == 0);
2044 	assert(!c_seg->c_on_minorcompact_q);
2045 	assert(!c_seg->c_busy_swapping);
2046 
2047 	if (c_seg->c_overage_swap == TRUE) {
2048 		c_overage_swapped_count--;
2049 		c_seg->c_overage_swap = FALSE;
2050 	}
2051 	if (!(C_SEG_IS_ONDISK(c_seg))) {
2052 		c_buffer = c_seg->c_store.c_buffer;
2053 	} else {
2054 		c_swap_handle = c_seg->c_store.c_swap_handle;
2055 	}
2056 
2057 	c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
2058 
2059 	if (c_buffer) {
2060 		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
2061 		c_seg->c_store.c_buffer = NULL;
2062 	} else {
2063 #if CONFIG_FREEZE
2064 		c_seg_update_task_owner(c_seg, NULL);
2065 #endif /* CONFIG_FREEZE */
2066 
2067 		c_seg->c_store.c_swap_handle = (uint64_t)-1;
2068 	}
2069 
2070 	lck_mtx_unlock_always(&c_seg->c_lock);
2071 
2072 	lck_mtx_unlock_always(c_list_lock);
2073 
2074 	if (c_buffer) {
2075 		if (pages_populated) {
2076 			kernel_memory_depopulate((vm_offset_t)c_buffer,
2077 			    ptoa(pages_populated), KMA_COMPRESSOR,
2078 			    VM_KERN_MEMORY_COMPRESSOR);
2079 		}
2080 	} else if (c_swap_handle) {
2081 		/*
2082 		 * Free swap space on disk.
2083 		 */
2084 		vm_swap_free(c_swap_handle);
2085 	}
2086 	lck_mtx_lock_spin_always(&c_seg->c_lock);
2087 	/*
2088 	 * c_seg must remain busy until
2089 	 * after the call to vm_swap_free
2090 	 */
2091 	C_SEG_WAKEUP_DONE(c_seg);
2092 	lck_mtx_unlock_always(&c_seg->c_lock);
2093 
2094 	segno = c_seg->c_mysegno;
2095 
2096 	lck_mtx_lock_spin_always(c_list_lock);
2097 	/*
2098 	 * because the c_buffer is now associated with the segno,
2099 	 * we can't put the segno back on the free list until
2100 	 * after we have depopulated the c_buffer range, or
2101 	 * we run the risk of depopulating a range that is
2102 	 * now being used in one of the compressor heads
2103 	 */
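	/*
	 * Push the segment number onto the intrusive LIFO free list:
	 * the freed slot records the old head and becomes the new head.
	 */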
2104 	c_segments_get(segno)->c_segno = c_free_segno_head;
2105 	c_free_segno_head = segno;
2106 	c_segment_count--;
2107 
2108 	lck_mtx_unlock_always(c_list_lock);
2109 
2110 	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
2111 
2112 	if (c_seg->c_slot_var_array_len) {
2113 		kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
2114 		    c_seg->c_slot_var_array);
2115 	}
2116 
2117 	zfree(compressor_segment_zone, c_seg);
2118 }
2119 
2120 #if DEVELOPMENT || DEBUG
2121 int c_seg_trim_page_count = 0;
2122 #endif
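/*
 * c_seg_trim_tail(): walk backwards from the last slot, dropping trailing
 * empty slots, then pull c_nextoffset in to the end of the last live slot
 * and recompute c_populated_offset as the page-rounded end of the live
 * data, shrinking the segment's populated span.
 */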
2123 
2124 void
2125 c_seg_trim_tail(c_segment_t c_seg)
2126 {
2127 	c_slot_t        cs;
2128 	uint32_t        c_size;
2129 	uint32_t        c_offset;
2130 	uint32_t        c_rounded_size;
2131 	uint16_t        current_nextslot;
2132 	uint32_t        current_populated_offset;
2133 
2134 	if (c_seg->c_bytes_used == 0) {
2135 		return;
2136 	}
2137 	current_nextslot = c_seg->c_nextslot;
2138 	current_populated_offset = c_seg->c_populated_offset;
2139 
2140 	while (c_seg->c_nextslot) {
2141 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
2142 
2143 		c_size = UNPACK_C_SIZE(cs);
2144 
2145 		if (c_size) {
2146 			if (current_nextslot != c_seg->c_nextslot) {
2147 				c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
2148 				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2149 
2150 				c_seg->c_nextoffset = c_offset;
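				/*
				 * Round the new end-of-data up to the next page
				 * boundary, expressed in compressor offset units
				 * (one page == C_SEG_BYTES_TO_OFFSET(PAGE_SIZE)).
				 */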
2151 				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
2152 				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2153 
2154 				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
2155 					c_seg->c_firstemptyslot = c_seg->c_nextslot;
2156 				}
2157 #if DEVELOPMENT || DEBUG
2158 				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
2159 				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
2160 				    PAGE_SIZE);
2161 #endif
2162 			}
2163 			break;
2164 		}
2165 		c_seg->c_nextslot--;
2166 	}
2167 	assert(c_seg->c_nextslot);
2168 }
2169 
2170 
2171 int
2172 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
2173 {
2174 	c_slot_mapping_t slot_ptr;
2175 	uint32_t        c_offset = 0;
2176 	uint32_t        old_populated_offset;
2177 	uint32_t        c_rounded_size;
2178 	uint32_t        c_size;
2179 	uint16_t        c_indx = 0;
2180 	int             i;
2181 	c_slot_t        c_dst;
2182 	c_slot_t        c_src;
2183 
2184 	assert(c_seg->c_busy);
2185 
2186 #if VALIDATE_C_SEGMENTS
2187 	c_seg_validate(c_seg, FALSE);
2188 #endif
2189 	if (c_seg->c_bytes_used == 0) {
2190 		c_seg_free(c_seg);
2191 		return 1;
2192 	}
2193 	lck_mtx_unlock_always(&c_seg->c_lock);
2194 
2195 	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
2196 		goto done;
2197 	}
2198 
2199 /* TODO: assert first emptyslot's c_size is actually 0 */
2200 
2201 #if DEVELOPMENT || DEBUG
2202 	C_SEG_MAKE_WRITEABLE(c_seg);
2203 #endif
2204 
2205 #if VALIDATE_C_SEGMENTS
2206 	c_seg->c_was_minor_compacted++;
2207 #endif
2208 	c_indx = c_seg->c_firstemptyslot;
2209 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2210 
2211 	old_populated_offset = c_seg->c_populated_offset;
2212 	c_offset = c_dst->c_offset;
2213 
2214 	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
2215 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
2216 
2217 		c_size = UNPACK_C_SIZE(c_src);
2218 
2219 		if (c_size == 0) {
2220 			continue;
2221 		}
2222 
2223 		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(c_src));
2224 
2225 /* N.B.: This memcpy may be an overlapping copy */
2226 		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
2227 
2228 		cslot_copy(c_dst, c_src);
2229 		c_dst->c_offset = c_offset;
2230 
2231 		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2232 		slot_ptr->s_cindx = c_indx;
2233 
2234 		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2235 		PACK_C_SIZE(c_src, 0);
2236 		c_indx++;
2237 
2238 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2239 	}
2240 	c_seg->c_firstemptyslot = c_indx;
2241 	c_seg->c_nextslot = c_indx;
2242 	c_seg->c_nextoffset = c_offset;
2243 	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2244 	c_seg->c_bytes_unused = 0;
2245 
2246 #if VALIDATE_C_SEGMENTS
2247 	c_seg_validate(c_seg, TRUE);
2248 #endif
2249 	if (old_populated_offset > c_seg->c_populated_offset) {
2250 		uint32_t        gc_size;
2251 		int32_t         *gc_ptr;
2252 
2253 		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
2254 		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
2255 
2256 		kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
2257 		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
2258 	}
2259 
2260 #if DEVELOPMENT || DEBUG
2261 	C_SEG_WRITE_PROTECT(c_seg);
2262 #endif
2263 
2264 done:
2265 	if (clear_busy == TRUE) {
2266 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2267 		C_SEG_WAKEUP_DONE(c_seg);
2268 		lck_mtx_unlock_always(&c_seg->c_lock);
2269 	}
2270 	return 0;
2271 }
2272 
2273 
2274 static void
2275 c_seg_alloc_nextslot(c_segment_t c_seg)
2276 {
2277 	struct c_slot   *old_slot_array = NULL;
2278 	struct c_slot   *new_slot_array = NULL;
2279 	int             newlen;
2280 	int             oldlen;
2281 
2282 	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
2283 		return;
2284 	}
2285 
2286 	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
2287 		oldlen = c_seg->c_slot_var_array_len;
2288 		old_slot_array = c_seg->c_slot_var_array;
2289 
2290 		if (oldlen == 0) {
2291 			newlen = c_seg_slot_var_array_min_len;
2292 		} else {
2293 			newlen = oldlen * 2;
2294 		}
2295 
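		/*
		 * Grow the variable-length slot array geometrically (2x) so
		 * repeated slot allocations have amortized O(1) cost; the
		 * allocation happens before taking the c_seg lock, the copy
		 * and pointer swap under it, and the old array is freed after.
		 */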
2296 		new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);
2297 
2298 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2299 
2300 		if (old_slot_array) {
2301 			memcpy(new_slot_array, old_slot_array,
2302 			    sizeof(struct c_slot) * oldlen);
2303 		}
2304 
2305 		c_seg->c_slot_var_array_len = newlen;
2306 		c_seg->c_slot_var_array = new_slot_array;
2307 
2308 		lck_mtx_unlock_always(&c_seg->c_lock);
2309 
2310 		kfree_type(struct c_slot, oldlen, old_slot_array);
2311 	}
2312 }
2313 
2314 
2315 #define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)
2316 
2317 struct {
2318 	uint64_t asked_permission;
2319 	uint64_t compactions;
2320 	uint64_t moved_slots;
2321 	uint64_t moved_bytes;
2322 	uint64_t wasted_space_in_swapouts;
2323 	uint64_t count_of_swapouts;
2324 	uint64_t count_of_freed_segs;
2325 	uint64_t bailed_compactions;
2326 	uint64_t bytes_freed_rate_us;
2327 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
2328 
2329 int c_seg_major_compact_stats_now = 0;
2330 
2331 
2332 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((c_seg_bufsize * 90) / 100)
2333 
2334 
2335 boolean_t
2336 c_seg_major_compact_ok(
2337 	c_segment_t c_seg_dst,
2338 	c_segment_t c_seg_src)
2339 {
2340 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
2341 
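	/*
	 * If both segments already hold >= 90% of a buffer's worth of data
	 * (C_MAJOR_COMPACTION_SIZE_APPROPRIATE), merging them can't empty
	 * the source segment, so the copy work isn't worth it.
	 */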
2342 	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
2343 	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
2344 		return FALSE;
2345 	}
2346 
2347 	if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2348 		/*
2349 		 * destination segment is full... can't compact
2350 		 */
2351 		return FALSE;
2352 	}
2353 
2354 	return TRUE;
2355 }
2356 
2357 /*
2358  * Move slots from src to dst
2359  * Returns TRUE if we can continue compacting further into the same dst segment.
2360  */
2361 boolean_t
2362 c_seg_major_compact(
2363 	c_segment_t c_seg_dst,
2364 	c_segment_t c_seg_src)
2365 {
2366 	c_slot_mapping_t slot_ptr;
2367 	uint32_t        c_rounded_size;
2368 	uint32_t        c_size;
2369 	uint16_t        dst_slot;
2370 	int             i;
2371 	c_slot_t        c_dst;
2372 	c_slot_t        c_src;
2373 	boolean_t       keep_compacting = TRUE;
2374 
2375 	/*
2376 	 * segments are not locked but they are both marked c_busy
2377 	 * which keeps c_decompress from working on them...
2378 	 * we can safely allocate new pages, move compressed data
2379 	 * from c_seg_src to c_seg_dst and update both c_segment's
2380 	 * from c_seg_src to c_seg_dst and update both c_segments'
2381 	 */
2382 #if DEVELOPMENT || DEBUG
2383 	C_SEG_MAKE_WRITEABLE(c_seg_dst);
2384 #endif
2385 
2386 #if VALIDATE_C_SEGMENTS
2387 	c_seg_dst->c_was_major_compacted++;
2388 	c_seg_src->c_was_major_donor++;
2389 #endif
2390 	assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
2391 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
2392 
2393 	dst_slot = c_seg_dst->c_nextslot;
2394 
2395 	for (i = 0; i < c_seg_src->c_nextslot; i++) {
2396 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
2397 
2398 		c_size = UNPACK_C_SIZE(c_src);
2399 
2400 		if (c_size == 0) {
2401 			/* BATCH: move what we have so far; */
2402 			continue;
2403 		}
2404 
2405 		int combined_size = c_size + c_slot_extra_size(c_src);
2406 
2407 		c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);
2408 
2409 		int size_left = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_nextoffset);
2410 		/* we're going to increment c_nextoffset by c_rounded_size so it should not overflow the segment bufsize */
2411 		if (size_left < c_rounded_size) {
2412 			keep_compacting = FALSE;
2413 			break;
2414 		}
2415 
2416 		/* Do we have enough populated space left in dst? */
2417 		assertf(c_seg_dst->c_populated_offset >= c_seg_dst->c_nextoffset, "Unexpected segment offsets: %u,%u", c_seg_dst->c_populated_offset, c_seg_dst->c_nextoffset);
2418 		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) combined_size) {
2419 			int     size_to_populate;
2420 
2421 			/* eagerly populate the entire segment in expectation of filling it */
2422 			assert(c_seg_bufsize >= C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset));
2423 			size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2424 
2425 			if (size_to_populate == 0) {
2426 				/* can't populate any more pages in this segment */
2427 				keep_compacting = FALSE;
2428 				break;
2429 			}
2430 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2431 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2432 			}
2433 
2434 			kernel_memory_populate(
2435 				(vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2436 				size_to_populate,
2437 				KMA_NOFAIL | KMA_COMPRESSOR,
2438 				VM_KERN_MEMORY_COMPRESSOR);
2439 
2440 			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2441 			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2442 		}
2443 		c_seg_alloc_nextslot(c_seg_dst);
2444 
2445 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2446 
2447 		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
2448 
2449 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2450 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += combined_size;
2451 
2452 		cslot_copy(c_dst, c_src);
2453 		c_dst->c_offset = c_seg_dst->c_nextoffset;
2454 
2455 		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2456 			c_seg_dst->c_firstemptyslot++;
2457 		}
2458 		c_seg_dst->c_slots_used++;
2459 		c_seg_dst->c_nextslot++;
2460 		c_seg_dst->c_bytes_used += c_rounded_size;
2461 		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2462 
2463 		PACK_C_SIZE(c_src, 0);
2464 
2465 		c_seg_src->c_bytes_used -= c_rounded_size;
2466 		c_seg_src->c_bytes_unused += c_rounded_size;
2467 		c_seg_src->c_firstemptyslot = 0;
2468 
2469 		assert(c_seg_src->c_slots_used);
2470 		c_seg_src->c_slots_used--;
2471 
2472 		if (!c_seg_src->c_swappedin) {
2473 			/* Pessimistically lose swappedin status when non-swappedin pages are added. */
2474 			c_seg_dst->c_swappedin = false;
2475 		}
2476 
2477 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2478 			/* dest segment is now full */
2479 			keep_compacting = FALSE;
2480 			break;
2481 		}
2482 	}
2483 #if DEVELOPMENT || DEBUG
2484 	C_SEG_WRITE_PROTECT(c_seg_dst);
2485 #endif
2486 	if (dst_slot < c_seg_dst->c_nextslot) {
2487 		PAGE_REPLACEMENT_ALLOWED(TRUE);
2488 		/*
2489 		 * we've now locked out c_decompress from
2490 		 * converting the slot passed into it into
2491 		 * a c_segment_t which allows us to use
2492 		 * the backptr to change which c_segment and
2493 		 * index the slot points to
2494 		 */
2495 		while (dst_slot < c_seg_dst->c_nextslot) {
2496 			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2497 
2498 			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2499 			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2500 			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2501 			slot_ptr->s_cindx = dst_slot++;
2502 		}
2503 		PAGE_REPLACEMENT_ALLOWED(FALSE);
2504 	}
2505 	return keep_compacting;
2506 }
2507 
2508 
2509 uint64_t
2510 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2511 {
2512 	uint64_t end_msecs;
2513 	uint64_t start_msecs;
2514 
2515 	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2516 	start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2517 
2518 	return end_msecs - start_msecs;
2519 }
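/*
 * Worked example: end = (2 s, 500000000 ns) and start = (1 s, 250000000 ns)
 * gives (2 * 1000 + 500) - (1 * 1000 + 250) = 1250 elapsed msecs.
 */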
2520 
2521 
2522 
2523 uint32_t compressor_eval_period_in_msecs = 250;
2524 uint32_t compressor_sample_min_in_msecs = 500;
2525 uint32_t compressor_sample_max_in_msecs = 10000;
2526 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2527 uint32_t compressor_thrashing_min_per_10msecs = 20;
2528 
2529 /* When true, reset sample data next chance we get. */
2530 static boolean_t        compressor_need_sample_reset = FALSE;
2531 
2532 
2533 void
2534 compute_swapout_target_age(void)
2535 {
2536 	clock_sec_t     cur_ts_sec;
2537 	clock_nsec_t    cur_ts_nsec;
2538 	uint32_t        min_operations_needed_in_this_sample;
2539 	uint64_t        elapsed_msecs_in_eval;
2540 	uint64_t        elapsed_msecs_in_sample;
2541 	boolean_t       need_eval_reset = FALSE;
2542 
2543 	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2544 
2545 	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2546 
2547 	if (compressor_need_sample_reset ||
2548 	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2549 		compressor_need_sample_reset = TRUE;
2550 		need_eval_reset = TRUE;
2551 		goto done;
2552 	}
2553 	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2554 
2555 	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2556 		goto done;
2557 	}
2558 	need_eval_reset = TRUE;
2559 
2560 	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2561 
2562 	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2563 
2564 	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2565 	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2566 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2567 		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2568 
2569 		swapout_target_age = 0;
2570 
2571 		compressor_need_sample_reset = TRUE;
2572 		need_eval_reset = TRUE;
2573 		goto done;
2574 	}
2575 	last_eval_compression_count = sample_period_compression_count;
2576 	last_eval_decompression_count = sample_period_decompression_count;
2577 
2578 	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2579 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2580 		goto done;
2581 	}
2582 	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2583 		uint64_t        running_total;
2584 		uint64_t        working_target;
2585 		uint64_t        aging_target;
2586 		uint32_t        oldest_age_of_csegs_sampled = 0;
2587 		uint64_t        working_set_approximation = 0;
2588 
2589 		swapout_target_age = 0;
2590 
2591 		working_target = (sample_period_decompression_count / 100) * 95;                /* 95 percent */
2592 		aging_target = (sample_period_decompression_count / 100) * 1;                   /* 1 percent */
2593 		running_total = 0;
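		/*
		 * First pass: scan the decompression-age histogram from the
		 * youngest bucket up, accumulating until ~95% of the sample's
		 * decompressions are covered; the stopping bucket bounds the
		 * age of the actively referenced compressed data, and the
		 * age-weighted sum doubles as a working-set approximation.
		 */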
2594 
2595 		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2596 			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2597 
2598 			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2599 
2600 			if (running_total >= working_target) {
2601 				break;
2602 			}
2603 		}
2604 		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2605 			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2606 
2607 			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2608 				running_total = overage_decompressions_during_sample_period;
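				/*
				 * Working set fits in the compressor pool:
				 * walk the histogram from the oldest bucket
				 * down until ~1% of decompressions
				 * (aging_target) is covered; anything older
				 * than that age becomes a swapout candidate.
				 */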
2609 
2610 				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2611 					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2612 
2613 					if (running_total >= aging_target) {
2614 						break;
2615 					}
2616 				}
2617 				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2618 
2619 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2620 			} else {
2621 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2622 			}
2623 		} else {
2624 			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2625 		}
2626 
2627 		compressor_need_sample_reset = TRUE;
2628 		need_eval_reset = TRUE;
2629 	} else {
2630 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2631 	}
2632 done:
2633 	if (compressor_need_sample_reset == TRUE) {
2634 		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2635 		overage_decompressions_during_sample_period = 0;
2636 
2637 		start_of_sample_period_sec = cur_ts_sec;
2638 		start_of_sample_period_nsec = cur_ts_nsec;
2639 		sample_period_decompression_count = 0;
2640 		sample_period_compression_count = 0;
2641 		last_eval_decompression_count = 0;
2642 		last_eval_compression_count = 0;
2643 		compressor_need_sample_reset = FALSE;
2644 	}
2645 	if (need_eval_reset == TRUE) {
2646 		start_of_eval_period_sec = cur_ts_sec;
2647 		start_of_eval_period_nsec = cur_ts_nsec;
2648 	}
2649 }
2650 
2651 
2652 int             compaction_swapper_init_now = 0;
2653 int             compaction_swapper_running = 0;
2654 int             compaction_swapper_awakened = 0;
2655 int             compaction_swapper_abort = 0;
2656 
2657 bool
2658 vm_compressor_swapout_is_ripe()
2659 {
2660 	bool is_ripe = false;
2661 	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2662 		c_segment_t     c_seg;
2663 		clock_sec_t     now;
2664 		clock_sec_t     age;
2665 		clock_nsec_t    nsec;
2666 
2667 		clock_get_system_nanotime(&now, &nsec);
2668 		age = 0;
2669 
2670 		lck_mtx_lock_spin_always(c_list_lock);
2671 
2672 		if (!queue_empty(&c_age_list_head)) {
2673 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
2674 
2675 			age = now - c_seg->c_creation_ts;
2676 		}
2677 		lck_mtx_unlock_always(c_list_lock);
2678 
2679 		if (age >= vm_ripe_target_age) {
2680 			is_ripe = true;
2681 		}
2682 	}
2683 	return is_ripe;
2684 }
2685 
2686 static bool
2687 compressor_swapout_conditions_met(void)
2688 {
2689 	bool should_swap = false;
2690 	if (COMPRESSOR_NEEDS_TO_SWAP()) {
2691 		should_swap = true;
2692 		vmcs_stats.compressor_swap_threshold_exceeded++;
2693 	}
2694 	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2695 		should_swap = true;
2696 		vmcs_stats.external_q_throttled++;
2697 	}
2698 	if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2699 		should_swap = true;
2700 		vmcs_stats.free_count_below_reserve++;
2701 	}
2702 	return should_swap;
2703 }
2704 
2705 static bool
2706 compressor_needs_to_swap()
2707 {
2708 	bool should_swap = false;
2709 	if (vm_compressor_swapout_is_ripe()) {
2710 		should_swap = true;
2711 		goto check_if_low_space;
2712 	}
2713 
2714 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
2715 		should_swap =  compressor_swapout_conditions_met();
2716 		if (should_swap) {
2717 			goto check_if_low_space;
2718 		}
2719 	}
2720 
2721 #if (XNU_TARGET_OS_OSX && __arm64__)
2722 	/*
2723 	 * Thrashing detection disabled.
2724 	 */
2725 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2726 
2727 	if (vm_compressor_is_thrashing()) {
2728 		should_swap = true;
2729 		vmcs_stats.thrashing_detected++;
2730 	}
2731 
2732 #if CONFIG_PHANTOM_CACHE
2733 	if (vm_phantom_cache_check_pressure()) {
2734 		os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
2735 		should_swap = true;
2736 	}
2737 #endif
2738 	if (swapout_target_age) {
2739 		should_swap = true;
2740 	}
2741 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2742 
2743 check_if_low_space:
2744 
2745 #if CONFIG_JETSAM
2746 	if (should_swap || vm_compressor_low_on_space()) {
2747 		if (vm_compressor_thrashing_detected == FALSE) {
2748 			vm_compressor_thrashing_detected = TRUE;
2749 
2750 			if (swapout_target_age) {
2751 				compressor_thrashing_induced_jetsam++;
2752 			} else if (vm_compressor_low_on_space()) {
2753 				compressor_thrashing_induced_jetsam++;
2754 			} else {
2755 				filecache_thrashing_induced_jetsam++;
2756 			}
2757 			/*
2758 			 * Wake up the memorystatus thread so that it can return
2759 			 * the system to a healthy state (by killing processes).
2760 			 */
2761 			memorystatus_thread_wake();
2762 		}
2763 		/*
2764 		 * let the jetsam take precedence over
2765 		 * any major compactions we might have
2766 		 * been able to do... otherwise we run
2767 		 * the risk of doing major compactions
2768 		 * on segments we're about to free up
2769 		 * due to the jetsam activity.
2770 		 */
2771 		should_swap = false;
2772 		if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
2773 			memorystatus_respond_to_swap_exhaustion();
2774 		}
2775 	}
2776 #else /* CONFIG_JETSAM */
2777 	if (should_swap && vm_swap_low_on_space()) {
2778 		memorystatus_respond_to_swap_exhaustion();
2779 	}
2780 #endif /* CONFIG_JETSAM */
2781 
2782 	if (should_swap == false) {
2783 		/*
2784 		 * vm_compressor_needs_to_major_compact returns true only if we're
2785 		 * about to run out of available compressor segments... in this
2786 		 * case, we absolutely need to run a major compaction even if
2787 		 * we've just kicked off a jetsam or we don't otherwise need to
2788 		 * swap... terminating objects releases
2789 		 * pages back to the uncompressed cache, but does not guarantee
2790 		 * that we will free up even a single compression segment
2791 		 */
2792 		should_swap = vm_compressor_needs_to_major_compact();
2793 		if (should_swap) {
2794 			vmcs_stats.fragmentation_detected++;
2795 		}
2796 	}
2797 
2798 	/*
2799 	 * returning TRUE when swap_supported == FALSE
2800 	 * will cause the major compaction engine to
2801 	 * run, but will not trigger any swapping...
2802 	 * segments that have been major compacted
2803 	 * will be moved to the majorcompact queue
2804 	 */
2805 	return should_swap;
2806 }
2807 
2808 #if CONFIG_JETSAM
2809 /*
2810  * This function is called from the jetsam thread after killing something to
2811  * mitigate thrashing.
2812  *
2813  * We need to restart our thrashing detection heuristics since memory pressure
2814  * has potentially changed significantly, and we don't want to detect on old
2815  * data from before the jetsam.
2816  */
2817 void
2818 vm_thrashing_jetsam_done(void)
2819 {
2820 	vm_compressor_thrashing_detected = FALSE;
2821 
2822 	/* Were we compressor-thrashing or filecache-thrashing? */
2823 	if (swapout_target_age) {
2824 		swapout_target_age = 0;
2825 		compressor_need_sample_reset = TRUE;
2826 	}
2827 #if CONFIG_PHANTOM_CACHE
2828 	else {
2829 		vm_phantom_cache_restart_sample();
2830 	}
2831 #endif
2832 }
2833 #endif /* CONFIG_JETSAM */
2834 
2835 uint32_t vm_wake_compactor_swapper_calls = 0;
2836 uint32_t vm_run_compactor_already_running = 0;
2837 uint32_t vm_run_compactor_empty_minor_q = 0;
2838 uint32_t vm_run_compactor_did_compact = 0;
2839 uint32_t vm_run_compactor_waited = 0;
2840 
2841 /* run minor compaction right now, if the compaction-swapper thread is not already running */
2842 void
2843 vm_run_compactor(void)
2844 {
2845 	if (c_segment_count == 0) {
2846 		return;
2847 	}
2848 
2849 	if (os_atomic_load(&c_minor_count, relaxed) == 0) {
2850 		vm_run_compactor_empty_minor_q++;
2851 		return;
2852 	}
2853 
2854 	lck_mtx_lock_spin_always(c_list_lock);
2855 
2856 	if (compaction_swapper_running) {
2857 		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2858 			vm_run_compactor_already_running++;
2859 
2860 			lck_mtx_unlock_always(c_list_lock);
2861 			return;
2862 		}
2863 		vm_run_compactor_waited++;
2864 
2865 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2866 
2867 		lck_mtx_unlock_always(c_list_lock);
2868 
2869 		thread_block(THREAD_CONTINUE_NULL);
2870 
2871 		return;
2872 	}
2873 	vm_run_compactor_did_compact++;
2874 
2875 	fastwake_warmup = FALSE;
2876 	compaction_swapper_running = 1;
2877 
2878 	vm_compressor_do_delayed_compactions(FALSE);
2879 
2880 	compaction_swapper_running = 0;
2881 
2882 	lck_mtx_unlock_always(c_list_lock);
2883 
2884 	thread_wakeup((event_t)&compaction_swapper_running);
2885 }
2886 
2887 
2888 void
2889 vm_wake_compactor_swapper(void)
2890 {
2891 	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2892 		return;
2893 	}
2894 
2895 	if (os_atomic_load(&c_minor_count, relaxed) ||
2896 	    vm_compressor_needs_to_major_compact()) {
2897 		lck_mtx_lock_spin_always(c_list_lock);
2898 
2899 		fastwake_warmup = FALSE;
2900 
2901 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2902 			vm_wake_compactor_swapper_calls++;
2903 
2904 			compaction_swapper_awakened = 1;
2905 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2906 		}
2907 		lck_mtx_unlock_always(c_list_lock);
2908 	}
2909 }
2910 
2911 
2912 void
2913 vm_consider_swapping()
2914 {
2915 	assert(VM_CONFIG_SWAP_IS_PRESENT);
2916 
2917 	lck_mtx_lock_spin_always(c_list_lock);
2918 
2919 	compaction_swapper_abort = 1;
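	/*
	 * Ask any in-flight compactor/swapper pass to abort, then wait for
	 * compaction_swapper_running to clear before taking over the role
	 * ourselves under c_list_lock.
	 */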
2920 
2921 	while (compaction_swapper_running) {
2922 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2923 
2924 		lck_mtx_unlock_always(c_list_lock);
2925 
2926 		thread_block(THREAD_CONTINUE_NULL);
2927 
2928 		lck_mtx_lock_spin_always(c_list_lock);
2929 	}
2930 	compaction_swapper_abort = 0;
2931 	compaction_swapper_running = 1;
2932 
2933 	vm_swapout_ripe_segments = TRUE;
2934 
2935 	vm_compressor_process_major_segments(vm_swapout_ripe_segments);
2936 
2937 	vm_compressor_compact_and_swap(FALSE);
2938 
2939 	compaction_swapper_running = 0;
2940 
2941 	vm_swapout_ripe_segments = FALSE;
2942 
2943 	lck_mtx_unlock_always(c_list_lock);
2944 
2945 	thread_wakeup((event_t)&compaction_swapper_running);
2946 }
2947 
2948 
2949 void
2950 vm_consider_waking_compactor_swapper(void)
2951 {
2952 	bool need_wakeup = false;
2953 
2954 	if (c_segment_count == 0) {
2955 		return;
2956 	}
2957 
2958 	if (compaction_swapper_running || compaction_swapper_awakened) {
2959 		return;
2960 	}
2961 
2962 	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2963 		compaction_swapper_init_now = 1;
2964 		need_wakeup = true;
2965 	} else if (vm_compressor_needs_to_minor_compact() ||
2966 	    compressor_needs_to_swap()) {
2967 		need_wakeup = true;
2968 	}
2969 
2970 	if (need_wakeup) {
2971 		lck_mtx_lock_spin_always(c_list_lock);
2972 
2973 		fastwake_warmup = FALSE;
2974 
2975 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2976 			memoryshot(DBG_VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2977 
2978 			compaction_swapper_awakened = 1;
2979 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2980 		}
2981 		lck_mtx_unlock_always(c_list_lock);
2982 	}
2983 }
2984 
2985 
2986 #define C_SWAPOUT_LIMIT                 4
2987 #define DELAYED_COMPACTIONS_PER_PASS    30
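/*
 * After every DELAYED_COMPACTIONS_PER_PASS minor compactions, the loop in
 * vm_compressor_do_delayed_compactions() re-checks whether swapping out is
 * more urgent; it only bails out in favor of swapping while fewer than
 * C_SWAPOUT_LIMIT segments are already queued for swapout.
 */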
2988 
2989 /* process segments that are in the minor compaction queue */
2990 void
2991 vm_compressor_do_delayed_compactions(boolean_t flush_all)
2992 {
2993 	c_segment_t     c_seg;
2994 	int             number_compacted = 0;
2995 	boolean_t       needs_to_swap = FALSE;
2996 	uint32_t        c_swapout_count = 0;
2997 
2998 
2999 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
3000 
3001 #if XNU_TARGET_OS_OSX
3002 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
3003 #endif /* XNU_TARGET_OS_OSX */
3004 
3005 	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
3006 		c_seg = (c_segment_t)queue_first(&c_minor_list_head);
3007 
3008 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3009 
3010 		if (c_seg->c_busy) {
3011 			lck_mtx_unlock_always(c_list_lock);
3012 			c_seg_wait_on_busy(c_seg);
3013 			lck_mtx_lock_spin_always(c_list_lock);
3014 
3015 			continue;
3016 		}
3017 		C_SEG_BUSY(c_seg);
3018 
3019 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
3020 
3021 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3022 		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
3023 			if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
3024 				needs_to_swap = TRUE;
3025 			}
3026 
3027 			number_compacted = 0;
3028 		}
3029 		lck_mtx_lock_spin_always(c_list_lock);
3030 	}
3031 
3032 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
3033 }
3034 
3035 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3036 
3037 static bool
3038 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
3039 {
3040 	/*
3041 	 * Major compaction
3042 	 */
3043 	bool keep_compacting = true, fully_compacted = true;
3044 	queue_head_t *list_head = NULL;
3045 	c_segment_t c_seg_next;
3046 	uint64_t        bytes_to_free = 0, bytes_freed = 0;
3047 	uint32_t        number_considered = 0;
3048 
3049 	if (c_seg->c_state == C_ON_AGE_Q) {
3050 		assert(!c_seg->c_has_donated_pages);
3051 		list_head = &c_age_list_head;
3052 	} else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
3053 		assert(c_seg->c_has_donated_pages);
3054 		list_head = &c_late_swappedin_list_head;
3055 	}
3056 
3057 	while (keep_compacting == TRUE) {
3058 		assert(c_seg->c_busy);
3059 
3060 		/* look for another segment to consolidate */
3061 
3062 		c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3063 
3064 		if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
3065 			break;
3066 		}
3067 
3068 		assert(c_seg_next->c_state == c_seg->c_state);
3069 
3070 		number_considered++;
3071 
3072 		if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3073 			break;
3074 		}
3075 
3076 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3077 
3078 		if (c_seg_next->c_busy) {
3079 			/*
3080 			 * We are going to block for our neighbor.
3081 			 * If our c_seg is wanted, we should unbusy
3082 			 * it because we don't know how long we might
3083 			 * have to block here.
3084 			 */
3085 			if (c_seg->c_wanted) {
3086 				lck_mtx_unlock_always(&c_seg_next->c_lock);
3087 				fully_compacted = false;
3088 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3089 				*bail_wanted_cseg = true;
3090 				break;
3091 			}
3092 
3093 			lck_mtx_unlock_always(c_list_lock);
3094 
3095 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3096 
3097 			c_seg_wait_on_busy(c_seg_next);
3098 			lck_mtx_lock_spin_always(c_list_lock);
3099 
3100 			continue;
3101 		}
3102 		/* grab that segment */
3103 		C_SEG_BUSY(c_seg_next);
3104 
3105 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3106 		if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3107 			/*
3108 			 * found an empty c_segment and freed it
3109 			 * so we can't continue to use c_seg_next
3110 			 */
3111 			bytes_freed += bytes_to_free;
3112 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3113 			continue;
3114 		}
3115 
3116 		/* unlock the list ... */
3117 		lck_mtx_unlock_always(c_list_lock);
3118 
3119 		/* do the major compaction */
3120 
3121 		keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3122 
3123 		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3124 
3125 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3126 
3127 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3128 		/*
3129 		 * run a minor compaction on the donor segment
3130 		 * since we pulled at least some of its
3131 		 * data into our target...  if we've emptied
3132 		 * it, now is a good time to free it, which
3133 		 * c_seg_minor_compaction_and_unlock also takes care of
3134 		 *
3135 		 * by passing TRUE, we ask for c_busy to be cleared
3136 		 * and c_wanted to be taken care of
3137 		 */
3138 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3139 		if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3140 			bytes_freed += bytes_to_free;
3141 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3142 		} else {
3143 			bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3144 			bytes_freed += bytes_to_free;
3145 		}
3146 
3147 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3148 
3149 		/* relock the list */
3150 		lck_mtx_lock_spin_always(c_list_lock);
3151 
3152 		if (c_seg->c_wanted) {
3153 			/*
3154 			 * Our c_seg is in demand. Let's
3155 			 * unbusy it and wake up the waiters
3156 			 * instead of continuing the compaction
3157 			 * because we could be in this loop
3158 			 * for a while.
3159 			 */
3160 			fully_compacted = false;
3161 			*bail_wanted_cseg = true;
3162 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3163 			break;
3164 		}
3165 	} /* major compaction */
3166 
3167 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3168 
3169 	*c_seg_considered += number_considered;
3170 	*total_bytes_freed += bytes_freed;
3171 
3172 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3173 	return fully_compacted;
3174 }
3175 
3176 #define TIME_SUB(rsecs, secs, rfrac, frac, unit)                        \
3177 	MACRO_BEGIN                                                     \
3178 	if ((int)((rfrac) -= (frac)) < 0) {                             \
3179 	        (rfrac) += (unit);                                      \
3180 	        (rsecs) -= 1;                                           \
3181 	}                                                               \
3182 	(rsecs) -= (secs);                                              \
3183 	MACRO_END
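/*
 * Worked example (hypothetical values): with start = 5s + 900000000ns and
 * end = 7s + 100000000ns, TIME_SUB(end_sec, start_sec, end_nsec, start_nsec,
 * NSEC_PER_SEC) borrows one second into the fraction, leaving end_sec = 1
 * and end_nsec = 200000000 -- i.e. an elapsed time of 1.2 seconds.
 */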
3184 
3185 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3186 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3187 uint64_t c_process_major_reports = 0;
3188 clock_sec_t c_process_major_max_sec = 0;
3189 clock_nsec_t c_process_major_max_nsec = 0;
3190 uint32_t c_process_major_peak_segcount = 0;
3191 static void
3192 vm_compressor_process_major_segments(bool ripe_age_only)
3193 {
3194 	c_segment_t c_seg = NULL;
3195 	int count = 0, total = 0, breaks = 0;
3196 	clock_sec_t start_sec, end_sec;
3197 	clock_nsec_t start_nsec, end_nsec;
3198 	clock_nsec_t report_over_ns;
3199 
3200 	if (queue_empty(&c_major_list_head)) {
3201 		return;
3202 	}
3203 
3204 	// printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3205 	if (c_process_major_report_over_ms != 0) {
3206 		report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3207 	} else {
3208 		report_over_ns = (clock_nsec_t)-1;
3209 	}
3210 
3211 	if (ripe_age_only) {
3212 		if (c_overage_swapped_count >= c_overage_swapped_limit) {
3213 			/*
3214 			 * Return while we wait for the overage segments
3215 			 * in our queue to get pushed out first.
3216 			 */
3217 			return;
3218 		}
3219 	}
3220 
3221 	clock_get_system_nanotime(&start_sec, &start_nsec);
3222 	while (!queue_empty(&c_major_list_head)) {
3223 		if (!ripe_age_only) {
3224 			/*
3225 			 * Start from the end to preserve aging order. The newer
3226 			 * segments are at the tail and so need to be inserted in
3227 			 * the aging queue in this way so we have the older segments
3228 			 * at the end of the AGE_Q.
3229 			 */
3230 			c_seg = (c_segment_t)queue_last(&c_major_list_head);
3231 		} else {
3232 			c_seg = (c_segment_t)queue_first(&c_major_list_head);
3233 			if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3234 				/*
3235 				 * We have found the first segment in our queue that is not ripe. Segments after it
3236 				 * will be the same. So let's bail here. Return with c_list_lock held.
3237 				 */
3238 				break;
3239 			}
3240 		}
3241 
3242 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3243 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3244 		lck_mtx_unlock_always(&c_seg->c_lock);
3245 
3246 		count++;
3247 		if (count == c_process_major_yield_after ||
3248 		    queue_empty(&c_major_list_head)) {
3249 			/* done or time to take a break */
3250 		} else {
3251 			/* keep going */
3252 			continue;
3253 		}
3254 
3255 		total += count;
3256 		clock_get_system_nanotime(&end_sec, &end_nsec);
3257 		TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3258 		if (end_sec > c_process_major_max_sec) {
3259 			c_process_major_max_sec = end_sec;
3260 			c_process_major_max_nsec = end_nsec;
3261 		} else if (end_sec == c_process_major_max_sec &&
3262 		    end_nsec > c_process_major_max_nsec) {
3263 			c_process_major_max_nsec = end_nsec;
3264 		}
3265 		if (total > c_process_major_peak_segcount) {
3266 			c_process_major_peak_segcount = total;
3267 		}
3268 		if (end_sec > 0 ||
3269 		    end_nsec >= report_over_ns) {
3270 			/* we used more than expected */
3271 			c_process_major_reports++;
3272 			printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3273 			    __FUNCTION__, count, total,
3274 			    end_sec, end_nsec, breaks);
3275 		}
3276 		if (queue_empty(&c_major_list_head)) {
3277 			/* done */
3278 			break;
3279 		}
3280 		/* take a break to allow someone else to grab the lock */
3281 		lck_mtx_unlock_always(c_list_lock);
3282 		mutex_pause(0); /* 10 microseconds */
3283 		lck_mtx_lock_spin_always(c_list_lock);
3284 		/* start again */
3285 		clock_get_system_nanotime(&start_sec, &start_nsec);
3286 		count = 0;
3287 		breaks++;
3288 	}
3289 }
3290 
3291 /*
3292  * macOS special swappable csegs -> early_swappedin queue
3293  * non-macOS special swappable+non-freezer csegs -> late_swappedin queue
3294  * Processing special csegs means minor compacting each cseg and then
3295  * major compacting it and putting it on the early or late
3296  * (depending on platform) swapout queue. tag:DONATE
3297  */
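/*
 * A rough sketch of the loop below (orientation only, no extra logic):
 *
 *	while (special swappedin queue not empty):
 *		take the first c_seg; if busy, wait and retry
 *		minor compact it (if that empties and frees it, move on)
 *		major compact it via vm_compressor_major_compact_cseg()
 *		if still viable and swap is active, move it to C_ON_SWAPOUT_Q
 *		periodically drop c_list_lock and yield to waiters
 */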
3298 static void
3299 vm_compressor_process_special_swapped_in_segments_locked(void)
3300 {
3301 	c_segment_t c_seg = NULL;
3302 	bool            switch_state = true, bail_wanted_cseg = false;
3303 	unsigned int    number_considered = 0, yield_after_considered_per_pass = 0;
3304 	uint64_t        bytes_freed = 0;
3305 	queue_head_t    *special_swappedin_list_head;
3306 
3307 #if XNU_TARGET_OS_OSX
3308 	special_swappedin_list_head = &c_early_swappedin_list_head;
3309 #else /* XNU_TARGET_OS_OSX */
3310 	if (memorystatus_swap_all_apps) {
3311 		special_swappedin_list_head = &c_late_swappedin_list_head;
3312 	} else {
3313 		/* called on unsupported config */
3314 		return;
3315 	}
3316 #endif /* XNU_TARGET_OS_OSX */
3317 
3318 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3319 	while (!queue_empty(special_swappedin_list_head)) {
3320 		c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3321 
3322 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3323 
3324 		if (c_seg->c_busy) {
3325 			lck_mtx_unlock_always(c_list_lock);
3326 			c_seg_wait_on_busy(c_seg);
3327 			lck_mtx_lock_spin_always(c_list_lock);
3328 			continue;
3329 		}
3330 
3331 		C_SEG_BUSY(c_seg);
3332 		lck_mtx_unlock_always(&c_seg->c_lock);
3333 		lck_mtx_unlock_always(c_list_lock);
3334 
3335 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3336 
3337 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3338 
3339 		if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3340 			/*
3341 			 * found an empty c_segment and freed it
3342 			 * so go grab the next guy in the queue
3343 			 */
3344 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3345 			lck_mtx_lock_spin_always(c_list_lock);
3346 			continue;
3347 		}
3348 
3349 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3350 		lck_mtx_lock_spin_always(c_list_lock);
3351 
3352 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3353 		assert(c_seg->c_busy);
3354 		assert(!c_seg->c_on_minorcompact_q);
3355 
3356 		if (switch_state) {
3357 			if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3358 				/*
3359 				 * Ordinarily we let swapped in segments age out + get
3360 				 * major compacted with the rest of the c_segs on the ageQ.
3361 				 * But the early donated c_segs, if well compacted, should be
3362 				 * kept ready to be swapped out if needed. These are typically
3363 				 * describing memory belonging to a leaky app (macOS) or a swap-
3364 				 * capable app (iPadOS) and for the latter we can keep these
3365 				 * around longer because we control the triggers in the memorystatus
3366 				 * subsystem
3367 				 */
3368 				c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3369 			}
3370 		}
3371 
3372 		C_SEG_WAKEUP_DONE(c_seg);
3373 
3374 		lck_mtx_unlock_always(&c_seg->c_lock);
3375 
3376 		if (number_considered >= yield_after_considered_per_pass) {
3377 			if (bail_wanted_cseg) {
3378 				/*
3379 				 * We stopped major compactions on a c_seg
3380 				 * that is wanted. We don't know the priority
3381 				 * of the waiter unfortunately but we are at
3382 				 * a very high priority and so, just in case
3383 				 * the waiter is a critical system daemon or
3384 				 * UI thread, let's give up the CPU in case
3385 				 * the system is running a few CPU intensive
3386 				 * tasks.
3387 				 */
3388 				bail_wanted_cseg = false;
3389 				lck_mtx_unlock_always(c_list_lock);
3390 
3391 				mutex_pause(2); /* 100us yield */
3392 
3393 				lck_mtx_lock_spin_always(c_list_lock);
3394 			}
3395 
3396 			number_considered = 0;
3397 		}
3398 	}
3399 }
3400 
3401 void
3402 vm_compressor_process_special_swapped_in_segments(void)
3403 {
3404 	lck_mtx_lock_spin_always(c_list_lock);
3405 	vm_compressor_process_special_swapped_in_segments_locked();
3406 	lck_mtx_unlock_always(c_list_lock);
3407 }
3408 
3409 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT   10
3410 /*
3411  * Processing regular csegs means aging them: a segment is moved back to the AGE_Q once it has been resident C_SEGMENT_SWAPPEDIN_AGE_LIMIT seconds since swapin.
3412  */
3413 static void
3414 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3415 {
3416 	c_segment_t     c_seg;
3417 	clock_sec_t     now;
3418 	clock_nsec_t    nsec;
3419 
3420 	clock_get_system_nanotime(&now, &nsec);
3421 
3422 	while (!queue_empty(&c_regular_swappedin_list_head)) {
3423 		c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3424 
3425 		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
3426 			break;
3427 		}
3428 
3429 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3430 
3431 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3432 		c_seg->c_agedin_ts = (uint32_t) now;
3433 
3434 		lck_mtx_unlock_always(&c_seg->c_lock);
3435 	}
3436 }
3437 
3438 
3439 extern  int     vm_num_swap_files;
3440 extern  int     vm_num_pinned_swap_files;
3441 extern  int     vm_swappin_enabled;
3442 
3443 extern  unsigned int    vm_swapfile_total_segs_used;
3444 extern  unsigned int    vm_swapfile_total_segs_alloced;
3445 
3446 
3447 void
3448 vm_compressor_flush(void)
3449 {
3450 	uint64_t        vm_swap_put_failures_at_start;
3451 	wait_result_t   wait_result = 0;
3452 	AbsoluteTime    startTime, endTime;
3453 	clock_sec_t     now_sec;
3454 	clock_nsec_t    now_nsec;
3455 	uint64_t        nsec;
3456 	c_segment_t     c_seg, c_seg_next;
3457 
3458 	HIBLOG("vm_compressor_flush - starting\n");
3459 
3460 	clock_get_uptime(&startTime);
3461 
3462 	lck_mtx_lock_spin_always(c_list_lock);
3463 
3464 	fastwake_warmup = FALSE;
3465 	compaction_swapper_abort = 1;
3466 
3467 	while (compaction_swapper_running) {
3468 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3469 
3470 		lck_mtx_unlock_always(c_list_lock);
3471 
3472 		thread_block(THREAD_CONTINUE_NULL);
3473 
3474 		lck_mtx_lock_spin_always(c_list_lock);
3475 	}
3476 	compaction_swapper_abort = 0;
3477 	compaction_swapper_running = 1;
3478 
3479 	hibernate_flushing = TRUE;
3480 	hibernate_no_swapspace = FALSE;
3481 	hibernate_flush_timed_out = FALSE;
3482 	c_generation_id_flush_barrier = c_generation_id + 1000;
3483 
3484 	clock_get_system_nanotime(&now_sec, &now_nsec);
3485 	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3486 
3487 	vm_swap_put_failures_at_start = vm_swap_put_failures;
3488 
3489 	/*
3490 	 * We are about to hibernate and so we want all segments flushed to disk.
3491 	 * Segments that are on the major compaction queue won't be considered in
3492 	 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3493 	 * the ageQ for consideration.
3494 	 */
3495 	if (!queue_empty(&c_major_list_head)) {
3496 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
3497 
3498 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3499 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3500 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3501 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3502 			lck_mtx_unlock_always(&c_seg->c_lock);
3503 			c_seg = c_seg_next;
3504 		}
3505 	}
3506 	vm_compressor_compact_and_swap(TRUE);
3507 	/* need to wait here since the swap thread may also be running in parallel and handling segments */
3508 	while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3509 		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3510 
3511 		lck_mtx_unlock_always(c_list_lock);
3512 
3513 		wait_result = thread_block(THREAD_CONTINUE_NULL);
3514 
3515 		lck_mtx_lock_spin_always(c_list_lock);
3516 
3517 		if (wait_result == THREAD_TIMED_OUT) {
3518 			break;
3519 		}
3520 	}
3521 	hibernate_flushing = FALSE;
3522 	compaction_swapper_running = 0;
3523 
3524 	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3525 		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3526 		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3527 	}
3528 
3529 	lck_mtx_unlock_always(c_list_lock);
3530 
3531 	thread_wakeup((event_t)&compaction_swapper_running);
3532 
3533 	clock_get_uptime(&endTime);
3534 	SUB_ABSOLUTETIME(&endTime, &startTime);
3535 	absolutetime_to_nanoseconds(endTime, &nsec);
3536 
3537 	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3538 	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3539 }
3540 
3541 
3542 int             compaction_swap_trigger_thread_awakened = 0;
3543 
3544 static void
3545 vm_compressor_swap_trigger_thread(void)
3546 {
3547 	current_thread()->options |= TH_OPT_VMPRIV;
3548 
3549 	/*
3550 	 * compaction_swapper_init_now is set when the first call to
3551 	 * vm_consider_waking_compactor_swapper is made from
3552 	 * vm_pageout_scan... since this function is called upon
3553 	 * thread creation, we want to make sure to delay adjusting
3554 	 * the tuneables until we are awakened via vm_pageout_scan
3555 	 * so that we are at a point where the vm_swapfile_open will
3556 	 * be operating on the correct directory (in case the default
3557 	 * of using the VM volume is overridden by the dynamic_pager)
3558 	 */
3559 	if (compaction_swapper_init_now) {
3560 		vm_compaction_swapper_do_init();
3561 
3562 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3563 			thread_vm_bind_group_add();
3564 		}
3565 #if CONFIG_THREAD_GROUPS
3566 		thread_group_vm_add();
3567 #endif
3568 		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
3569 		compaction_swapper_init_now = 0;
3570 	}
3571 	lck_mtx_lock_spin_always(c_list_lock);
3572 
3573 	compaction_swap_trigger_thread_awakened++;
3574 	compaction_swapper_awakened = 0;
3575 
3576 	if (compaction_swapper_running == 0) {
3577 		compaction_swapper_running = 1;
3578 
3579 		vm_compressor_compact_and_swap(FALSE);
3580 
3581 		compaction_swapper_running = 0;
3582 	}
3583 	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3584 
3585 	if (compaction_swapper_running == 0) {
3586 		thread_wakeup((event_t)&compaction_swapper_running);
3587 	}
3588 
3589 	lck_mtx_unlock_always(c_list_lock);
3590 
3591 	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3592 
3593 	/* NOTREACHED */
3594 }
3595 
3596 
3597 void
3598 vm_compressor_record_warmup_start(void)
3599 {
3600 	c_segment_t     c_seg;
3601 
3602 	lck_mtx_lock_spin_always(c_list_lock);
3603 
3604 	if (first_c_segment_to_warm_generation_id == 0) {
3605 		if (!queue_empty(&c_age_list_head)) {
3606 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3607 
3608 			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3609 		} else {
3610 			first_c_segment_to_warm_generation_id = 0;
3611 		}
3612 
3613 		fastwake_recording_in_progress = TRUE;
3614 	}
3615 	lck_mtx_unlock_always(c_list_lock);
3616 }
3617 
3618 
3619 void
3620 vm_compressor_record_warmup_end(void)
3621 {
3622 	c_segment_t     c_seg;
3623 
3624 	lck_mtx_lock_spin_always(c_list_lock);
3625 
3626 	if (fastwake_recording_in_progress == TRUE) {
3627 		if (!queue_empty(&c_age_list_head)) {
3628 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3629 
3630 			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3631 		} else {
3632 			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3633 		}
3634 
3635 		fastwake_recording_in_progress = FALSE;
3636 
3637 		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3638 	}
3639 	lck_mtx_unlock_always(c_list_lock);
3640 }
3641 
3642 
3643 #define DELAY_TRIM_ON_WAKE_NS (25 * NSEC_PER_SEC)
3644 
3645 void
3646 vm_compressor_delay_trim(void)
3647 {
3648 	uint64_t now = mach_absolute_time();
3649 	uint64_t delay_abstime;
3650 	nanoseconds_to_absolutetime(DELAY_TRIM_ON_WAKE_NS, &delay_abstime);
3651 	dont_trim_until_ts = now + delay_abstime;
3652 }
3653 
3654 
3655 void
3656 vm_compressor_do_warmup(void)
3657 {
3658 	lck_mtx_lock_spin_always(c_list_lock);
3659 
3660 	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3661 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3662 
3663 		lck_mtx_unlock_always(c_list_lock);
3664 		return;
3665 	}
3666 
3667 	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3668 		fastwake_warmup = TRUE;
3669 
3670 		compaction_swapper_awakened = 1;
3671 		thread_wakeup((event_t)&c_compressor_swap_trigger);
3672 	}
3673 	lck_mtx_unlock_always(c_list_lock);
3674 }
3675 
3676 void
3677 do_fastwake_warmup_all(void)
3678 {
3679 	lck_mtx_lock_spin_always(c_list_lock);
3680 
3681 	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3682 		lck_mtx_unlock_always(c_list_lock);
3683 		return;
3684 	}
3685 
3686 	fastwake_warmup = TRUE;
3687 
3688 	do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3689 
3690 	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3691 
3692 	fastwake_warmup = FALSE;
3693 
3694 	lck_mtx_unlock_always(c_list_lock);
3695 }
3696 
3697 void
3698 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3699 {
3700 	c_segment_t     c_seg = NULL;
3701 	AbsoluteTime    startTime, endTime;
3702 	uint64_t        nsec;
3703 
3704 
3705 	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3706 
3707 	clock_get_uptime(&startTime);
3708 
3709 	lck_mtx_unlock_always(c_list_lock);
3710 
3711 	proc_set_thread_policy(current_thread(),
3712 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3713 
3714 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
3715 
3716 	lck_mtx_lock_spin_always(c_list_lock);
3717 
3718 	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3719 		c_seg = (c_segment_t) queue_first(c_queue);
3720 
3721 		if (consider_all_cseg == FALSE) {
3722 			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3723 			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3724 				break;
3725 			}
3726 
3727 			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3728 				break;
3729 			}
3730 		}
3731 
3732 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3733 		lck_mtx_unlock_always(c_list_lock);
3734 
3735 		if (c_seg->c_busy) {
3736 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3737 			c_seg_wait_on_busy(c_seg);
3738 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3739 		} else {
3740 			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3741 				lck_mtx_unlock_always(&c_seg->c_lock);
3742 			}
3743 			c_segment_warmup_count++;
3744 
3745 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3746 			vm_pageout_io_throttle();
3747 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3748 		}
3749 		lck_mtx_lock_spin_always(c_list_lock);
3750 	}
3751 	lck_mtx_unlock_always(c_list_lock);
3752 
3753 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3754 
3755 	proc_set_thread_policy(current_thread(),
3756 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3757 
3758 	clock_get_uptime(&endTime);
3759 	SUB_ABSOLUTETIME(&endTime, &startTime);
3760 	absolutetime_to_nanoseconds(endTime, &nsec);
3761 
3762 	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3763 
3764 	lck_mtx_lock_spin_always(c_list_lock);
3765 
3766 	if (consider_all_cseg == FALSE) {
3767 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3768 	}
3769 }
3770 
3771 extern bool     vm_swapout_thread_running;
3772 extern boolean_t        compressor_store_stop_compaction;
3773 
3774 void
3775 vm_compressor_compact_and_swap(boolean_t flush_all)
3776 {
3777 	c_segment_t     c_seg;
3778 	bool            switch_state, bail_wanted_cseg = false;
3779 	clock_sec_t     now;
3780 	clock_nsec_t    nsec;
3781 	mach_timespec_t start_ts, end_ts;
3782 	unsigned int    number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3783 	uint64_t        bytes_freed, delta_usec;
3784 	uint32_t        c_swapout_count = 0;
3785 
3786 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3787 
3788 	if (fastwake_warmup == TRUE) {
3789 		uint64_t        starting_warmup_count;
3790 
3791 		starting_warmup_count = c_segment_warmup_count;
3792 
3793 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3794 		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3795 		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3796 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3797 
3798 		fastwake_warmup = FALSE;
3799 	}
3800 
3801 #if (XNU_TARGET_OS_OSX && __arm64__)
3802 	/*
3803 	 * Re-considering major csegs showed benefits on all platforms by
3804 	 * significantly reducing fragmentation and getting back memory.
3805 	 * However, on smaller devices, eg watch, there was increased power
3806 	 * use for the additional compactions. And the turnover in csegs on
3807 	 * those smaller platforms is high enough in the decompression/free
3808 	 * path that we can skip reconsidering them here because we already
3809 	 * consider them for major compaction in those paths.
3810 	 */
3811 	vm_compressor_process_major_segments(false /*all segments and not just the ripe-aged ones*/);
3812 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3813 
3814 	/*
3815 	 * it's possible for the c_age_list_head to be empty if we
3816 	 * hit our limits for growing the compressor pool and we subsequently
3817 	 * hibernated... on the next hibernation we could see the queue as
3818  * empty and not proceed even though we have a bunch of segments on
3819 	 * the swapped in queue that need to be dealt with.
3820 	 */
3821 	vm_compressor_do_delayed_compactions(flush_all);
3822 	vm_compressor_process_special_swapped_in_segments_locked();
3823 	vm_compressor_process_regular_swapped_in_segments(flush_all);
3824 
3825 	/*
3826 	 * we only need to grab the timestamp once per
3827 	 * invocation of this function since the
3828 	 * timescale we're interested in is measured
3829 	 * in days
3830 	 */
3831 	clock_get_system_nanotime(&now, &nsec);
3832 
3833 	start_ts.tv_sec = (int) now;
3834 	start_ts.tv_nsec = nsec;
3835 	delta_usec = 0;
3836 	number_considered = 0;
3837 	wanted_cseg_found = 0;
3838 	number_yields = 0;
3839 	bytes_freed = 0;
3840 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3841 
3842 #if 0
3843 	/**
3844 	 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3845 	 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3846 	 */
3847 	os_log(OS_LOG_DEFAULT, "memorystatus: before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3848 #endif
3849 
3850 	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3851 		if (hibernate_flushing == TRUE) {
3852 			clock_sec_t     sec;
3853 
3854 			if (hibernate_should_abort()) {
3855 				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3856 				break;
3857 			}
3858 			if (hibernate_no_swapspace == TRUE) {
3859 				HIBLOG("vm_compressor_flush - out of swap space\n");
3860 				break;
3861 			}
3862 			if (vm_swap_files_pinned() == FALSE) {
3863 				HIBLOG("vm_compressor_flush - unpinned swap files\n");
3864 				break;
3865 			}
3866 			if (hibernate_in_progress_with_pinned_swap == TRUE &&
3867 			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3868 				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3869 				break;
3870 			}
3871 			clock_get_system_nanotime(&sec, &nsec);
3872 
3873 			if (sec > hibernate_flushing_deadline) {
3874 				hibernate_flush_timed_out = TRUE;
3875 				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3876 				break;
3877 			}
3878 		}
3879 
3880 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3881 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3882 			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3883 
3884 			if (!vm_swapout_thread_running) {
3885 				thread_wakeup((event_t)&vm_swapout_thread);
3886 			}
3887 
3888 			lck_mtx_unlock_always(c_list_lock);
3889 
3890 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3891 
3892 			thread_block(THREAD_CONTINUE_NULL);
3893 
3894 			lck_mtx_lock_spin_always(c_list_lock);
3895 		}
3896 		/*
3897 		 * Minor compactions
3898 		 */
3899 		vm_compressor_do_delayed_compactions(flush_all);
3900 
3901 		/*
3902 		 * vm_compressor_process_early_swapped_in_segments()
3903 		 * might be too aggressive. So OFF for now.
3904 		 */
3905 		vm_compressor_process_regular_swapped_in_segments(flush_all);
3906 
3907 		/* Recompute because we dropped the c_list_lock above */
3908 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3909 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3910 			/*
3911 			 * we timed out on the above thread_block
3912 			 * let's loop around and try again
3913 			 * the timeout allows us to continue
3914 			 * to do minor compactions to make
3915 			 * more memory available
3916 			 */
3917 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3918 
3919 			continue;
3920 		}
3921 
3922 		/*
3923 		 * Swap out segments?
3924 		 */
3925 		if (flush_all == FALSE) {
3926 			bool needs_to_swap;
3927 
3928 			lck_mtx_unlock_always(c_list_lock);
3929 
3930 			needs_to_swap = compressor_needs_to_swap();
3931 
3932 			lck_mtx_lock_spin_always(c_list_lock);
3933 
3934 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3935 
3936 			if (!needs_to_swap) {
3937 				break;
3938 			}
3939 		}
3940 		if (queue_empty(&c_age_list_head)) {
3941 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3942 			break;
3943 		}
3944 		c_seg = (c_segment_t) queue_first(&c_age_list_head);
3945 
3946 		assert(c_seg->c_state == C_ON_AGE_Q);
3947 
3948 		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3949 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3950 			break;
3951 		}
3952 
3953 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3954 
3955 		if (c_seg->c_busy) {
3956 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3957 
3958 			lck_mtx_unlock_always(c_list_lock);
3959 			c_seg_wait_on_busy(c_seg);
3960 			lck_mtx_lock_spin_always(c_list_lock);
3961 
3962 			continue;
3963 		}
3964 		C_SEG_BUSY(c_seg);
3965 
3966 		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3967 			/*
3968 			 * found an empty c_segment and freed it
3969 			 * so go grab the next guy in the queue
3970 			 */
3971 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3972 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3973 			continue;
3974 		}
3975 
3976 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3977 		if (bail_wanted_cseg) {
3978 			wanted_cseg_found++;
3979 			bail_wanted_cseg = false;
3980 		}
3981 
3982 		assert(c_seg->c_busy);
3983 		assert(!c_seg->c_on_minorcompact_q);
3984 
3985 		if (switch_state) {
3986 			if (VM_CONFIG_SWAP_IS_ACTIVE) {
3987 				int new_state = C_ON_SWAPOUT_Q;
3988 #if (XNU_TARGET_OS_OSX && __arm64__)
3989 				if (flush_all == false && compressor_swapout_conditions_met() == false) {
3990 					new_state = C_ON_MAJORCOMPACT_Q;
3991 				}
3992 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3993 
3994 				if (new_state == C_ON_SWAPOUT_Q) {
3995 					/*
3996 					 * This mode of putting a generic c_seg on the swapout list is
3997 					 * only supported when we have general swapping enabled
3998 					 */
3999 					clock_sec_t lnow;
4000 					clock_nsec_t lnsec;
4001 					clock_get_system_nanotime(&lnow, &lnsec);
4002 					if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
4003 						vmcs_stats.unripe_under_30s++;
4004 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
4005 						vmcs_stats.unripe_under_60s++;
4006 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
4007 						vmcs_stats.unripe_under_300s++;
4008 					}
4009 				}
4010 
4011 				c_seg_switch_state(c_seg, new_state, FALSE);
4012 			} else {
4013 				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
4014 					assert(VM_CONFIG_SWAP_IS_PRESENT);
4015 					/*
4016 					 * we are running compressor sweeps with swap-behind
4017 					 * make sure the c_seg has aged enough before swapping it
4018 					 * out...
4019 					 */
4020 					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
4021 						c_seg->c_overage_swap = TRUE;
4022 						c_overage_swapped_count++;
4023 						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
4024 					}
4025 				}
4026 			}
4027 			if (c_seg->c_state == C_ON_AGE_Q) {
4028 				/*
4029 				 * this c_seg didn't get moved to the swapout queue
4030 				 * so we need to move it out of the way...
4031 				 * we just did a major compaction on it so put it
4032 				 * on that queue
4033 				 */
4034 				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
4035 			} else {
4036 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
4037 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
4038 			}
4039 		}
4040 
4041 		C_SEG_WAKEUP_DONE(c_seg);
4042 
4043 		lck_mtx_unlock_always(&c_seg->c_lock);
4044 
4045 		/*
4046 		 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
4047 		 * On systems _without_ general swap, it's the responsibility of the memorystatus
4048 		 * subsystem to wake up the swapper.
4049 		 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
4050 		 * this policy.
4051 		 */
4052 		if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
4053 			/*
4054 			 * We don't pause/yield here because we will either
4055 			 * yield below or at the top of the loop with the
4056 			 * assert_wait_timeout.
4057 			 */
4058 			if (!vm_swapout_thread_running) {
4059 				thread_wakeup((event_t)&vm_swapout_thread);
4060 			}
4061 		}
4062 
4063 		if (number_considered >= yield_after_considered_per_pass) {
4064 			if (wanted_cseg_found) {
4065 				/*
4066 				 * We stopped major compactions on a c_seg
4067 				 * that is wanted. We don't know the priority
4068 				 * of the waiter unfortunately but we are at
4069 				 * a very high priority and so, just in case
4070 				 * the waiter is a critical system daemon or
4071 				 * UI thread, let's give up the CPU in case
4072 				 * the system is running a few CPU intensive
4073 				 * tasks.
4074 				 */
4075 				lck_mtx_unlock_always(c_list_lock);
4076 
4077 				mutex_pause(2); /* 100us yield */
4078 
4079 				number_yields++;
4080 
4081 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
4082 
4083 				lck_mtx_lock_spin_always(c_list_lock);
4084 			}
4085 
4086 			number_considered = 0;
4087 			wanted_cseg_found = 0;
4088 		}
4089 	}
4090 	clock_get_system_nanotime(&now, &nsec);
4091 
4092 	end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4093 
4094 	SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4095 
4096 	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4097 
4098 	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
4099 
4100 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
4101 
4102 	if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
4103 		c_seg_major_compact_stats_now = 0;
4104 	} else {
4105 		c_seg_major_compact_stats_now++;
4106 	}
4107 
4108 	assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
4109 
4110 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4111 }
4112 
4113 
4114 static c_segment_t
4115 c_seg_allocate(c_segment_t *current_chead, bool *nearing_limits)
4116 {
4117 	c_segment_t     c_seg;
4118 	int             min_needed;
4119 	int             size_to_populate;
4120 	c_segment_t     *donate_queue_head;
4121 	uint32_t        compressed_pages;
4122 
4123 	*nearing_limits = false;
4124 
4125 	compressed_pages = vm_compressor_pages_compressed();
4126 
4127 	if (compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4128 		*nearing_limits = true;
4129 	}
4130 	if (compressed_pages >= c_segment_pages_compressed_limit) {
4131 		/*
4132 		 * We've reached the compressed pages limit, don't return
4133 		 * a segment to compress into
4134 		 */
4135 		return NULL;
4136 	}
4137 
4138 	if ((c_seg = *current_chead) == NULL) {
4139 		uint32_t        c_segno;
4140 
4141 		lck_mtx_lock_spin_always(c_list_lock);
4142 
4143 		while (c_segments_busy == TRUE) {
4144 			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4145 
4146 			lck_mtx_unlock_always(c_list_lock);
4147 
4148 			thread_block(THREAD_CONTINUE_NULL);
4149 
4150 			lck_mtx_lock_spin_always(c_list_lock);
4151 		}
4152 		if (c_free_segno_head == (uint32_t)-1) {
4153 			uint32_t        c_segments_available_new;
4154 
4155 			/*
4156 			 * We may have dropped the c_list_lock, re-evaluate
4157 			 * the compressed pages count
4158 			 */
4159 			compressed_pages = vm_compressor_pages_compressed();
4160 
4161 			if (c_segments_available >= c_segments_nearing_limit ||
4162 			    compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4163 				*nearing_limits = true;
4164 			}
4165 			if (c_segments_available >= c_segments_limit ||
4166 			    compressed_pages >= c_segment_pages_compressed_limit) {
4167 				lck_mtx_unlock_always(c_list_lock);
4168 
4169 				return NULL;
4170 			}
4171 			c_segments_busy = TRUE;
4172 			lck_mtx_unlock_always(c_list_lock);
4173 
4174 			/* pages for c_segments are never depopulated, c_segments_available never goes down */
4175 			kernel_memory_populate((vm_offset_t)c_segments_next_page,
4176 			    PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4177 			    VM_KERN_MEMORY_COMPRESSOR);
4178 			c_segments_next_page += PAGE_SIZE;
4179 
4180 			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4181 
4182 			if (c_segments_available_new > c_segments_limit) {
4183 				c_segments_available_new = c_segments_limit;
4184 			}
4185 
4186 			/* add the just-added segments to the top of the free-list */
4187 			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4188 				c_segments_get(c_segno - 1)->c_segno = c_segno;  /* next free is the one after you */
4189 			}
4190 
4191 			lck_mtx_lock_spin_always(c_list_lock);
4192 
4193 			c_segments_get(c_segno - 1)->c_segno = c_free_segno_head; /* link to the rest of the existing freelist */
4194 			c_free_segno_head = c_segments_available; /* first one in the page that was just allocated */
4195 			c_segments_available = c_segments_available_new;
4196 
4197 			c_segments_busy = FALSE;
4198 			thread_wakeup((event_t) (&c_segments_busy));
4199 		}
4200 		c_segno = c_free_segno_head;
4201 		assert(c_segno >= 0 && c_segno < c_segments_limit);
4202 
4203 		c_free_segno_head = (uint32_t)c_segments_get(c_segno)->c_segno;
4204 
4205 		/*
4206 		 * do the rest of the bookkeeping now while we're still behind
4207 		 * the list lock and grab our generation id now into a local
4208 		 * so that we can install it once we have the c_seg allocated
4209 		 */
4210 		c_segment_count++;
4211 		if (c_segment_count > c_segment_count_max) {
4212 			c_segment_count_max = c_segment_count;
4213 		}
4214 
4215 		lck_mtx_unlock_always(c_list_lock);
4216 
4217 		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4218 
4219 		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4220 
4221 		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4222 
4223 		c_seg->c_state = C_IS_EMPTY;
4224 		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4225 		c_seg->c_mysegno = c_segno;
4226 
4227 		lck_mtx_lock_spin_always(c_list_lock);
4228 		c_empty_count++;  /* going to be immediately decremented in the next call */
4229 		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4230 		c_segments_get(c_segno)->c_seg = c_seg;
4231 		assert(c_segments_get(c_segno)->c_segno > c_segments_available);  /* we just assigned a pointer to it so this is an indication that it is occupied */
4232 		lck_mtx_unlock_always(c_list_lock);
4233 
4234 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4235 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4236 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4237 #else /* XNU_TARGET_OS_OSX */
4238 			if (memorystatus_swap_all_apps) {
4239 				donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4240 			} else {
4241 				donate_queue_head = NULL;
4242 			}
4243 #endif /* XNU_TARGET_OS_OSX */
4244 
4245 			if (current_chead == donate_queue_head) {
4246 				c_seg->c_has_donated_pages = 1;
4247 				break;
4248 			}
4249 		}
4250 
4251 		*current_chead = c_seg;
4252 
4253 #if DEVELOPMENT || DEBUG
4254 		C_SEG_MAKE_WRITEABLE(c_seg);
4255 #endif
4256 	}
4257 	c_seg_alloc_nextslot(c_seg);
4258 
4259 	size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4260 
4261 	if (size_to_populate) {
4262 		min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4263 
4264 		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4265 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4266 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4267 			}
4268 
4269 			os_atomic_add(&vm_pageout_vminfo.vm_compressor_pages_grabbed, size_to_populate / PAGE_SIZE, relaxed);
4270 
4271 			kernel_memory_populate(
4272 				(vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4273 				size_to_populate,
4274 				KMA_NOFAIL | KMA_COMPRESSOR,
4275 				VM_KERN_MEMORY_COMPRESSOR);
4276 		} else {
4277 			size_to_populate = 0;
4278 		}
4279 	}
4280 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4281 
4282 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4283 
4284 	if (size_to_populate) {
4285 		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4286 	}
4287 
4288 	return c_seg;
4289 }
4290 
4291 #if DEVELOPMENT || DEBUG
4292 #if CONFIG_FREEZE
4293 extern boolean_t memorystatus_freeze_to_memory;
4294 #endif /* CONFIG_FREEZE */
4295 #endif /* DEVELOPMENT || DEBUG */
4296 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4297 
4298 uint64_t c_seg_filled_no_contention = 0;
4299 uint64_t c_seg_filled_contention = 0;
4300 clock_sec_t c_seg_filled_contention_sec_max = 0;
4301 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4302 
4303 static void
4304 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4305 {
4306 	uint32_t        unused_bytes;
4307 	uint32_t        offset_to_depopulate;
4308 	int             new_state = C_ON_AGE_Q;
4309 	clock_sec_t     sec;
4310 	clock_nsec_t    nsec;
4311 	bool            head_insert = false, wakeup_swapout_thread = false;
4312 
4313 	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4314 
4315 	if (unused_bytes) {
4316 		/* if this is a platform that needs an extra page at the end of the segment when running compress,
4317 		 * then now is the time to depopulate that extra page. it still takes virtual space but doesn't
4318 		 * actually waste physical memory */
4319 		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
4320 
4321 		/* release the extra physical page(s) at the end of the segment  */
4322 		lck_mtx_unlock_always(&c_seg->c_lock);
4323 
4324 		kernel_memory_depopulate(
4325 			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4326 			unused_bytes,
4327 			KMA_COMPRESSOR,
4328 			VM_KERN_MEMORY_COMPRESSOR);
4329 
4330 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4331 
4332 		c_seg->c_populated_offset = offset_to_depopulate;
4333 	}
4334 	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
4335 
4336 #if DEVELOPMENT || DEBUG
4337 	{
4338 		boolean_t       c_seg_was_busy = FALSE;
4339 
4340 		if (!c_seg->c_busy) {
4341 			C_SEG_BUSY(c_seg);
4342 		} else {
4343 			c_seg_was_busy = TRUE;
4344 		}
4345 
4346 		lck_mtx_unlock_always(&c_seg->c_lock);
4347 
4348 		C_SEG_WRITE_PROTECT(c_seg);
4349 
4350 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4351 
4352 		if (c_seg_was_busy == FALSE) {
4353 			C_SEG_WAKEUP_DONE(c_seg);
4354 		}
4355 	}
4356 #endif
4357 
4358 #if CONFIG_FREEZE
4359 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4360 	    VM_CONFIG_SWAP_IS_PRESENT &&
4361 	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4362 #if DEVELOPMENT || DEBUG
4363 	    && !memorystatus_freeze_to_memory
4364 #endif /* DEVELOPMENT || DEBUG */
4365 	    ) {
4366 		new_state = C_ON_SWAPOUT_Q;
4367 		wakeup_swapout_thread = true;
4368 	}
4369 #endif /* CONFIG_FREEZE */
4370 
4371 	if (vm_darkwake_mode == TRUE) {
4372 		new_state = C_ON_SWAPOUT_Q;
4373 		head_insert = true;
4374 		wakeup_swapout_thread = true;
4375 	} else {
4376 		c_segment_t *donate_queue_head;
4377 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4378 #if XNU_TARGET_OS_OSX  /* tag:DONATE */
4379 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4380 #else /* XNU_TARGET_OS_OSX */
4381 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4382 #endif /* XNU_TARGET_OS_OSX */
4383 			if (current_chead == donate_queue_head) {
4384 				/* This is the place where the "donating" task actually does the so-called donation.
4385 				 * Instead of continuing to reside in memory in the compressor, the segment goes directly
4386 				 * to the swapout queue instead of the AGE_Q */
4387 				assert(c_seg->c_has_donated_pages);
4388 				new_state = C_ON_SWAPOUT_Q;
4389 				c_seg_total_donated_bytes += c_seg->c_bytes_used;
4390 				break;
4391 			}
4392 		}
4393 	}
4394 
4395 	clock_get_system_nanotime(&sec, &nsec);
4396 	c_seg->c_creation_ts = (uint32_t)sec;
4397 
4398 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4399 		clock_sec_t     sec2;
4400 		clock_nsec_t    nsec2;
4401 
4402 		lck_mtx_lock_spin_always(c_list_lock);
4403 		clock_get_system_nanotime(&sec2, &nsec2);
4404 		TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4405 		/* keep track of how much time we've waited for c_list_lock */
4406 		if (sec2 > c_seg_filled_contention_sec_max) {
4407 			c_seg_filled_contention_sec_max = sec2;
4408 			c_seg_filled_contention_nsec_max = nsec2;
4409 		} else if (sec2 == c_seg_filled_contention_sec_max && nsec2 > c_seg_filled_contention_nsec_max) {
4410 			c_seg_filled_contention_nsec_max = nsec2;
4411 		}
4412 		c_seg_filled_contention++;
4413 	} else {
4414 		c_seg_filled_no_contention++;
4415 	}
4416 
4417 #if CONFIG_FREEZE
4418 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4419 		if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4420 			assert(!c_seg->c_has_donated_pages);
4421 			c_seg->c_has_donated_pages = 1;
4422 			os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4423 		}
4424 		c_seg->c_has_freezer_pages = 1;
4425 	}
4426 #endif /* CONFIG_FREEZE */
4427 
4428 	c_seg->c_generation_id = c_generation_id++;
4429 	c_seg_switch_state(c_seg, new_state, head_insert);
4430 
4431 #if CONFIG_FREEZE
4432 	/*
4433 	 * Donated segments count as frozen to swap if we go through the freezer.
4434 	 * TODO: What we need is a new ledger and cseg state that can describe
4435 	 * a frozen cseg from a donated task so we can accurately decrement it on
4436 	 * swapins.
4437 	 */
4438 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4439 		/*
4440 		 * darkwake and freezer can't co-exist together
4441 		 * We'll need to fix this accounting as a start.
4442 		 * And early donation c_segs are separate from frozen c_segs.
4443 		 */
4444 		assert(vm_darkwake_mode == FALSE);
4445 		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4446 		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4447 	}
4448 #endif /* CONFIG_FREEZE */
4449 
4450 	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4451 		/* this is possible if we decompressed a page from the segment before it finished filling */
4452 #if CONFIG_FREEZE
4453 		assert(c_seg->c_task_owner == NULL);
4454 #endif /* CONFIG_FREEZE */
4455 		c_seg_need_delayed_compaction(c_seg, TRUE);
4456 	}
4457 
4458 	lck_mtx_unlock_always(c_list_lock);
4459 
4460 	if (wakeup_swapout_thread) {
4461 		/*
4462 		 * Darkwake and Freeze configs always
4463 		 * wake up the swapout thread because
4464 		 * the compactor thread that normally handles
4465 		 * it may not be running as much in these
4466 		 * configs.
4467 		 */
4468 		thread_wakeup((event_t)&vm_swapout_thread);
4469 	}
4470 
4471 	*current_chead = NULL;
4472 }
4473 
4474 /*
4475  * returns with c_seg locked
4476  */
4477 void
4478 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4479 {
4480 	clock_sec_t     sec;
4481 	clock_nsec_t    nsec;
4482 
4483 	clock_get_system_nanotime(&sec, &nsec);
4484 
4485 	lck_mtx_lock_spin_always(c_list_lock);
4486 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4487 
4488 	assert(c_seg->c_busy_swapping);
4489 	assert(c_seg->c_busy);
4490 
4491 	c_seg->c_busy_swapping = 0;
4492 
4493 	if (c_seg->c_overage_swap == TRUE) {
4494 		c_overage_swapped_count--;
4495 		c_seg->c_overage_swap = FALSE;
4496 	}
4497 	if (has_data == TRUE) {
4498 		if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4499 #if CONFIG_FREEZE
4500 			/*
4501 			 * If a segment has both identities, frozen and donated bits set, the donated
4502 			 * bit wins on the swapin path. This is because the segment is being swapped back
4503 			 * in and so is in demand and should be given more time to spend in memory before
4504 			 * being swapped back out under pressure.
4505 			 */
4506 			if (c_seg->c_has_donated_pages) {
4507 				c_seg->c_has_freezer_pages = 0;
4508 			}
4509 #endif /* CONFIG_FREEZE */
4510 			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4511 		} else {
4512 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4513 		}
4514 
4515 		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4516 			c_seg_need_delayed_compaction(c_seg, TRUE);
4517 		}
4518 	} else {
4519 		c_seg->c_store.c_buffer = (int32_t*) NULL;
4520 		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4521 
4522 		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4523 	}
4524 	c_seg->c_swappedin_ts = (uint32_t)sec;
4525 	c_seg->c_swappedin = true;
4526 #if TRACK_C_SEGMENT_UTILIZATION
4527 	c_seg->c_decompressions_since_swapin = 0;
4528 #endif /* TRACK_C_SEGMENT_UTILIZATION */
4529 
4530 	lck_mtx_unlock_always(c_list_lock);
4531 }
4532 
4533 
4534 
4535 /*
4536  * c_seg has to be locked and is returned locked if the c_seg isn't freed
4537  * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4538  * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4539  */
4540 
4541 int
4542 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4543 {
4544 	vm_offset_t     addr = 0;
4545 	uint32_t        io_size = 0;
4546 	uint64_t        f_offset;
4547 	thread_pri_floor_t token;
4548 
4549 	assert(C_SEG_IS_ONDISK(c_seg));
4550 
4551 #if !CHECKSUM_THE_SWAP
4552 	c_seg_trim_tail(c_seg);
4553 #endif
4554 	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4555 	f_offset = c_seg->c_store.c_swap_handle;
4556 
4557 	C_SEG_BUSY(c_seg);
4558 	c_seg->c_busy_swapping = 1;
4559 
4560 	/*
4561 	 * This thread is likely going to block for I/O.
4562 	 * Make sure it is ready to run when the I/O completes because
4563 	 * it needs to clear the busy bit on the c_seg so that other
4564 	 * waiting threads can make progress too.
4565 	 */
4566 	token = thread_priority_floor_start();
4567 	lck_mtx_unlock_always(&c_seg->c_lock);
4568 
4569 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4570 
4571 	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4572 	c_seg->c_store.c_buffer = (int32_t*) addr;
4573 
4574 	kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4575 	    VM_KERN_MEMORY_COMPRESSOR);
4576 
4577 	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4578 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4579 
4580 		kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4581 		    VM_KERN_MEMORY_COMPRESSOR);
4582 
4583 		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4584 	} else {
4585 #if ENCRYPTED_SWAP
4586 		vm_swap_decrypt(c_seg);
4587 #endif /* ENCRYPTED_SWAP */
4588 
4589 #if CHECKSUM_THE_SWAP
4590 		if (c_seg->cseg_swap_size != io_size) {
4591 			panic("swapin size doesn't match swapout size");
4592 		}
4593 
4594 		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4595 			panic("c_seg_swapin - Swap hash mismatch");
4596 		}
4597 #endif /* CHECKSUM_THE_SWAP */
4598 
4599 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4600 
4601 		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4602 
4603 #if CONFIG_FREEZE
4604 		/*
4605 		 * c_seg_swapin_requeue() returns with the c_seg lock held.
4606 		 */
4607 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4608 			assert(c_seg->c_busy);
4609 
4610 			lck_mtx_unlock_always(&c_seg->c_lock);
4611 			lck_mtx_lock_spin_always(c_list_lock);
4612 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4613 		}
4614 
4615 		if (c_seg->c_task_owner) {
4616 			c_seg_update_task_owner(c_seg, NULL);
4617 		}
4618 
4619 		lck_mtx_unlock_always(c_list_lock);
4620 
4621 		os_atomic_add(&c_segment_pages_compressed_incore, c_seg->c_slots_used, relaxed);
4622 		if (c_seg->c_has_donated_pages) {
4623 			os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4624 		}
4625 #endif /* CONFIG_FREEZE */
4626 
4627 		os_atomic_add(&compressor_bytes_used, c_seg->c_bytes_used, relaxed);
4628 
4629 		if (force_minor_compaction == TRUE) {
4630 			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4631 				/*
4632 				 * c_seg was completely empty so it was freed,
4633 				 * so be careful not to reference it again
4634 				 *
4635 				 * Drop the boost so that the thread priority
4636 				 * is returned back to where it is supposed to be.
4637 				 */
4638 				thread_priority_floor_end(&token);
4639 				return 1;
4640 			}
4641 
4642 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4643 		}
4644 	}
4645 	C_SEG_WAKEUP_DONE(c_seg);
4646 
4647 	/*
4648 	 * Drop the boost so that the thread priority
4649 	 * is returned back to where it is supposed to be.
4650 	 */
4651 	thread_priority_floor_end(&token);
4652 
4653 	return 0;
4654 }
4655 
4656 /*
4657  * TODO: refactor the CAS loops in c_segment_sv_hash_drop_ref() and c_segment_sv_hash_insert()
4658  * to os_atomic_rmw_loop() [rdar://139546215]
4659  */
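/*
 * A rough sketch of that refactor for the drop-ref side (an assumption of
 * how the os_atomic_rmw_loop() pattern from <os/atomic_private.h> would
 * apply here, not the committed fix for the rdar above):
 *
 *	struct c_sv_hash_entry ov, nv;
 *	os_atomic_rmw_loop(&c_segment_sv_hash_table[hash_indx].he_record,
 *	    ov.he_record, nv.he_record, relaxed, {
 *		nv.he_ref = ov.he_ref - 1;
 *		nv.he_data = ov.he_data;
 *	});
 *	if (nv.he_ref == 0) {
 *		os_atomic_dec(&c_segment_svp_in_hash, relaxed);
 *	}
 */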
4660 
4661 static void
4662 c_segment_sv_hash_drop_ref(int hash_indx)
4663 {
4664 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4665 
4666 	while (1) {
4667 		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4668 
4669 		n_sv_he.he_ref = o_sv_he.he_ref - 1;
4670 		n_sv_he.he_data = o_sv_he.he_data;
4671 
4672 		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4673 			if (n_sv_he.he_ref == 0) {
4674 				os_atomic_dec(&c_segment_svp_in_hash, relaxed);
4675 			}
4676 			break;
4677 		}
4678 	}
4679 }
4680 
4681 
4682 static int
4683 c_segment_sv_hash_insert(uint32_t data)
4684 {
4685 	int             hash_sindx;
4686 	int             misses;
4687 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4688 	boolean_t       got_ref = FALSE;
4689 
4690 	if (data == 0) {
4691 		os_atomic_inc(&c_segment_svp_zero_compressions, relaxed);
4692 	} else {
4693 		os_atomic_inc(&c_segment_svp_nonzero_compressions, relaxed);
4694 	}
4695 
4696 	hash_sindx = data & C_SV_HASH_MASK;
4697 
4698 	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4699 		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4700 
4701 		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4702 			n_sv_he.he_ref = o_sv_he.he_ref + 1;
4703 			n_sv_he.he_data = data;
4704 
4705 			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4706 				if (n_sv_he.he_ref == 1) {
4707 					os_atomic_inc(&c_segment_svp_in_hash, relaxed);
4708 				}
4709 				got_ref = TRUE;
4710 				break;
4711 			}
4712 			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4713 		}
4714 		if (got_ref == TRUE) {
4715 			break;
4716 		}
4717 		hash_sindx++;
4718 
4719 		if (hash_sindx == C_SV_HASH_SIZE) {
4720 			hash_sindx = 0;
4721 		}
4722 	}
4723 	if (got_ref == FALSE) {
4724 		return -1;
4725 	}
4726 
4727 	return hash_sindx;
4728 }
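/*
 * Usage note (a sketch of the caller's side): a page that compressed down to
 * a single repeated 32-bit value can be recorded via
 * c_segment_sv_hash_insert(value); on success the returned bucket index is
 * stored instead of placing the page in a c_seg. A return of -1 means
 * C_SV_HASH_MAX_MISS buckets were probed without finding a matching or free
 * entry, so the page has to be stored normally.
 */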
4729 
4730 
4731 #if RECORD_THE_COMPRESSED_DATA
4732 
4733 static void
4734 c_compressed_record_data(char *src, int c_size)
4735 {
4736 	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4737 		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4738 	}
4739 
4740 	*(int *)((void *)c_compressed_record_cptr) = c_size;
4741 
4742 	c_compressed_record_cptr += 4;
4743 
4744 	memcpy(c_compressed_record_cptr, src, c_size);
4745 	c_compressed_record_cptr += c_size;
4746 }
4747 #endif
4748 
4749 
4750 /**
4751  * Do the actual compression of the given page
4752  * @param src [IN] address in the physical aperture of the page to compress.
4753  * @param slot_ptr [OUT] fill the slot-mapping of the c_seg+slot where the page ends up being stored
4754  * @param current_chead [IN-OUT] the currently-filling c_seg; the pointer comes from the compression thread state.
4755  *          On the very first call it points to NULL and this function fills it in with a new
4756  *          filling c_seg. If the current filling c_seg doesn't have enough space, it is replaced in this
4757  *          location with a new filling c_seg.
4758  * @param scratch_buf [IN] pointer from the current thread state, used by the compression codec
4759  * @return KERN_RESOURCE_SHORTAGE if the compressor has been exhausted
4760  */
4761 static kern_return_t
4762 c_compress_page(
4763 	char             *src,
4764 	c_slot_mapping_t slot_ptr,
4765 	c_segment_t      *current_chead,
4766 	char             *scratch_buf,
4767 	__unused vm_compressor_options_t flags)
4768 {
4769 	int              c_size = -1;
4770 	int              c_rounded_size = 0;
4771 	int              max_csize;
4772 	bool             nearing_limits;
4773 	c_slot_t         cs;
4774 	c_segment_t      c_seg;
4775 
4776 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4777 retry:  /* may need to retry if the currently filling c_seg will not have enough space */
4778 	c_seg = c_seg_allocate(current_chead, &nearing_limits);
4779 	if (c_seg == NULL) {
4780 		if (nearing_limits) {
4781 			memorystatus_respond_to_compressor_exhaustion();
4782 		}
4783 		return KERN_RESOURCE_SHORTAGE;
4784 	}
4785 
4786 	/*
4787 	 * returns with c_seg lock held
4788 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4789 	 * c_nextslot has been allocated and
4790 	 * c_store.c_buffer populated
4791 	 */
4792 	assert(c_seg->c_state == C_IS_FILLING);
4793 
4794 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4795 
4796 	C_SLOT_ASSERT_PACKABLE(slot_ptr);
4797 	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4798 
4799 	cs->c_offset = c_seg->c_nextoffset;
4800 
4801 	unsigned int avail_space = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4802 
4803 
4804 	max_csize = avail_space;
4805 	if (max_csize > PAGE_SIZE) {
4806 		max_csize = PAGE_SIZE;
4807 	}
4808 
4809 #if CHECKSUM_THE_DATA
4810 	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4811 #endif
4812 	boolean_t incomp_copy = FALSE; /* set if the codec already copied an incompressible page itself */
4813 	int max_csize_adj = (max_csize - 4); /* how much space we have left to fill in this c_seg */
4814 
4815 	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4816 #if defined(__arm64__)
4817 		uint16_t ccodec = CINVALID;
4818 		uint32_t inline_popcount;
4819 		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4820 			vm_memtag_disable_checking();
4821 			c_size = metacompressor((const uint8_t *) src,
4822 			    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4823 			    max_csize_adj, &ccodec,
4824 			    scratch_buf, &incomp_copy, &inline_popcount);
4825 			vm_memtag_enable_checking();
4826 			assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4827 
4828 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4829 			if (c_size > max_csize_adj) {
4830 				c_size = -1;
4831 			}
4832 #endif
4833 		} else {
4834 			c_size = -1;
4835 		}
4836 		assert(ccodec == CCWK || ccodec == CCLZ4);
4837 		cs->c_codec = ccodec;
4838 #endif
4839 	} else {
4840 #if defined(__arm64__)
4841 		vm_memtag_disable_checking();
4842 		cs->c_codec = CCWK;
4843 		__unreachable_ok_push
4844 		if (PAGE_SIZE == 4096) {
4845 			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4846 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4847 		} else {
4848 			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4849 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4850 		}
4851 		__unreachable_ok_pop
4852 		vm_memtag_enable_checking();
4853 #else
4854 		vm_memtag_disable_checking();
4855 		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4856 		    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4857 		vm_memtag_enable_checking();
4858 #endif
4859 	}
4860 	/* c_size is the size written by the codec; 0 if the page is a uniform 32-bit value;
4861 	 * -1 if there was not enough space or the page was incompressible */
4862 	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4863 	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4864 
4865 	if (c_size == -1) {
4866 		if (max_csize < PAGE_SIZE) {
4867 			c_current_seg_filled(c_seg, current_chead);
4868 			assert(*current_chead == NULL);
4869 
4870 			lck_mtx_unlock_always(&c_seg->c_lock);
4871 			/* TODO: it may be worth requiring codecs to distinguish
4872 			 * between incompressible inputs and failures due to budget exhaustion.
4873 			 * Right now this assumes that if we had a full PAGE_SIZE of space available, then the codec failed due to incompressible input */
4874 
4875 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
4876 			goto retry;  /* previous c_seg didn't have enought space, we finalized it and can try again with a fresh c_seg */
4877 			goto retry;  /* previous c_seg didn't have enough space; we finalized it and can try again with a fresh c_seg */
4878 		c_size = PAGE_SIZE; /* tag:WK-INCOMPRESSIBLE */
4879 
4880 		if (incomp_copy == FALSE) { /* codec did not copy the incompressible input */
4881 			vm_memtag_disable_checking();
4882 			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4883 			vm_memtag_enable_checking();
4884 		}
4885 
4886 		os_atomic_inc(&c_segment_noncompressible_pages, relaxed);
4887 	} else if (c_size == 0) {
4888 		{
4889 			/*
4890 			 * Special case - this is a page completely full of a single 32 bit value.
4891 			 * We store some values directly in the c_slot_mapping, if not there, the
4892 			 * 4 byte value goes in the compressor segment.
4893 			 */
4894 			int hash_index = c_segment_sv_hash_insert(*(uint32_t *) (uintptr_t) src);
4895 
4896 			if (hash_index != -1) {
4897 				slot_ptr->s_cindx = hash_index;
4898 				slot_ptr->s_cseg = C_SV_CSEG_ID;
4899 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4900 				slot_ptr->s_uncompressed = 0;
4901 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4902 
4903 				os_atomic_inc(&c_segment_svp_hash_succeeded, relaxed);
4904 #if RECORD_THE_COMPRESSED_DATA
4905 				c_compressed_record_data(src, 4);
4906 #endif
4907 				/* we didn't write anything to c_buffer and didn't end up using the slot in the c_seg at all, so skip all
4908 				 * the book-keeping we would otherwise have done */
4909 				goto sv_compression;
4910 			}
4911 		}
4912 		os_atomic_inc(&c_segment_svp_hash_failed, relaxed);
4913 
4914 		c_size = 4;
4915 		vm_memtag_disable_checking();
4916 		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4917 		vm_memtag_enable_checking();
4918 	}
4919 
4920 #if RECORD_THE_COMPRESSED_DATA
4921 	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4922 #endif
4923 #if CHECKSUM_THE_COMPRESSED_DATA
4924 	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4925 #endif
4926 #if POPCOUNT_THE_COMPRESSED_DATA
4927 	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4928 #endif
4929 
4930 	PACK_C_SIZE(cs, c_size);
4931 
4932 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size);
4933 
4934 	c_seg->c_bytes_used += c_rounded_size;
4935 	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4936 	c_seg->c_slots_used++;
4937 
4938 #if CONFIG_FREEZE
4939 	/* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
4940 	os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
4941 	if (c_seg->c_has_donated_pages) {
4942 		os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
4943 	}
4944 #endif /* CONFIG_FREEZE */
4945 
4946 	slot_ptr->s_cindx = c_seg->c_nextslot++;
4947 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1, see other usages of s_cseg where it's decremented */
4948 	slot_ptr->s_cseg = c_seg->c_mysegno + 1;
4949 
4950 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4951 	slot_ptr->s_uncompressed = 0;
4952 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4953 
4954 sv_compression:
4955 	/* can we say this c_seg is full? */
4956 	if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4957 		/* condition 1: segment buffer is almost full, don't bother trying to fill it further.
4958 		 * condition 2: we can't have any more slots in this c_segment even if we had buffer space */
4959 		c_current_seg_filled(c_seg, current_chead);
4960 		assert(*current_chead == NULL);
4961 	}
4962 
4963 	lck_mtx_unlock_always(&c_seg->c_lock);
4964 
4965 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4966 
4967 #if RECORD_THE_COMPRESSED_DATA
4968 	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4969 		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4970 		c_compressed_record_cptr = c_compressed_record_sbuf;
4971 	}
4972 #endif
4973 	if (c_size) {
4974 		os_atomic_add(&c_segment_compressed_bytes, c_size, relaxed);
4975 		os_atomic_add(&compressor_bytes_used, c_rounded_size, relaxed);
4976 	}
4977 	os_atomic_add(&c_segment_input_bytes, PAGE_SIZE, relaxed);
4978 
4979 	os_atomic_inc(&c_segment_pages_compressed, relaxed);
4980 #if DEVELOPMENT || DEBUG
4981 	if (!compressor_running_perf_test) {
4982 		/*
4983 		 * The perf_compressor benchmark should not be able to trigger
4984 		 * compressor thrashing jetsams.
4985 		 */
4986 		os_atomic_inc(&sample_period_compression_count, relaxed);
4987 	}
4988 #else /* DEVELOPMENT || DEBUG */
4989 	os_atomic_inc(&sample_period_compression_count, relaxed);
4990 #endif /* DEVELOPMENT || DEBUG */
4991 
4992 	if (nearing_limits) {
4993 		memorystatus_respond_to_compressor_exhaustion();
4994 	}
4995 
4996 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4997 
4998 	return KERN_SUCCESS;
4999 }
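
/*
 * A minimal sketch (not compiled) of how a packed slot mapping produced
 * above is resolved back to its c_slot, mirroring what c_decompress_page()
 * does below; s_cseg holds segno+1 so an all-zero mapping can mean "empty
 * slot", and C_SV_CSEG_ID marks single-value pages that live in the hash
 * table instead of a segment:
 *
 *	c_slot_t
 *	c_slot_from_mapping(c_slot_mapping_t slot_ptr)
 *	{
 *		c_segment_t c_seg;
 *
 *		assert(slot_ptr->s_cseg != C_SV_CSEG_ID);
 *		c_seg = c_segments_get(slot_ptr->s_cseg - 1)->c_seg;
 *		return C_SEG_SLOT_FROM_INDEX(c_seg, slot_ptr->s_cindx);
 *	}
 */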
5000 
5001 static inline void
5002 sv_decompress(int32_t *ddst, int32_t pattern)
5003 {
5004 //	assert(__builtin_constant_p(PAGE_SIZE) != 0);
5005 #if defined(__x86_64__)
5006 	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
5007 #elif defined(__arm64__)
5008 	assert((PAGE_SIZE % 128) == 0);
5009 	if (pattern == 0) {
5010 		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
5011 	} else {
5012 		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
5013 	}
5014 #else
5015 	size_t          i;
5016 
5017 	/* Unroll the pattern fill loop 4x to encourage the
5018 	 * compiler to emit NEON stores, cf.
5019 	 * <rdar://problem/25839866> Loop autovectorization
5020 	 * anomalies.
5021 	 */
5022 	/* We use separate loops for each PAGE_SIZE
5023 	 * to allow the autovectorizer to engage, as PAGE_SIZE
5024 	 * may not be a constant.
5025 	 */
5026 
5027 	__unreachable_ok_push
5028 	if (PAGE_SIZE == 4096) {
5029 		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
5030 			*ddst++ = pattern;
5031 			*ddst++ = pattern;
5032 			*ddst++ = pattern;
5033 			*ddst++ = pattern;
5034 		}
5035 	} else {
5036 		assert(PAGE_SIZE == 16384);
5037 		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
5038 			*ddst++ = pattern;
5039 			*ddst++ = pattern;
5040 			*ddst++ = pattern;
5041 			*ddst++ = pattern;
5042 		}
5043 	}
5044 	__unreachable_ok_pop
5045 #endif
5046 }
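
/*
 * Note on the arm64 path above: an all-zero pattern takes the
 * fill32_dczva() cache-line-zeroing fast path, while any other repeated
 * 32-bit value is written with the non-temporal fill32_nt(); either way
 * the whole page is reconstructed from the 4-byte pattern alone.
 */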
5047 
5048 static vm_decompress_result_t
5049 c_decompress_page(
5050 	char            *dst,
5051 	volatile c_slot_mapping_t slot_ptr,    /* why volatile? perhaps due to changes across hibernation */
5052 	vm_compressor_options_t flags,
5053 	int             *zeroslot)
5054 {
5055 	c_slot_t        cs;
5056 	c_segment_t     c_seg;
5057 	uint32_t        c_segno;
5058 	uint16_t        c_indx;
5059 	int             c_rounded_size;
5060 	uint32_t        c_size;
5061 	vm_decompress_result_t retval = 0;
5062 	boolean_t       need_unlock = TRUE;
5063 	boolean_t       consider_defragmenting = FALSE;
5064 	boolean_t       kdp_mode = FALSE;
5065 
5066 	if (__improbable(flags & C_KDP)) {
5067 		if (not_in_kdp) {
5068 			panic("C_KDP passed to decompress page from outside of debugger context");
5069 		}
5070 
5071 		assert((flags & C_KEEP) == C_KEEP);
5072 		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
5073 
5074 		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
5075 			return DECOMPRESS_NEED_BLOCK;
5076 		}
5077 
5078 		kdp_mode = TRUE;
5079 		*zeroslot = 0;
5080 	}
5081 
5082 ReTry:
5083 	if (__probable(!kdp_mode)) {
5084 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5085 	} else {
5086 		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
5087 			return DECOMPRESS_NEED_BLOCK;
5088 		}
5089 	}
5090 
5091 #if HIBERNATION
5092 	/*
5093 	 * if hibernation is enabled, it indicates (via a call
5094 	 * to 'vm_decompressor_lock') that no further
5095 	 * decompressions are allowed once it reaches
5096 	 * the point of flushing all of the currently dirty
5097 	 * anonymous memory through the compressor and out
5098 	 * to disk... in this state we allow freeing of compressed
5099 	 * pages and must honor the C_DONT_BLOCK case
5100 	 */
5101 	if (__improbable(dst && decompressions_blocked == TRUE)) {
5102 		if (flags & C_DONT_BLOCK) {
5103 			if (__probable(!kdp_mode)) {
5104 				PAGE_REPLACEMENT_DISALLOWED(FALSE);
5105 			}
5106 
5107 			*zeroslot = 0;
5108 			return DECOMPRESS_NEED_BLOCK;
5109 		}
5110 		/*
5111 		 * it's safe to atomically assert and block behind the
5112 		 * lock held in shared mode because "decompressions_blocked" is
5113 		 * only set and cleared and the thread_wakeup done when the lock
5114 		 * is held exclusively
5115 		 */
5116 		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
5117 
5118 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5119 
5120 		thread_block(THREAD_CONTINUE_NULL);
5121 
5122 		goto ReTry;
5123 	}
5124 #endif
5125 	/* s_cseg is actually "segno+1" */
5126 	c_segno = slot_ptr->s_cseg - 1;
5127 
5128 	if (__improbable(c_segno >= c_segments_available)) {
5129 		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
5130 		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
5131 	}
5132 
5133 	if (__improbable(c_segments_get(c_segno)->c_segno < c_segments_available)) {
5134 		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
5135 		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
5136 	}
5137 
5138 	c_seg = c_segments_get(c_segno)->c_seg;
5139 
5140 	if (__probable(!kdp_mode)) {
5141 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5142 	} else {
5143 		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
5144 			return DECOMPRESS_NEED_BLOCK;
5145 		}
5146 	}
5147 
5148 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5149 
5150 	if (dst == NULL && c_seg->c_busy_swapping) {
5151 		assert(c_seg->c_busy);
5152 
5153 		goto bypass_busy_check;
5154 	}
5155 	if (flags & C_DONT_BLOCK) {
5156 		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
5157 			*zeroslot = 0;
5158 
5159 			retval = DECOMPRESS_NEED_BLOCK;
5160 			goto done;
5161 		}
5162 	}
5163 	if (c_seg->c_busy) {
5164 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5165 
5166 		c_seg_wait_on_busy(c_seg);
5167 
5168 		goto ReTry;
5169 	}
5170 bypass_busy_check:
5171 
5172 	c_indx = slot_ptr->s_cindx;
5173 
5174 	if (__improbable(c_indx >= c_seg->c_nextslot)) {
5175 		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5176 		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5177 	}
5178 
5179 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5180 
5181 	c_size = UNPACK_C_SIZE(cs);
5182 
5183 
5184 	if (__improbable(c_size == 0)) { /* sanity check it's not an empty slot */
5185 		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5186 		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5187 	}
5188 
5189 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
5190 	/* c_rounded_size should not change after this point so that it remains consistent on all branches */
5191 
5192 	if (dst) {  /* would be NULL if we don't want the page content, from free */
5193 		uint32_t        age_of_cseg;
5194 		clock_sec_t     cur_ts_sec;
5195 		clock_nsec_t    cur_ts_nsec;
5196 
5197 		if (C_SEG_IS_ONDISK(c_seg)) {
5198 #if CONFIG_FREEZE
5199 			if (freezer_incore_cseg_acct) {
5200 				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5201 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5202 					lck_mtx_unlock_always(&c_seg->c_lock);
5203 
5204 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5205 
5206 					goto ReTry;
5207 				}
5208 
5209 				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5210 				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5211 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5212 					lck_mtx_unlock_always(&c_seg->c_lock);
5213 
5214 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5215 
5216 					goto ReTry;
5217 				}
5218 			}
5219 #endif /* CONFIG_FREEZE */
5220 			assert(kdp_mode == FALSE);
5221 			retval = c_seg_swapin(c_seg, FALSE, TRUE);
5222 			assert(retval == 0);
5223 
5224 			retval = DECOMPRESS_SUCCESS_SWAPPEDIN;
5225 		}
5226 		if (c_seg->c_state == C_ON_BAD_Q) {
5227 			assert(c_seg->c_store.c_buffer == NULL);
5228 			*zeroslot = 0;
5229 
5230 			retval = DECOMPRESS_FAILED_BAD_Q;
5231 			goto done;
5232 		}
5233 
5234 #if POPCOUNT_THE_COMPRESSED_DATA
5235 		unsigned csvpop;
5236 		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5237 		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5238 			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5239 		}
5240 #endif
5241 
5242 #if CHECKSUM_THE_COMPRESSED_DATA
5243 		unsigned csvhash;
5244 		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5245 			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5246 		}
5247 #endif
5248 		if (c_size == PAGE_SIZE) { /* tag:WK-INCOMPRESSIBLE */
5249 			/* page wasn't compressible... just copy it out */
5250 			vm_memtag_disable_checking();
5251 			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5252 			vm_memtag_enable_checking();
5253 		} else if (c_size == 4) {
5254 			int32_t         data;
5255 			int32_t         *dptr;
5256 
5257 			/*
5258 			 * page was populated with a single value
5259 			 * that didn't fit into our fast hash
5260 			 * so we packed it in as a single non-compressed value
5261 			 * that we need to populate the page with
5262 			 */
5263 			dptr = (int32_t *)(uintptr_t)dst;
5264 			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5265 			vm_memtag_disable_checking();
5266 			sv_decompress(dptr, data);
5267 			vm_memtag_enable_checking();
5268 		} else {  /* normal segment decompress */
5269 			uint32_t        my_cpu_no;
5270 			char            *scratch_buf;
5271 
5272 			my_cpu_no = cpu_number();
5273 
5274 			assert(my_cpu_no < compressor_cpus);
5275 
5276 			if (__probable(!kdp_mode)) {
5277 				/*
5278 				 * we're behind the c_seg lock held in spin mode
5279 				 * which means pre-emption is disabled... therefore
5280 				 * the following sequence is atomic and safe
5281 				 */
5282 				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5283 			} else if (flags & C_KDP_MULTICPU) {
5284 				assert(vm_compressor_kdp_state.kc_scratch_bufs != NULL);
5285 				scratch_buf = &vm_compressor_kdp_state.kc_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5286 			} else {
5287 				scratch_buf = vm_compressor_kdp_state.kc_panic_scratch_buf;
5288 			}
5289 
5290 			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5291 #if defined(__arm64__)
5292 				uint16_t c_codec = cs->c_codec;
5293 				uint32_t inline_popcount;
5294 				vm_memtag_disable_checking();
5295 				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5296 				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5297 					vm_memtag_enable_checking();
5298 					retval = DECOMPRESS_FAILED_ALGO_ERROR;
5299 				} else {
5300 					vm_memtag_enable_checking();
5301 					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5302 				}
5303 #endif
5304 			} else {  /* algorithm == VM_COMPRESSOR_DEFAULT_CODEC */
5305 				vm_memtag_disable_checking();
5306 #if defined(__arm64__)
5307 				__unreachable_ok_push
5308 				if (PAGE_SIZE == 4096) {
5309 					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5310 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5311 				} else {
5312 					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5313 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5314 				}
5315 				__unreachable_ok_pop
5316 #else
5317 				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5318 				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5319 #endif
5320 				vm_memtag_enable_checking();
5321 			}
5322 		} /* normal segment decompress */
5323 
5324 #if CHECKSUM_THE_DATA
5325 		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5326 #if defined(__arm64__)
5327 			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5328 			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5329 #else /* defined(__arm64__) */
5330 			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5331 #endif /* defined(__arm64__) */
5332 		}
5333 #endif /* CHECKSUM_THE_DATA */
5334 		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5335 			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5336 
5337 			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5338 			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5339 				os_atomic_inc(&age_of_decompressions_during_sample_period[age_of_cseg], relaxed);
5340 			} else {
5341 				os_atomic_inc(&overage_decompressions_during_sample_period, relaxed);
5342 			}
5343 
5344 			os_atomic_inc(&sample_period_decompression_count, relaxed);
5345 		}
5346 
5347 
5348 #if TRACK_C_SEGMENT_UTILIZATION
5349 		if (c_seg->c_swappedin) {
5350 			c_seg->c_decompressions_since_swapin++;
5351 		}
5352 #endif /* TRACK_C_SEGMENT_UTILIZATION */
5353 	} /* dst */
5354 	else {
5355 #if CONFIG_FREEZE
5356 		/*
5357 		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
5358 		 */
5359 		if (C_SEG_IS_ONDISK(c_seg)) {
5360 			/*
5361 			 * The compression sweep feature will push out anonymous pages to disk
5362 			 * without going through the freezer path and so those c_segs, while
5363 			 * swapped out, won't have an owner.
5364 			 */
5365 			if (c_seg->c_task_owner) {
5366 				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5367 			}
5368 
5369 			/*
5370 			 * We are freeing a page in swap without swapping it in. We bump the in-core
5371 			 * count here to simulate a swapin of a page so that we can accurately
5372 			 * decrement it below.
5373 			 */
5374 			os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
5375 			if (c_seg->c_has_donated_pages) {
5376 				os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5377 			}
5378 		} else if (c_seg->c_state == C_ON_BAD_Q) {
5379 			assert(c_seg->c_store.c_buffer == NULL);
5380 			*zeroslot = 0;
5381 
5382 			retval = DECOMPRESS_FAILED_BAD_Q_FREEZE;
5383 			goto done; /* this is intended to avoid the decrement of c_segment_pages_compressed_incore below */
5384 		}
5385 #endif /* CONFIG_FREEZE */
5386 	}
5387 
5388 	if (flags & C_KEEP) {
5389 		*zeroslot = 0;
5390 		goto done;
5391 	}
5392 
5393 
5394 	/* now perform needed bookkeeping for the removal of the slot from the segment */
5395 	assert(kdp_mode == FALSE);
5396 
5397 	c_seg->c_bytes_unused += c_rounded_size;
5398 	c_seg->c_bytes_used -= c_rounded_size;
5399 
5400 	assert(c_seg->c_slots_used);
5401 	c_seg->c_slots_used--;
5402 	if (dst && c_seg->c_swappedin) {
5403 		task_t task = current_task();
5404 		if (task) {
5405 			ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5406 		}
5407 	}
5408 
5409 	PACK_C_SIZE(cs, 0); /* mark slot as empty */
5410 
5411 	if (c_indx < c_seg->c_firstemptyslot) {
5412 		c_seg->c_firstemptyslot = c_indx;
5413 	}
5414 
5415 	os_atomic_dec(&c_segment_pages_compressed, relaxed);
5416 #if CONFIG_FREEZE
5417 	os_atomic_dec(&c_segment_pages_compressed_incore, relaxed);
5418 	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5419 	if (c_seg->c_has_donated_pages) {
5420 		os_atomic_dec(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5421 		assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5422 	}
5423 #endif /* CONFIG_FREEZE */
5424 
5425 	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5426 		/*
5427 		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5428 		 * free of a compressed page (i.e. dst == NULL)
5429 		 */
5430 		os_atomic_sub(&compressor_bytes_used, c_rounded_size, relaxed);
5431 	}
5432 	if (c_seg->c_busy_swapping) {
5433 		/*
5434 		 * bypass case for c_busy_swapping...
5435 		 * let the swapin/swapout paths deal with putting
5436 		 * the c_seg on the minor compaction queue if needed
5437 		 */
5438 		assert(c_seg->c_busy);
5439 		goto done;
5440 	}
5441 	assert(!c_seg->c_busy);
5442 
5443 	if (c_seg->c_state != C_IS_FILLING) {
5444 		/* did we just remove the last slot from the segment? */
5445 		if (c_seg->c_bytes_used == 0) {
5446 			if (!(C_SEG_IS_ONDISK(c_seg))) {
5447 				/* it was compressed resident in memory */
5448 				int     pages_populated;
5449 
5450 				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5451 				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5452 
5453 				if (pages_populated) {
5454 					assert(c_seg->c_state != C_ON_BAD_Q);
5455 					assert(c_seg->c_store.c_buffer != NULL);
5456 
5457 					C_SEG_BUSY(c_seg);
5458 					lck_mtx_unlock_always(&c_seg->c_lock);
5459 
5460 					kernel_memory_depopulate(
5461 						(vm_offset_t) c_seg->c_store.c_buffer,
5462 						ptoa(pages_populated),
5463 						KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5464 
5465 					lck_mtx_lock_spin_always(&c_seg->c_lock);
5466 					C_SEG_WAKEUP_DONE(c_seg);
5467 				}
5468 				/* minor compaction will free it */
5469 				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5470 					if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5471 						/* If we're on the swapout q, we want to get out of it since there's no reason to swap out
5472 						 * anymore, so put it on the AGE Q in the meantime until minor compaction */
5473 						bool clear_busy = false;
5474 						if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5475 							C_SEG_BUSY(c_seg);
5476 
5477 							lck_mtx_unlock_always(&c_seg->c_lock);
5478 							lck_mtx_lock_spin_always(c_list_lock);
5479 							lck_mtx_lock_spin_always(&c_seg->c_lock);
5480 							clear_busy = true;
5481 						}
5482 						c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5483 						if (clear_busy) {
5484 							C_SEG_WAKEUP_DONE(c_seg);
5485 							clear_busy = false;
5486 						}
5487 						lck_mtx_unlock_always(c_list_lock);
5488 					}
5489 					c_seg_need_delayed_compaction(c_seg, FALSE);
5490 				}
5491 			} else { /* C_SEG_IS_ONDISK(c_seg) */
5492 				/* it's empty and on-disk, make sure it's marked as sparse */
5493 				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5494 					c_seg_move_to_sparse_list(c_seg);
5495 					consider_defragmenting = TRUE;
5496 				}
5497 			}
5498 		} else if (c_seg->c_on_minorcompact_q) {
5499 			assert(c_seg->c_state != C_ON_BAD_Q);
5500 			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5501 
5502 			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5503 				c_seg_try_minor_compaction_and_unlock(c_seg);
5504 				need_unlock = FALSE;
5505 			}
5506 		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
5507 			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5508 			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5509 				c_seg_need_delayed_compaction(c_seg, FALSE);
5510 			}
5511 		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5512 			c_seg_move_to_sparse_list(c_seg);
5513 			consider_defragmenting = TRUE;
5514 		}
5515 	} /* c_state != C_IS_FILLING */
5516 done:
5517 	if (__improbable(kdp_mode)) {
5518 		return retval;
5519 	}
5520 
5521 	if (need_unlock == TRUE) {
5522 		lck_mtx_unlock_always(&c_seg->c_lock);
5523 	}
5524 
5525 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5526 
5527 	if (consider_defragmenting == TRUE) {
5528 		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5529 	}
5530 
5531 #if !XNU_TARGET_OS_OSX
5532 	/*
5533 	 * Decompressions will generate fragmentation in the compressor pool
5534 	 * over time. Consider waking the compactor thread if any of the
5535 	 * fragmentation thresholds have been crossed as a result of this
5536 	 * decompression.
5537 	 */
5538 	vm_consider_waking_compactor_swapper();
5539 #endif /* !XNU_TARGET_OS_OSX */
5540 
5541 	return retval;
5542 }
5543 
5544 
5545 inline bool
5546 vm_compressor_is_slot_compressed(int *slot)
5547 {
5548 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5549 #pragma unused(slot)
5550 	return true;
5551 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5552 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5553 	return !slot_ptr->s_uncompressed;
5554 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5555 }
5556 
5557 vm_decompress_result_t
5558 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5559 {
5560 	c_slot_mapping_t  slot_ptr;
5561 	char    *dst;
5562 	int     zeroslot = 1;
5563 	vm_decompress_result_t retval;
5564 
5565 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5566 	if (flags & C_PAGE_UNMODIFIED) {
5567 		int iretval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5568 		if (iretval == 0) {
5569 			os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5570 			return DECOMPRESS_SUCCESS;
5571 		}
5572 
5573 		return DECOMPRESS_FAILED_UNMODIFIED;
5574 	}
5575 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5576 
5577 	/* get address in physical aperture of this page for fill into */
5578 	dst = pmap_map_compressor_page(pn);
5579 	slot_ptr = (c_slot_mapping_t)slot;
5580 
5581 	assert(dst != NULL);
5582 
5583 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5584 		int32_t         data;
5585 		int32_t         *dptr;
5586 
5587 		/*
5588 		 * page was populated with a single value
5589 		 * that found a home in our hash table;
5590 		 * grab that value from the hash and
5591 		 * populate the page with it
5592 		 */
5593 		dptr = (int32_t *)(uintptr_t)dst;
5594 		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5595 		sv_decompress(dptr, data);
5596 
5597 		if (!(flags & C_KEEP)) {
5598 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5599 
5600 			os_atomic_dec(&c_segment_pages_compressed, relaxed);
5601 			*slot = 0;
5602 		}
5603 		if (data) {
5604 			os_atomic_inc(&c_segment_svp_nonzero_decompressions, relaxed);
5605 		} else {
5606 			os_atomic_inc(&c_segment_svp_zero_decompressions, relaxed);
5607 		}
5608 
5609 		pmap_unmap_compressor_page(pn, dst);
5610 		return DECOMPRESS_SUCCESS;
5611 	}
5612 	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5613 
5614 	/*
5615 	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5616 	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5617 	 */
5618 	if (zeroslot) {
5619 		*slot = 0;
5620 	}
5621 
5622 	pmap_unmap_compressor_page(pn, dst);
5623 
5624 	/*
5625 	 * returns 0 if we successfully decompressed a page from a segment already in memory
5626 	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
5627 	 * returns -1 if we encountered an error swapping in the segment - decompression failed
5628 	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5629 	 */
5630 	return retval;
5631 }
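
/*
 * A minimal sketch (not compiled) of how a caller might act on the result
 * codes documented above; only the two success values are named here,
 * everything else signals a failure or a would-block condition:
 *
 *	switch (vm_compressor_get(pn, &slot, 0)) {
 *	case DECOMPRESS_SUCCESS:            // segment was already in memory
 *	case DECOMPRESS_SUCCESS_SWAPPEDIN:  // had to swap the segment in first
 *		break;                      // page pn holds the data, slot released
 *	default:                            // error, or would block with C_DONT_BLOCK
 *		break;
 *	}
 */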
5632 
5633 vm_decompress_result_t
5634 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5635 {
5636 	bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5637 
5638 	if (slot_is_compressed) {
5639 		c_slot_mapping_t  slot_ptr;
5640 		int     zeroslot = 1;
5641 		vm_decompress_result_t retval = DECOMPRESS_SUCCESS;
5642 
5643 		assert(flags == 0 || flags == C_DONT_BLOCK);
5644 
5645 		slot_ptr = (c_slot_mapping_t)slot;
5646 
5647 		if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5648 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5649 			os_atomic_dec(&c_segment_pages_compressed, relaxed);
5650 
5651 			*slot = 0;
5652 			return DECOMPRESS_SUCCESS;
5653 		}
5654 
5655 		retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5656 		/*
5657 		 * returns 0 if we successfully freed the specified compressed page
5658 		 * returns -1 if we encountered an error swapping in the segment - decompression failed
5659 		 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5660 		 */
5661 
5662 		if (retval == DECOMPRESS_SUCCESS) {
5663 			*slot = 0;
5664 		}
5665 
5666 		return retval;
5667 	}
5668 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5669 	else {
5670 		if ((flags & C_PAGE_UNMODIFIED) == 0) {
5671 			/* moving from uncompressed state to compressed. Free it.*/
5672 			vm_uncompressed_free(slot, 0);
5673 			assert(*slot == 0);
5674 		}
5675 	}
5676 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5677 	return DECOMPRESS_SUCCESS;
5678 }
5679 
5680 kern_return_t
5681 vm_compressor_put(ppnum_t pn, int *slot, void  **current_chead, char *scratch_buf, vm_compressor_options_t flags)
5682 {
5683 	char *src;
5684 	kern_return_t kr;
5685 
5686 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5687 	if (flags & C_PAGE_UNMODIFIED) {
5688 		if (*slot) {
5689 			os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
5690 			return KERN_SUCCESS;
5691 		} else {
5692 			kr = vm_uncompressed_put(pn, slot);
5693 			if (kr == KERN_SUCCESS) {
5694 				os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
5695 				return kr;
5696 			}
5697 		}
5698 	}
5699 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5700 
5701 	/* get the address of the page in the physical aperture in the kernel task's virtual memory */
5702 	src = pmap_map_compressor_page(pn);
5703 	assert(src != NULL);
5704 
5705 	kr = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf, flags);
5706 	pmap_unmap_compressor_page(pn, src);
5707 
5708 	return kr;
5709 }
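
/*
 * A minimal sketch (not compiled) of the put/get round trip through the two
 * entry points above; chead and scratch are placeholders for state the real
 * callers keep per compressor thread:
 *
 *	int   slot  = 0;
 *	void *chead = NULL;   // currently-filling c_seg, one per thread
 *	char *scratch;        // codec scratch buffer, one per thread
 *
 *	if (vm_compressor_put(pn, &slot, &chead, scratch, 0) == KERN_SUCCESS) {
 *		// ... later, on page-in: decompress and release the slot
 *		(void) vm_compressor_get(pn, &slot, 0);
 *	}
 */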
5710 
5711 void
5712 vm_compressor_transfer(
5713 	int     *dst_slot_p,
5714 	int     *src_slot_p)
5715 {
5716 	c_slot_mapping_t        dst_slot, src_slot;
5717 	c_segment_t             c_seg;
5718 	uint16_t                c_indx;
5719 	c_slot_t                cs;
5720 
5721 	src_slot = (c_slot_mapping_t) src_slot_p;
5722 
5723 	if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
5724 		*dst_slot_p = *src_slot_p;
5725 		*src_slot_p = 0;
5726 		return;
5727 	}
5728 	dst_slot = (c_slot_mapping_t) dst_slot_p;
5729 Retry:
5730 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
5731 	/* get segment for src_slot */
5732 	c_seg = c_segments_get(src_slot->s_cseg - 1)->c_seg;
5733 	/* lock segment */
5734 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5735 	/* wait if it's busy */
5736 	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
5737 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5738 		c_seg_wait_on_busy(c_seg);
5739 		goto Retry;
5740 	}
5741 	/* find the c_slot */
5742 	c_indx = src_slot->s_cindx;
5743 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5744 	/* point the c_slot back to dst_slot instead of src_slot */
5745 	C_SLOT_ASSERT_PACKABLE(dst_slot);
5746 	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
5747 	/* transfer */
5748 	*dst_slot_p = *src_slot_p;
5749 	*src_slot_p = 0;
5750 	lck_mtx_unlock_always(&c_seg->c_lock);
5751 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5752 }
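
/*
 * For example, when the VM moves a compressed page's mapping word from one
 * location to another, the c_slot's packed back-pointer must follow it:
 * minor compaction uses C_SLOT_UNPACK_PTR() to find and update the owning
 * mapping, so a stale back-pointer would let compaction scribble on the
 * wrong slot.
 */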
5753 
5754 #if defined(__arm64__)
5755 extern uint64_t vm_swapfile_last_failed_to_create_ts;
5756 __attribute__((noreturn))
5757 void
5758 vm_panic_hibernate_write_image_failed(
5759 	int err,
5760 	uint64_t file_size_min,
5761 	uint64_t file_size_max,
5762 	uint64_t file_size)
5763 {
5764 	panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d, %llu, %llu, %llu\n",
5765 	    err,
5766 	    VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
5767 	    c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
5768 	    vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
5769 	    (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out,
5770 	    file_size_min, file_size_max, file_size);
5771 }
5772 #endif /*(__arm64__)*/
5773 
5774 #if CONFIG_FREEZE
5775 
5776 int     freezer_finished_filling = 0;
5777 
5778 void
5779 vm_compressor_finished_filling(
5780 	void    **current_chead)
5781 {
5782 	c_segment_t     c_seg;
5783 
5784 	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
5785 		return;
5786 	}
5787 
5788 	assert(c_seg->c_state == C_IS_FILLING);
5789 
5790 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5791 
5792 	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
5793 
5794 	lck_mtx_unlock_always(&c_seg->c_lock);
5795 
5796 	freezer_finished_filling++;
5797 }
5798 
5799 
5800 /*
5801  * This routine is used to transfer the compressed chunks from
5802  * the c_seg/cindx pointed to by slot_p into a new c_seg headed
5803  * by the current_chead and a new cindx within that c_seg.
5804  *
5805  * Currently, this routine is only used by the "freezer backed by
5806  * compressor with swap" mode to create a series of c_segs that
5807  * only contain compressed data belonging to one task. So, we
5808  * move a task's previously compressed data into a set of new
5809  * c_segs which will also hold the task's yet to be compressed data.
5810  */
5811 
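/*
 * A minimal sketch (not compiled) of how a freezer-style caller might drive
 * this routine; next_compressed_slot_for_task() is a hypothetical iterator,
 * the real walk happens in the freezer / compressor-pager code:
 *
 *	void *freezer_chead = NULL;
 *	int  *slot_p;
 *
 *	while ((slot_p = next_compressed_slot_for_task(task)) != NULL) {
 *		if (vm_compressor_relocate(&freezer_chead, slot_p) != KERN_SUCCESS) {
 *			break;  // out of segments: abandon this freeze pass
 *		}
 *	}
 *	vm_compressor_finished_filling(&freezer_chead);
 */
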
5812 kern_return_t
5813 vm_compressor_relocate(
5814 	void            **current_chead,
5815 	int             *slot_p)
5816 {
5817 	c_slot_mapping_t        slot_ptr;
5818 	c_slot_mapping_t        src_slot;
5819 	uint32_t                c_rounded_size;
5820 	uint32_t                c_size;
5821 	uint16_t                dst_slot;
5822 	c_slot_t                c_dst;
5823 	c_slot_t                c_src;
5824 	uint16_t                c_indx;
5825 	c_segment_t             c_seg_dst = NULL;
5826 	c_segment_t             c_seg_src = NULL;
5827 	kern_return_t           kr = KERN_SUCCESS;
5828 	bool                    nearing_limits;
5829 
5830 
5831 	src_slot = (c_slot_mapping_t) slot_p;
5832 
5833 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
5834 		/*
5835 		 * no need to relocate... this is a page full of a single
5836 		 * value which is hashed to a single entry not contained
5837 		 * in a c_segment_t
5838 		 */
5839 		return kr;
5840 	}
5841 
5842 	if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
5843 		/*
5844 		 * Unmodified anonymous pages are sitting uncompressed on disk.
5845 		 * So don't pull them back in again.
5846 		 */
5847 		return kr;
5848 	}
5849 
5850 Relookup_dst:
5851 	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead, &nearing_limits);
5852 	/*
5853 	 * returns with c_seg lock held
5854 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5855 	 * c_nextslot has been allocated and
5856 	 * c_store.c_buffer populated
5857 	 */
5858 	if (c_seg_dst == NULL) {
5859 		/*
5860 		 * Out of compression segments?
5861 		 */
5862 		if (nearing_limits) {
5863 			memorystatus_respond_to_compressor_exhaustion();
5864 		}
5865 		kr = KERN_RESOURCE_SHORTAGE;
5866 		goto out;
5867 	}
5868 
5869 	assert(c_seg_dst->c_busy == 0);
5870 
5871 	C_SEG_BUSY(c_seg_dst);
5872 
5873 	dst_slot = c_seg_dst->c_nextslot;
5874 
5875 	lck_mtx_unlock_always(&c_seg_dst->c_lock);
5876 	if (nearing_limits) {
5877 		memorystatus_respond_to_compressor_exhaustion();
5878 	}
5879 
5880 Relookup_src:
5881 	c_seg_src = c_segments_get(src_slot->s_cseg - 1)->c_seg;
5882 
5883 	assert(c_seg_dst != c_seg_src);
5884 
5885 	lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5886 
5887 	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5888 	    c_seg_src->c_state == C_IS_FILLING) {
5889 		/*
5890 		 * Skip this page if:
5891 		 * a) the src c_seg is already on-disk (or on its way there)
5892 		 *    A "thaw" can mark a process as eligible for
5893 		 * another freeze cycle without bringing any of
5894 		 * its swapped out c_segs back from disk (because
5895 		 * that is done on-demand).
5896 		 *    Or, this page may be mapped elsewhere in the task's map,
5897 		 * and we may have marked it for swap already.
5898 		 *
5899 		 * b) the src c_seg is being filled by the compressor
5900 		 * thread. We don't want the added latency of waiting for
5901 		 * this c_seg in the freeze path and so we skip it.
5902 		 */
5903 
5904 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5905 
5906 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5907 
5908 		c_seg_src = NULL;
5909 
5910 		goto out;
5911 	}
5912 
5913 	if (c_seg_src->c_busy) {
5914 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5915 		c_seg_wait_on_busy(c_seg_src);
5916 
5917 		c_seg_src = NULL;
5918 
5919 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5920 
5921 		goto Relookup_src;
5922 	}
5923 
5924 	C_SEG_BUSY(c_seg_src);
5925 
5926 	lck_mtx_unlock_always(&c_seg_src->c_lock);
5927 
5928 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5929 
5930 	/* find the c_slot */
5931 	c_indx = src_slot->s_cindx;
5932 
5933 	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5934 
5935 	c_size = UNPACK_C_SIZE(c_src);
5936 
5937 	assert(c_size);
5938 	int combined_size = c_size + c_slot_extra_size(c_src);
5939 
5940 	if (combined_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5941 		/*
5942 		 * This segment is full. We need a new one.
5943 		 */
5944 
5945 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5946 
5947 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5948 		C_SEG_WAKEUP_DONE(c_seg_src);
5949 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5950 
5951 		c_seg_src = NULL;
5952 
5953 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5954 
5955 		assert(c_seg_dst->c_busy);
5956 		assert(c_seg_dst->c_state == C_IS_FILLING);
5957 		assert(!c_seg_dst->c_on_minorcompact_q);
5958 
5959 		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5960 		assert(*current_chead == NULL);
5961 
5962 		C_SEG_WAKEUP_DONE(c_seg_dst);
5963 
5964 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5965 
5966 		c_seg_dst = NULL;
5967 
5968 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5969 
5970 		goto Relookup_dst;
5971 	}
5972 
5973 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5974 
5975 	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
5976 	/*
5977 	 * Is platform alignment actually necessary since wkdm aligns its output?
5978 	 */
5979 	c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);
5980 
5981 	cslot_copy(c_dst, c_src);
5982 	c_dst->c_offset = c_seg_dst->c_nextoffset;
5983 
5984 	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5985 		c_seg_dst->c_firstemptyslot++;
5986 	}
5987 
5988 	c_seg_dst->c_slots_used++;
5989 	c_seg_dst->c_nextslot++;
5990 	c_seg_dst->c_bytes_used += c_rounded_size;
5991 	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5992 
5993 
5994 	PACK_C_SIZE(c_src, 0);
5995 
5996 	c_seg_src->c_bytes_used -= c_rounded_size;
5997 	c_seg_src->c_bytes_unused += c_rounded_size;
5998 
5999 	assert(c_seg_src->c_slots_used);
6000 	c_seg_src->c_slots_used--;
6001 
6002 	if (!c_seg_src->c_swappedin) {
6003 		/* Pessimistically lose swappedin status when non-swappedin pages are added. */
6004 		c_seg_dst->c_swappedin = false;
6005 	}
6006 
6007 	if (c_indx < c_seg_src->c_firstemptyslot) {
6008 		c_seg_src->c_firstemptyslot = c_indx;
6009 	}
6010 
6011 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
6012 
6013 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6014 	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
6015 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
6016 	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
6017 	slot_ptr->s_cindx = dst_slot;
6018 
6019 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6020 
6021 out:
6022 	if (c_seg_src) {
6023 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6024 
6025 		C_SEG_WAKEUP_DONE(c_seg_src);
6026 
6027 		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
6028 			if (!c_seg_src->c_on_minorcompact_q) {
6029 				c_seg_need_delayed_compaction(c_seg_src, FALSE);
6030 			}
6031 		}
6032 
6033 		lck_mtx_unlock_always(&c_seg_src->c_lock);
6034 	}
6035 
6036 	if (c_seg_dst) {
6037 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
6038 
6039 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6040 
6041 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
6042 			/*
6043 			 * Nearing or exceeded maximum slot and offset capacity.
6044 			 */
6045 			assert(c_seg_dst->c_busy);
6046 			assert(c_seg_dst->c_state == C_IS_FILLING);
6047 			assert(!c_seg_dst->c_on_minorcompact_q);
6048 
6049 			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6050 			assert(*current_chead == NULL);
6051 		}
6052 
6053 		C_SEG_WAKEUP_DONE(c_seg_dst);
6054 
6055 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
6056 
6057 		c_seg_dst = NULL;
6058 
6059 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6060 	}
6061 
6062 	return kr;
6063 }
6064 #endif /* CONFIG_FREEZE */
6065 
6066 #if DEVELOPMENT || DEBUG
6067 
6068 void
6069 vm_compressor_inject_error(int *slot)
6070 {
6071 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
6072 
6073 	/* No error detection for single-value compression. */
6074 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
6075 		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__);
6076 		return;
6077 	}
6078 
6079 	/* s_cseg is actually "segno+1" */
6080 	const uint32_t c_segno = slot_ptr->s_cseg - 1;
6081 
6082 	assert(c_segno < c_segments_available);
6083 	assert(c_segments_get(c_segno)->c_segno >= c_segments_available);
6084 
6085 	const c_segment_t c_seg = c_segments_get(c_segno)->c_seg;
6086 
6087 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
6088 
6089 	lck_mtx_lock_spin_always(&c_seg->c_lock);
6090 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
6091 
6092 	const uint16_t c_indx = slot_ptr->s_cindx;
6093 	assert(c_indx < c_seg->c_nextslot);
6094 
6095 	/*
6096 	 * To safely make this segment temporarily writable, we need to mark
6097 	 * the segment busy, which allows us to release the segment lock.
6098 	 */
6099 	while (c_seg->c_busy) {
6100 		c_seg_wait_on_busy(c_seg);
6101 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6102 	}
6103 	C_SEG_BUSY(c_seg);
6104 
6105 	bool already_writable = (c_seg->c_state == C_IS_FILLING);
6106 	if (!already_writable) {
6107 		/*
6108 		 * Protection update must be performed preemptibly, so temporarily drop
6109 		 * the lock. Having set c_busy will prevent most other concurrent
6110 		 * operations.
6111 		 */
6112 		lck_mtx_unlock_always(&c_seg->c_lock);
6113 		C_SEG_MAKE_WRITEABLE(c_seg);
6114 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6115 	}
6116 
6117 	/*
6118 	 * Once we've released the lock following our c_state == C_IS_FILLING check,
6119 	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
6120 	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
6121 	 * can detect this by re-checking after we've reobtained the lock.
6122 	 */
6123 	if (already_writable && c_seg->c_state != C_IS_FILLING) {
6124 		lck_mtx_unlock_always(&c_seg->c_lock);
6125 		C_SEG_MAKE_WRITEABLE(c_seg);
6126 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6127 		already_writable = false;
6128 		/* Segment can't be freed while c_busy is set. */
6129 		assert(c_seg->c_state != C_IS_FILLING);
6130 	}
6131 
6132 	/*
6133 	 * Skip if the segment is on disk. This check can only be performed after
6134 	 * the final acquisition of the segment lock before we attempt to write to
6135 	 * the segment.
6136 	 */
6137 	if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
6138 		c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6139 		int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
6140 		/* assume that the compressed data holds at least one int32_t */
6141 		/* assume the compressed data is larger than one int32_t */
6142 		/*
6143 		 * This bit is known to be in the payload of a MISS packet resulting from
6144 		 * the pattern used in the test pattern from decompression_failure.c.
6145 		 * Flipping it should result in many corrupted bits in the test page.
6146 		 */
6147 		data[0] ^= 0x00000100;
6148 	}
6149 
6150 	if (!already_writable) {
6151 		lck_mtx_unlock_always(&c_seg->c_lock);
6152 		C_SEG_WRITE_PROTECT(c_seg);
6153 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6154 	}
6155 
6156 	C_SEG_WAKEUP_DONE(c_seg);
6157 	lck_mtx_unlock_always(&c_seg->c_lock);
6158 
6159 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
6160 }
6161 
6162 /*
6163  * Serialize information about a specific segment
6164  * returns KERN_SUCCESS if the segment was written or there's nothing to write for this segno
6165  *         KERN_NO_SPACE if there's not enough space in the buffer
6166  * argument size: input - the size of the input buffer; output - the size written, set to 0 on failure
6167  */
6168 kern_return_t
6169 vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size, vm_c_serialize_add_data_t with_data)
6170 {
6171 	size_t insize = *size;
6172 	size_t offset = 0;
6173 	*size = 0;
6174 	if (c_segments_get(segno)->c_segno < c_segments_available) {
6175 		/* This check means no pointer is assigned here, so the entry must be an index in the free list.
6176 		 * If this were an active c_segment, .c_seg would hold a pointer, which interpreted as an integer
6177 		 * would be larger than c_segments_available. See also the assert to this effect right after
6178 		 * c_seg is assigned in c_seg_allocate()
6179 		 */
6180 		return KERN_SUCCESS;
6181 	}
6182 	if (c_segments_get(segno)->c_segno == (uint32_t)-1) {
6183 		/* c_segno of the end of the free-list */
6184 		return KERN_SUCCESS;
6185 	}
6186 
6187 	const struct c_segment* c_seg = c_segments_get(segno)->c_seg;
6188 	if (c_seg->c_state == C_IS_FREE) {
6189 		return KERN_SUCCESS; /* nothing needs to be done */
6190 	}
6191 
6192 	int nslots = c_seg->c_nextslot;
6193 	/* do we have enough space for slots (without data)? */
6194 	if (sizeof(struct c_segment_info) + (nslots * sizeof(struct c_slot_info)) > insize) {
6195 		return KERN_NO_SPACE; /* not enough space, please call me again */
6196 	}
6197 
6198 	struct c_segment_info* csi = (struct c_segment_info*)buf;
6199 	offset += sizeof(struct c_segment_info);
6200 
6201 	csi->csi_mysegno = c_seg->c_mysegno;
6202 	csi->csi_creation_ts = c_seg->c_creation_ts;
6203 	csi->csi_swappedin_ts = c_seg->c_swappedin_ts;
6204 	csi->csi_bytes_unused = c_seg->c_bytes_unused;
6205 	csi->csi_bytes_used = c_seg->c_bytes_used;
6206 	csi->csi_populated_offset = c_seg->c_populated_offset;
6207 	csi->csi_state = c_seg->c_state;
6208 	csi->csi_swappedin = c_seg->c_swappedin;
6209 	csi->csi_on_minor_compact_q = c_seg->c_on_minorcompact_q;
6210 	csi->csi_has_donated_pages = c_seg->c_has_donated_pages;
6211 	csi->csi_slots_used = (uint16_t)c_seg->c_slots_used;
6212 	csi->csi_slot_var_array_len = c_seg->c_slot_var_array_len;
6213 	csi->csi_slots_len = (uint16_t)nslots;
6214 #if TRACK_C_SEGMENT_UTILIZATION
6215 	csi->csi_decompressions_since_swapin = c_seg->c_decompressions_since_swapin;
6216 #else
6217 	csi->csi_decompressions_since_swapin = 0;
6218 #endif /* TRACK_C_SEGMENT_UTILIZATION */
6219 
6220 	for (int si = 0; si < nslots; ++si) {
6221 		if (offset + sizeof(struct c_slot_info) > insize) {
6222 			return KERN_NO_SPACE;
6223 		}
6224 		/* see also c_seg_validate() for some of the details */
6225 		const struct c_slot* cs = C_SEG_SLOT_FROM_INDEX(c_seg, si);
6226 		struct c_slot_info* ssi = (struct c_slot_info*)(buf + offset);
6227 		offset += sizeof(struct c_slot_info);
6228 		ssi->csi_size = (uint16_t)UNPACK_C_SIZE(cs);
6229 #pragma unused(with_data)
6230 		ssi->csi_unused = 0;
6231 	}
6232 	*size = offset;
6233 	return KERN_SUCCESS;
6234 }
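
/*
 * A minimal sketch (not compiled) of the retry contract above: on
 * KERN_NO_SPACE the caller grows the buffer and calls again, re-initializing
 * *size each attempt since it is reset to 0 on failure:
 *
 *	size_t sz = bufsize;
 *	kern_return_t kr = vm_compressor_serialize_segment_debug_info(segno, buf, &sz, with_data);
 *	if (kr == KERN_NO_SPACE) {
 *		// grow buf, set sz to the new capacity, and call again
 *	}
 */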
6235 
6236 #endif /* DEVELOPMENT || DEBUG */
6237 
6238 
6239 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
6240 
6241 struct vnode;
6242 extern void vm_swapfile_open(const char *path, struct vnode **vp);
6243 extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);
6244 
6245 struct vnode *uncompressed_vp0 = NULL;
6246 struct vnode *uncompressed_vp1 = NULL;
6247 uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
6248 uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;
6249 
6250 uint64_t compressor_ro_uncompressed = 0;
6251 uint64_t compressor_ro_uncompressed_total_returned = 0;
6252 uint64_t compressor_ro_uncompressed_skip_returned = 0;
6253 uint64_t compressor_ro_uncompressed_get = 0;
6254 uint64_t compressor_ro_uncompressed_put = 0;
6255 uint64_t compressor_ro_uncompressed_swap_usage = 0;
6256 
6257 extern void vnode_put(struct vnode* vp);
6258 extern int vnode_getwithref(struct vnode* vp);
6259 extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);
6260 
6261 #define MAX_OFFSET_PAGES        (255)
6262 uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6263 uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];
6264 
6265 #define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1<<31ull) - 1)
6266 #define UNCOMPRESSED_FILEIDX_SHIFT (29)
6267 #define UNCOMPRESSED_FILEIDX_MASK (3)
6268 #define UNCOMPRESSED_OFFSET_SHIFT (29)
6269 #define UNCOMPRESSED_OFFSET_MASK (7)
6270 
6271 static uint32_t
6272 vm_uncompressed_extract_swap_file(int slot)
6273 {
6274 	uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
6275 	return fileidx;
6276 }
6277 
6278 static uint32_t
6279 vm_uncompressed_extract_swap_offset(int slot)
6280 {
6281 	return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
6282 }
6283 
6284 static void
6285 vm_uncompressed_return_space_to_swap(int slot)
6286 {
6287 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6288 	uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
6289 	if (fileidx == 1) {
6290 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6291 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6292 		uint64_t chunkidx = pgidx / 64;
6293 		uint64_t chunkoffset = pgidx % 64;
6294 #if DEVELOPMENT || DEBUG
6295 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6296 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6297 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6298 #endif /*DEVELOPMENT || DEBUG*/
6299 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6300 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6301 		uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6302 		assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6303 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6304 
6305 		uncompressed_file0_free_pages++;
6306 	} else {
6307 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6308 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6309 		uint64_t chunkidx = pgidx / 64;
6310 		uint64_t chunkoffset = pgidx % 64;
6311 		assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6312 		    "%llu %llu", chunkidx, chunkoffset);
6313 		uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6314 
6315 		uncompressed_file1_free_pages++;
6316 	}
6317 	compressor_ro_uncompressed_swap_usage--;
6318 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6319 }
6320 
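/*
 * The bitmap scans in the allocator below carry no explicit bounds check:
 * they rely on the free-page counters, tested before the scan starts, to
 * guarantee that a clear bit exists before the end of the array.  A sketch
 * of an equivalent lookup using the count-trailing-zeros builtin
 * (illustrative alternative, not what the source does):
 *
 *	while (bitmap[chunkidx] == ~0ULL) chunkidx++;
 *	chunkoffset = (uint64_t)__builtin_ctzll(~bitmap[chunkidx]);
 */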
6321 static int
6322 vm_uncompressed_reserve_space_in_swap(void)
6323 {
6324 	int slot = 0;
6325 	if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
6326 		return -1;
6327 	}
6328 
6329 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6330 	if (uncompressed_file0_free_pages) {
6331 		uint64_t chunkidx = 0;
6332 		uint64_t chunkoffset = 0;
6333 		while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6334 			chunkidx++;
6335 		}
6336 		while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6337 			chunkoffset++;
6338 		}
6339 
6340 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
6341 		    "%llu %llu", chunkidx, chunkoffset);
6342 #if DEVELOPMENT || DEBUG
6343 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6344 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6345 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6346 #endif /*DEVELOPMENT || DEBUG*/
6347 		uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6348 		uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6349 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6350 		    "%llu %llu", chunkidx, chunkoffset);
6351 
6352 		assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
6353 		slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
6354 		uncompressed_file0_free_pages--;
6355 	} else {
6356 		uint64_t chunkidx = 0;
6357 		uint64_t chunkoffset = 0;
6358 		while (uncompressed_file1_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6359 			chunkidx++;
6360 		}
6361 		while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6362 			chunkoffset++;
6363 		}
6364 		assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
6365 		uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6366 		uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6367 		slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
6368 		uncompressed_file1_free_pages--;
6369 	}
6370 	compressor_ro_uncompressed_swap_usage++;
6371 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6372 	return slot;
6373 }
6374 
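/*
 * A small fixed pool of kernel bounce buffers for swapfile I/O.  Callers
 * claim an entry while holding PAGE_REPLACEMENT_ALLOWED(); if all
 * MAX_IO_REQ entries are busy they assert_wait() on the pool address and
 * block until a finishing thread clears an entry's inuse flag and issues
 * a thread_wakeup() (see vm_uncompressed_put()/vm_uncompressed_get()).
 */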
6375 #define MAX_IO_REQ (16)
6376 struct _uncompressor_io_req {
6377 	uint64_t addr;
6378 	bool inuse;
6379 } uncompressor_io_req[MAX_IO_REQ];
6380 
6381 int
6382 vm_uncompressed_put(ppnum_t pn, int *slot)
6383 {
6384 	int retval = 0;
6385 	struct vnode *uncompressed_vp = NULL;
6386 	uint64_t uncompress_offset = 0;
6387 
6388 again:
6389 	if (uncompressed_vp0 == NULL) {
6390 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6391 		if (uncompressed_vp0 == NULL) {
6392 			uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
6393 			vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
6394 			if (uncompressed_vp0 == NULL) {
6395 				PAGE_REPLACEMENT_ALLOWED(FALSE);
6396 				return KERN_NO_ACCESS;
6397 			}
6398 			vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
6399 			uncompressed_file0_free_pages = (uint32_t)atop(size);
6400 			bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6401 
6402 			int i = 0;
6403 			for (; i < MAX_IO_REQ; i++) {
6404 				kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
6405 				uncompressor_io_req[i].inuse = false;
6406 			}
6407 
6408 			vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
6409 			assert(uncompressed_vp1);
6410 			vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
6411 			uncompressed_file1_free_pages = (uint32_t)atop(size);
6412 			bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6413 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6414 		} else {
6415 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6416 			delay(100);
6417 			goto again;
6418 		}
6419 	}
6420 
6421 	int swapinfo = vm_uncompressed_reserve_space_in_swap();
6422 	if (swapinfo == -1) {
6423 		*slot = 0;
6424 		return KERN_RESOURCE_SHORTAGE;
6425 	}
6426 
6427 	if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
6428 		uncompressed_vp = uncompressed_vp0;
6429 	} else {
6430 		uncompressed_vp = uncompressed_vp1;
6431 	}
6432 	uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
6433 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6434 		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
6435 	} else {
6436 		int i = 0;
6437 retry:
6438 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6439 		for (i = 0; i < MAX_IO_REQ; i++) {
6440 			if (uncompressor_io_req[i].inuse == false) {
6441 				uncompressor_io_req[i].inuse = true;
6442 				break;
6443 			}
6444 		}
6445 		if (i == MAX_IO_REQ) {
6446 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6447 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6448 			thread_block(THREAD_CONTINUE_NULL);
6449 			goto retry;
6450 		}
6451 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6452 		void *addr = pmap_map_compressor_page(pn);
6453 		memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
6454 		pmap_unmap_compressor_page(pn, addr);
6455 
6456 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
6457 		if (retval) {
6458 			*slot = 0;
6459 		} else {
6460 			*slot = (int)swapinfo;
6461 			((c_slot_mapping_t)(slot))->s_uncompressed = 1;
6462 		}
6463 		vnode_put(uncompressed_vp);
6464 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6465 		uncompressor_io_req[i].inuse = false;
6466 		thread_wakeup((event_t)&uncompressor_io_req);
6467 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6468 	}
6469 	return retval;
6470 }
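/*
 * Illustrative usage (example only, not part of the source): a page is
 * written out with vm_uncompressed_put(), which hands back an opaque slot
 * that later drives the read back in and the space release:
 *
 *	int slot = 0;
 *	if (vm_uncompressed_put(pn, &slot) == 0) {
 *		...                                   // page can be reclaimed
 *		vm_uncompressed_get(pn, &slot, 0);    // read it back in
 *		vm_uncompressed_free(&slot, 0);       // return the swap space
 *	}
 */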
6471 
6472 int
6473 vm_uncompressed_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
6474 {
6475 	int retval = 0;
6476 	struct vnode *uncompressed_vp = NULL;
6477 	uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
6478 	uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);
6479 
6480 	if (__improbable(flags & C_KDP)) {
6481 		return -2;
6482 	}
6483 
6484 	if (fileidx == 1) {
6485 		uncompressed_vp = uncompressed_vp0;
6486 	} else {
6487 		uncompressed_vp = uncompressed_vp1;
6488 	}
6489 
6490 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6491 		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
6492 	} else {
6493 		int i = 0;
6494 retry:
6495 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6496 		for (i = 0; i < MAX_IO_REQ; i++) {
6497 			if (uncompressor_io_req[i].inuse == false) {
6498 				uncompressor_io_req[i].inuse = true;
6499 				break;
6500 			}
6501 		}
6502 		if (i == MAX_IO_REQ) {
6503 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6504 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6505 			thread_block(THREAD_CONTINUE_NULL);
6506 			goto retry;
6507 		}
6508 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6509 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
6510 		vnode_put(uncompressed_vp);
6511 		void *addr = pmap_map_compressor_page(pn);
6512 		memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
6513 		pmap_unmap_compressor_page(pn, addr);
6514 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6515 		uncompressor_io_req[i].inuse = false;
6516 		thread_wakeup((event_t)&uncompressor_io_req);
6517 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6518 	}
6519 	return retval;
6520 }
6521 
6522 int
6523 vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
6524 {
6525 	vm_uncompressed_return_space_to_swap(*slot);
6526 	*slot = 0;
6527 	return 0;
6528 }
6529 
6530 #endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
6531