xref: /xnu-8020.101.4/osfmk/vm/vm_compressor.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_compressor.h>
30 
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache.h>
33 #endif
34 
35 #include <vm/vm_map.h>
36 #include <vm/vm_pageout.h>
37 #include <vm/memory_object.h>
38 #include <vm/vm_compressor_algorithms.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_protos.h>
41 #include <mach/mach_host.h>             /* for host_info() */
42 #if DEVELOPMENT || DEBUG
43 #include <kern/hvg_hypercall.h>
44 #endif
45 #include <kern/ledger.h>
46 #include <kern/policy_internal.h>
47 #include <kern/thread_group.h>
48 #include <san/kasan.h>
49 
50 #if defined(__x86_64__)
51 #include <i386/misc_protos.h>
52 #endif
53 #if defined(__arm64__)
54 #include <arm/machine_routines.h>
55 #endif
56 
57 #include <IOKit/IOHibernatePrivate.h>
58 
59 TUNABLE(uint32_t, c_seg_bufsize, "vm_compressor_segment_buffer_size", (1024 * 256));
60 uint32_t c_seg_max_pages, c_seg_off_limit, c_seg_allocsize, c_seg_slot_var_array_min_len;
61 
62 extern boolean_t vm_darkwake_mode;
63 extern zone_t vm_page_zone;
64 
65 #if DEVELOPMENT || DEBUG
66 /* sysctl defined in bsd/dev/arm64/sysctl.c */
67 int do_cseg_wedge_thread(void);
68 int do_cseg_unwedge_thread(void);
69 static event_t debug_cseg_wait_event = NULL;
70 #endif /* DEVELOPMENT || DEBUG */
71 
72 #if CONFIG_FREEZE
73 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
74 void task_disown_frozen_csegs(task_t owner_task);
75 #endif /* CONFIG_FREEZE */
76 
77 #if POPCOUNT_THE_COMPRESSED_DATA
78 boolean_t popcount_c_segs = TRUE;
79 
80 static inline uint32_t
81 vmc_pop(uintptr_t ins, int sz)
82 {
83 	uint32_t rv = 0;
84 
85 	if (__probable(popcount_c_segs == FALSE)) {
86 		return 0xDEAD707C;
87 	}
88 
89 	while (sz >= 16) {
90 		uint32_t rv1, rv2;
91 		uint64_t *ins64 = (uint64_t *) ins;
92 		uint64_t *ins642 = (uint64_t *) (ins + 8);
93 		rv1 = __builtin_popcountll(*ins64);
94 		rv2 = __builtin_popcountll(*ins642);
95 		rv += rv1 + rv2;
96 		sz -= 16;
97 		ins += 16;
98 	}
99 
100 	while (sz >= 4) {
101 		uint32_t *ins32 = (uint32_t *) ins;
102 		rv += __builtin_popcount(*ins32);
103 		sz -= 4;
104 		ins += 4;
105 	}
106 
107 	while (sz > 0) {
108 		char *ins8 = (char *)ins;
109 		rv += __builtin_popcount(*ins8);
110 		sz--;
111 		ins++;
112 	}
113 	return rv;
114 }
115 #endif
116 
117 #if VALIDATE_C_SEGMENTS
118 boolean_t validate_c_segs = TRUE;
119 #endif
120 /*
121  * vm_compressor_mode has a hierarchy of control to set its value.
122  * boot-args are checked first, then device-tree, and finally
123  * the default value that is defined below. See vm_fault_init() for
124  * the boot-arg & device-tree code.
125  */
126 
127 #if !XNU_TARGET_OS_OSX
128 
129 #if CONFIG_FREEZE
130 int     vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
131 struct  freezer_context freezer_context_global;
132 #else /* CONFIG_FREEZE */
133 int     vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
134 #endif /* CONFIG_FREEZE */
135 
136 #else /* !XNU_TARGET_OS_OSX */
137 int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
138 
139 #endif /* !XNU_TARGET_OS_OSX */
140 
141 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
142 int             vm_compressor_is_active = 0;
143 int             vm_compressor_available = 0;
144 
145 extern uint64_t vm_swap_get_max_configured_space(void);
146 extern void     vm_pageout_io_throttle(void);
147 
148 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
149 extern unsigned int hash_string(char *cp, int len);
150 static unsigned int vmc_hash(char *, int);
151 boolean_t checksum_c_segs = TRUE;
152 
153 unsigned int
154 vmc_hash(char *cp, int len)
155 {
156 	if (__probable(checksum_c_segs == FALSE)) {
157 		return 0xDEAD7A37;
158 	}
159 	return hash_string(cp, len);
160 }
161 #endif
162 
163 #define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
164 #define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
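/*
 * NOTE (inferred from the two macros above; the c_slot layout itself lives in
 * the compressor headers): c_size is a narrow bit-field that cannot hold the
 * value PAGE_SIZE, so a slot holding a full, incompressible page is packed as
 * PAGE_SIZE - 1 and unpacked back to PAGE_SIZE. Stored sizes of PAGE_SIZE and
 * PAGE_SIZE - 1 therefore alias and both unpack as PAGE_SIZE.
 */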
165 
166 
167 struct c_sv_hash_entry {
168 	union {
169 		struct  {
170 			uint32_t        c_sv_he_ref;
171 			uint32_t        c_sv_he_data;
172 		} c_sv_he;
173 		uint64_t        c_sv_he_record;
174 	} c_sv_he_un;
175 };
176 
177 #define he_ref  c_sv_he_un.c_sv_he.c_sv_he_ref
178 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
179 #define he_record c_sv_he_un.c_sv_he_record
180 
181 #define C_SV_HASH_MAX_MISS      32
182 #define C_SV_HASH_SIZE          ((1 << 10))
183 #define C_SV_HASH_MASK          ((1 << 10) - 1)
184 #define C_SV_CSEG_ID            ((1 << 22) - 1)
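/*
 * NOTE (assumption, based on the c_sv_he_ref/c_sv_he_data fields above and the
 * c_segment_svp_* counters below): this hash caches "single value" pages --
 * pages whose every 32-bit word holds the same value, most commonly zero.
 * Such pages are recorded as a reference count plus the 32-bit value rather
 * than being handed to the compressor, and C_SV_CSEG_ID appears to be the
 * reserved segment number used in a slot mapping to mark that the page lives
 * in this hash instead of in a real c_segment.
 */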
185 
186 
187 union c_segu {
188 	c_segment_t     c_seg;
189 	uintptr_t       c_segno;
190 };
191 
192 #define C_SLOT_ASSERT_PACKABLE(ptr) \
193 	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
194 
195 #define C_SLOT_PACK_PTR(ptr) \
196 	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
197 
198 #define C_SLOT_UNPACK_PTR(cslot) \
199 	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
200 
201 /* for debugging purposes */
202 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
203     VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
204 
205 uint32_t        c_segment_count = 0;
206 uint32_t        c_segment_count_max = 0;
207 
208 uint64_t        c_generation_id = 0;
209 uint64_t        c_generation_id_flush_barrier;
210 
211 
212 #define         HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120
213 
214 boolean_t       hibernate_no_swapspace = FALSE;
215 boolean_t       hibernate_flush_timed_out = FALSE;
216 clock_sec_t     hibernate_flushing_deadline = 0;
217 
218 
219 #if RECORD_THE_COMPRESSED_DATA
220 char    *c_compressed_record_sbuf;
221 char    *c_compressed_record_ebuf;
222 char    *c_compressed_record_cptr;
223 #endif
224 
225 
226 queue_head_t    c_age_list_head;
227 queue_head_t    c_swappedin_list_head;
228 queue_head_t    c_swapout_list_head;
229 queue_head_t    c_swapio_list_head;
230 queue_head_t    c_swappedout_list_head;
231 queue_head_t    c_swappedout_sparse_list_head;
232 queue_head_t    c_major_list_head;
233 queue_head_t    c_filling_list_head;
234 queue_head_t    c_bad_list_head;
235 
236 uint32_t        c_age_count = 0;
237 uint32_t        c_swappedin_count = 0;
238 uint32_t        c_swapout_count = 0;
239 uint32_t        c_swapio_count = 0;
240 uint32_t        c_swappedout_count = 0;
241 uint32_t        c_swappedout_sparse_count = 0;
242 uint32_t        c_major_count = 0;
243 uint32_t        c_filling_count = 0;
244 uint32_t        c_empty_count = 0;
245 uint32_t        c_bad_count = 0;
246 
247 
248 queue_head_t    c_minor_list_head;
249 uint32_t        c_minor_count = 0;
250 
251 int             c_overage_swapped_count = 0;
252 int             c_overage_swapped_limit = 0;
253 
254 int             c_seg_fixed_array_len;
255 union  c_segu   *c_segments;
256 vm_offset_t     c_buffers;
257 vm_size_t       c_buffers_size;
258 caddr_t         c_segments_next_page;
259 boolean_t       c_segments_busy;
260 uint32_t        c_segments_available;
261 uint32_t        c_segments_limit;
262 uint32_t        c_segments_nearing_limit;
263 
264 uint32_t        c_segment_svp_in_hash;
265 uint32_t        c_segment_svp_hash_succeeded;
266 uint32_t        c_segment_svp_hash_failed;
267 uint32_t        c_segment_svp_zero_compressions;
268 uint32_t        c_segment_svp_nonzero_compressions;
269 uint32_t        c_segment_svp_zero_decompressions;
270 uint32_t        c_segment_svp_nonzero_decompressions;
271 
272 uint32_t        c_segment_noncompressible_pages;
273 
274 uint32_t        c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor */
275 #if CONFIG_FREEZE
276 int32_t        c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
277 uint32_t        c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
278 #endif /* CONFIG_FREEZE */
279 
280 uint32_t        c_segment_pages_compressed_limit;
281 uint32_t        c_segment_pages_compressed_nearing_limit;
282 uint32_t        c_free_segno_head = (uint32_t)-1;
283 
284 uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
285 uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
286 uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
287 uint32_t        vm_compressor_catchup_threshold_divisor = 10;
288 
289 uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
290 uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
291 uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
292 uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;
293 
294 #define         C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))
295 
296 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
297 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
298 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
299 
300 boolean_t       decompressions_blocked = FALSE;
301 
302 zone_t          compressor_segment_zone;
303 int             c_compressor_swap_trigger = 0;
304 
305 uint32_t        compressor_cpus;
306 char            *compressor_scratch_bufs;
307 char            *kdp_compressor_scratch_buf;
308 char            *kdp_compressor_decompressed_page;
309 addr64_t        kdp_compressor_decompressed_page_paddr;
310 ppnum_t         kdp_compressor_decompressed_page_ppnum;
311 
312 clock_sec_t     start_of_sample_period_sec = 0;
313 clock_nsec_t    start_of_sample_period_nsec = 0;
314 clock_sec_t     start_of_eval_period_sec = 0;
315 clock_nsec_t    start_of_eval_period_nsec = 0;
316 uint32_t        sample_period_decompression_count = 0;
317 uint32_t        sample_period_compression_count = 0;
318 uint32_t        last_eval_decompression_count = 0;
319 uint32_t        last_eval_compression_count = 0;
320 
321 #define         DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)
322 
323 boolean_t       vm_swapout_ripe_segments = FALSE;
324 uint32_t        vm_ripe_target_age = (60 * 60 * 48);
325 
326 uint32_t        swapout_target_age = 0;
327 uint32_t        age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
328 uint32_t        overage_decompressions_during_sample_period = 0;
329 
330 
331 void            do_fastwake_warmup(queue_head_t *, boolean_t);
332 boolean_t       fastwake_warmup = FALSE;
333 boolean_t       fastwake_recording_in_progress = FALSE;
334 clock_sec_t     dont_trim_until_ts = 0;
335 
336 uint64_t        c_segment_warmup_count;
337 uint64_t        first_c_segment_to_warm_generation_id = 0;
338 uint64_t        last_c_segment_to_warm_generation_id = 0;
339 boolean_t       hibernate_flushing = FALSE;
340 
341 int64_t         c_segment_input_bytes __attribute__((aligned(8))) = 0;
342 int64_t         c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
343 int64_t         compressor_bytes_used __attribute__((aligned(8))) = 0;
344 
345 
346 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE]  __attribute__ ((aligned(8)));
347 
348 static boolean_t compressor_needs_to_swap(void);
349 static void vm_compressor_swap_trigger_thread(void);
350 static void vm_compressor_do_delayed_compactions(boolean_t);
351 static void vm_compressor_compact_and_swap(boolean_t);
352 static void vm_compressor_age_swapped_in_segments(boolean_t);
353 
354 struct vm_compressor_swapper_stats vmcs_stats;
355 
356 #if XNU_TARGET_OS_OSX
357 #if (__arm64__)
358 static void vm_compressor_process_major_segments(void);
359 #endif /* (__arm64__) */
360 static void vm_compressor_take_paging_space_action(void);
361 #endif /* XNU_TARGET_OS_OSX */
362 
363 void compute_swapout_target_age(void);
364 
365 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
366 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
367 
368 int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
369 int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
370 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
371 
372 void c_seg_move_to_sparse_list(c_segment_t);
373 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
374 
375 uint64_t vm_available_memory(void);
376 uint64_t vm_compressor_pages_compressed(void);
377 uint32_t vm_compressor_pool_size(void);
378 
379 /*
380  * indicate the need to do a major compaction if
381  * the overall set of in-use compression segments
382  * becomes sparse... on systems that support pressure
383  * driven swapping, this will also cause swapouts to
384  * be initiated.
385  */
386 static inline boolean_t
387 vm_compressor_needs_to_major_compact()
388 {
389 	uint32_t        incore_seg_count;
390 
391 	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
392 
393 	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
394 	    ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
395 	    ((incore_seg_count / 8) * c_seg_max_pages)) {
396 		return 1;
397 	}
398 	return 0;
399 }
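/*
 * NOTE (restating the heuristic above): a major compaction is requested only
 * once c_segment_count reaches 1/8th of c_segments_nearing_limit, and then
 * only when the pages actually resident in the compressor pool
 * (VM_PAGE_COMPRESSOR_COUNT) fall short of the in-core segments' capacity
 * (incore_seg_count * c_seg_max_pages) by more than 1/8th of that capacity,
 * i.e. the in-use segments have become noticeably sparse.
 */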
400 
401 
402 uint64_t
403 vm_available_memory(void)
404 {
405 	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
406 }
407 
408 
409 uint32_t
410 vm_compressor_pool_size(void)
411 {
412 	return VM_PAGE_COMPRESSOR_COUNT;
413 }
414 
415 uint64_t
416 vm_compressor_pages_compressed(void)
417 {
418 	return c_segment_pages_compressed * PAGE_SIZE_64;
419 }
420 
421 
422 boolean_t
423 vm_compressor_low_on_space(void)
424 {
425 #if CONFIG_FREEZE
426 	uint64_t incore_seg_count;
427 	uint32_t incore_compressed_pages;
428 	if (freezer_incore_cseg_acct) {
429 		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
430 		incore_compressed_pages = c_segment_pages_compressed_incore;
431 	} else {
432 		incore_seg_count = c_segment_count;
433 		incore_compressed_pages = c_segment_pages_compressed;
434 	}
435 
436 	if ((incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ||
437 	    (incore_seg_count > c_segments_nearing_limit)) {
438 		return TRUE;
439 	}
440 #else /* CONFIG_FREEZE */
441 	if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
442 	    (c_segment_count > c_segments_nearing_limit)) {
443 		return TRUE;
444 	}
445 #endif /* CONFIG_FREEZE */
446 	return FALSE;
447 }
448 
449 
450 boolean_t
451 vm_compressor_out_of_space(void)
452 {
453 #if CONFIG_FREEZE
454 	uint64_t incore_seg_count;
455 	uint32_t incore_compressed_pages;
456 	if (freezer_incore_cseg_acct) {
457 		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
458 		incore_compressed_pages = c_segment_pages_compressed_incore;
459 	} else {
460 		incore_seg_count = c_segment_count;
461 		incore_compressed_pages = c_segment_pages_compressed;
462 	}
463 
464 	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
465 	    (incore_seg_count > c_segments_incore_limit)) {
466 		return TRUE;
467 	}
468 #else /* CONFIG_FREEZE */
469 	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
470 	    (c_segment_count >= c_segments_limit)) {
471 		return TRUE;
472 	}
473 #endif /* CONFIG_FREEZE */
474 	return FALSE;
475 }
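/*
 * NOTE: vm_compressor_low_on_space() tests against the "nearing" limits
 * (computed in vm_compressor_init() as 98% of the hard limits), while
 * vm_compressor_out_of_space() tests against the hard limits themselves
 * (and, with CONFIG_FREEZE in-core accounting, against c_segments_incore_limit).
 */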
476 
477 
478 int
479 vm_wants_task_throttled(task_t task)
480 {
481 	ledger_amount_t compressed;
482 	if (task == kernel_task) {
483 		return 0;
484 	}
485 
486 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
487 		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
488 			ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
489 			compressed >>= VM_MAP_PAGE_SHIFT(task->map);
490 			if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
491 				return 1;
492 			}
493 		}
494 	}
495 	return 0;
496 }
497 
498 
499 #if DEVELOPMENT || DEBUG
500 /*
501  * On compressor/swap exhaustion, kill the largest process regardless of
502  * its chosen process policy.
503  */
504 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
505 #endif /* DEVELOPMENT || DEBUG */
506 
507 #if XNU_TARGET_OS_OSX
508 
509 static uint32_t no_paging_space_action_in_progress = 0;
510 extern void memorystatus_send_low_swap_note(void);
511 
512 static void
513 vm_compressor_take_paging_space_action(void)
514 {
515 	if (no_paging_space_action_in_progress == 0) {
516 		if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
517 			if (no_paging_space_action()) {
518 #if DEVELOPMENT || DEBUG
519 				if (kill_on_no_paging_space) {
520 					/*
521 					 * Since we are choosing to always kill a process, we don't need the
522 					 * "out of application memory" dialog box in this mode. And, hence we won't
523 					 * send the knote.
524 					 */
525 					no_paging_space_action_in_progress = 0;
526 					return;
527 				}
528 #endif /* DEVELOPMENT || DEBUG */
529 				memorystatus_send_low_swap_note();
530 			}
531 
532 			no_paging_space_action_in_progress = 0;
533 		}
534 	}
535 }
536 #endif /* XNU_TARGET_OS_OSX */
537 
538 
539 void
540 vm_decompressor_lock(void)
541 {
542 	PAGE_REPLACEMENT_ALLOWED(TRUE);
543 
544 	decompressions_blocked = TRUE;
545 
546 	PAGE_REPLACEMENT_ALLOWED(FALSE);
547 }
548 
549 void
550 vm_decompressor_unlock(void)
551 {
552 	PAGE_REPLACEMENT_ALLOWED(TRUE);
553 
554 	decompressions_blocked = FALSE;
555 
556 	PAGE_REPLACEMENT_ALLOWED(FALSE);
557 
558 	thread_wakeup((event_t)&decompressions_blocked);
559 }
560 
561 static inline void
562 cslot_copy(c_slot_t cdst, c_slot_t csrc)
563 {
564 #if CHECKSUM_THE_DATA
565 	cdst->c_hash_data = csrc->c_hash_data;
566 #endif
567 #if CHECKSUM_THE_COMPRESSED_DATA
568 	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
569 #endif
570 #if POPCOUNT_THE_COMPRESSED_DATA
571 	cdst->c_pop_cdata = csrc->c_pop_cdata;
572 #endif
573 	cdst->c_size = csrc->c_size;
574 	cdst->c_packed_ptr = csrc->c_packed_ptr;
575 #if defined(__arm__) || defined(__arm64__)
576 	cdst->c_codec = csrc->c_codec;
577 #endif
578 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
579 	cdst->c_inline_popcount = csrc->c_inline_popcount;
580 #endif
581 }
582 
583 vm_map_t compressor_map;
584 uint64_t compressor_pool_max_size;
585 uint64_t compressor_pool_size;
586 uint32_t compressor_pool_multiplier;
587 
588 #if DEVELOPMENT || DEBUG
589 /*
590  * Compressor segments are write-protected in development/debug
591  * kernels to help debug memory corruption.
592  * In cases where performance is a concern, this can be disabled
593  * via the boot-arg "-disable_cseg_write_protection".
594  */
595 boolean_t write_protect_c_segs = TRUE;
596 int vm_compressor_test_seg_wp;
597 uint32_t vm_ktrace_enabled;
598 #endif /* DEVELOPMENT || DEBUG */
599 
600 #if (XNU_TARGET_OS_OSX && __arm64__)
601 
602 #include <IOKit/IOPlatformExpert.h>
603 #include <sys/random.h>
604 
605 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
606 static thread_call_t csegbufsz_experiment_thread_call;
607 
608 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
609 static void
610 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
611 {
612 	// Wait for NVRAM to be writable
613 	if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
614 		printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
615 	}
616 
617 	if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
618 		printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
619 	}
620 	thread_call_free(csegbufsz_experiment_thread_call);
621 }
622 
623 static void
624 erase_csegbufsz_experiment_property_async()
625 {
626 	csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
627 		erase_csegbufsz_experiment_property,
628 		NULL,
629 		THREAD_CALL_PRIORITY_LOW
630 		);
631 	if (csegbufsz_experiment_thread_call == NULL) {
632 		printf("csegbufsize_experiment_property: Unable to allocate thread call.");
633 	} else {
634 		thread_call_enter(csegbufsz_experiment_thread_call);
635 	}
636 }
637 
638 static void
639 cleanup_csegbufsz_experiment(__unused void *arg0)
640 {
641 	char nvram = 0;
642 	unsigned int len = sizeof(nvram);
643 	if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
644 		erase_csegbufsz_experiment_property_async();
645 	}
646 }
647 
648 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
649 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
650 
651 void
652 vm_compressor_init(void)
653 {
654 	thread_t        thread;
655 	int             attempts = 1;
656 	kern_return_t   retval = KERN_SUCCESS;
657 	vm_offset_t     start_addr = 0;
658 	vm_size_t       c_segments_arr_size = 0, compressor_submap_size = 0;
659 	vm_map_kernel_flags_t vmk_flags;
660 #if RECORD_THE_COMPRESSED_DATA
661 	vm_size_t       c_compressed_record_sbuf_size = 0;
662 #endif /* RECORD_THE_COMPRESSED_DATA */
663 
664 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
665 	char bootarg_name[32];
666 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
667 
668 #if DEVELOPMENT || DEBUG
669 	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
670 		write_protect_c_segs = FALSE;
671 	}
672 	int vmcval = 1;
673 	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
674 
675 	if (kern_feature_override(KF_COMPRSV_OVRD)) {
676 		vmcval = 0;
677 	}
678 	if (vmcval == 0) {
679 #if POPCOUNT_THE_COMPRESSED_DATA
680 		popcount_c_segs = FALSE;
681 #endif
682 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
683 		checksum_c_segs = FALSE;
684 #endif
685 #if VALIDATE_C_SEGMENTS
686 		validate_c_segs = FALSE;
687 #endif
688 		write_protect_c_segs = FALSE;
689 	}
690 #endif /* DEVELOPMENT || DEBUG */
691 
692 #if CONFIG_FREEZE
693 	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
694 		freezer_incore_cseg_acct = FALSE;
695 	}
696 #endif /* CONFIG_FREEZE */
697 
698 	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
699 
700 #if !XNU_TARGET_OS_OSX
701 	vm_compressor_minorcompact_threshold_divisor = 20;
702 	vm_compressor_majorcompact_threshold_divisor = 30;
703 	vm_compressor_unthrottle_threshold_divisor = 40;
704 	vm_compressor_catchup_threshold_divisor = 60;
705 #else /* !XNU_TARGET_OS_OSX */
706 	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
707 		vm_compressor_minorcompact_threshold_divisor = 11;
708 		vm_compressor_majorcompact_threshold_divisor = 13;
709 		vm_compressor_unthrottle_threshold_divisor = 20;
710 		vm_compressor_catchup_threshold_divisor = 35;
711 	} else {
712 		vm_compressor_minorcompact_threshold_divisor = 20;
713 		vm_compressor_majorcompact_threshold_divisor = 25;
714 		vm_compressor_unthrottle_threshold_divisor = 35;
715 		vm_compressor_catchup_threshold_divisor = 50;
716 	}
717 #endif /* !XNU_TARGET_OS_OSX */
718 
719 	queue_init(&c_bad_list_head);
720 	queue_init(&c_age_list_head);
721 	queue_init(&c_minor_list_head);
722 	queue_init(&c_major_list_head);
723 	queue_init(&c_filling_list_head);
724 	queue_init(&c_swapout_list_head);
725 	queue_init(&c_swapio_list_head);
726 	queue_init(&c_swappedin_list_head);
727 	queue_init(&c_swappedout_list_head);
728 	queue_init(&c_swappedout_sparse_list_head);
729 
730 	c_free_segno_head = -1;
731 	c_segments_available = 0;
732 
733 	if (vm_compression_limit) {
734 		compressor_pool_size = ptoa_64(vm_compression_limit);
735 	}
736 
737 	compressor_pool_max_size = C_SEG_MAX_LIMIT;
738 	compressor_pool_max_size *= c_seg_bufsize;
739 
740 #if XNU_TARGET_OS_OSX
741 
742 	if (vm_compression_limit == 0) {
743 		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
744 			compressor_pool_size = 16ULL * max_mem;
745 		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
746 			compressor_pool_size = 8ULL * max_mem;
747 		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
748 			compressor_pool_size = 4ULL * max_mem;
749 		} else {
750 			compressor_pool_size = 2ULL * max_mem;
751 		}
752 	}
753 	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
754 		compressor_pool_multiplier = 1;
755 	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
756 		compressor_pool_multiplier = 2;
757 	} else {
758 		compressor_pool_multiplier = 4;
759 	}
760 
761 #elif defined(__arm__)
762 
763 #define VM_RESERVE_SIZE                 (1024 * 1024 * 256)
764 #define MAX_COMPRESSOR_POOL_SIZE        (1024 * 1024 * 450)
765 
766 	if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) {
767 		compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE;
768 	}
769 
770 	if (vm_compression_limit == 0) {
771 		compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
772 	}
773 	compressor_pool_multiplier = 1;
774 
775 #elif defined(__arm64__) && defined(XNU_TARGET_OS_WATCH)
776 
777 	/*
778 	 * On M9 watches the compressor can grow large and lead to
779 	 * churn in the working set, resulting in audio drops. Setting a cap
780 	 * on the compressor size favors reclaiming unused memory
781 	 * sitting in the idle band via jetsams.
782 	 */
783 
784 #define COMPRESSOR_CAP_PERCENTAGE        37ULL
785 
786 	if (compressor_pool_max_size > max_mem) {
787 		compressor_pool_max_size = max_mem;
788 	}
789 
790 	if (vm_compression_limit == 0) {
791 		compressor_pool_size = (max_mem * COMPRESSOR_CAP_PERCENTAGE) / 100ULL;
792 	}
793 	compressor_pool_multiplier = 1;
794 
795 #else
796 
797 	if (compressor_pool_max_size > max_mem) {
798 		compressor_pool_max_size = max_mem;
799 	}
800 
801 	if (vm_compression_limit == 0) {
802 		compressor_pool_size = max_mem;
803 	}
804 	compressor_pool_multiplier = 1;
805 #endif
806 	if (compressor_pool_size > compressor_pool_max_size) {
807 		compressor_pool_size = compressor_pool_max_size;
808 	}
809 
810 	c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
811 	c_seg_slot_var_array_min_len = c_seg_max_pages;
812 
813 #if !defined(__x86_64__)
814 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
815 	c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
816 #else
817 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
818 	c_seg_allocsize = c_seg_bufsize;
819 #endif /* !defined(__x86_64__) */
820 
821 try_again:
822 	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
823 	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
824 
825 	c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
826 
827 	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
828 #if defined(XNU_TARGET_OS_WATCH)
829 		c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
830 #else
831 		if (!vm_compression_limit) {
832 			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
833 		}
834 #endif
835 	}
836 
837 	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
838 
839 #if CONFIG_FREEZE
840 	/*
841 	 * Our in-core limits are based on the size of the compressor pool.
842 	 * The c_segments_nearing_limit is also based on the compressor pool
843 	 * size and calculated above.
844 	 */
845 	c_segments_incore_limit = c_segments_limit;
846 
847 	if (freezer_incore_cseg_acct) {
848 		/*
849 		 * Add enough segments to track all frozen c_segs that can be stored in swap.
850 		 */
851 		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
852 	}
853 #endif
854 	/*
855 	 * Submap needs space for:
856 	 * - c_segments
857 	 * - c_buffers
858 	 * - swap reclamations -- c_seg_bufsize
859 	 */
860 	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
861 	c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
862 
863 	compressor_submap_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
864 
865 #if RECORD_THE_COMPRESSED_DATA
866 	c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
867 	compressor_submap_size += c_compressed_record_sbuf_size;
868 #endif /* RECORD_THE_COMPRESSED_DATA */
869 
870 	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
871 	vmk_flags.vmkf_permanent = TRUE;
872 	retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
873 	    VM_MAP_CREATE_NEVER_FAULTS, VM_FLAGS_ANYWHERE, vmk_flags,
874 	    VM_KERN_MEMORY_COMPRESSOR, &compressor_map);
875 
876 	if (retval != KERN_SUCCESS) {
877 		if (++attempts > 3) {
878 			panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
879 		}
880 
881 		compressor_pool_size = compressor_pool_size / 2;
882 
883 		kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
884 		goto try_again;
885 	}
886 	if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments),
887 	    (sizeof(union c_segu) * c_segments_limit), 0,
888 	    KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
889 		panic("vm_compressor_init: kernel_memory_allocate failed - c_segments");
890 	}
891 	if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0,
892 	    KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
893 		panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers");
894 	}
895 
896 #if DEVELOPMENT || DEBUG
897 	hvg_hcall_set_coredump_data();
898 #endif
899 
900 	/*
901 	 * Pick a good size that will minimize fragmentation in zalloc
902 	 * by minimizing the fragmentation in a 16k run.
903 	 *
904 	 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
905 	 * making the fragmentation in a 4k page terrible. Using 16k for all
906 	 * systems matches zalloc() and will minimize fragmentation.
907 	 */
908 	uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
909 	uint32_t cnt  = (16 << 10) / c_segment_size;
910 	uint32_t frag = (16 << 10) % c_segment_size;
911 
912 	c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
913 
914 	while (cnt * sizeof(struct c_slot) < frag) {
915 		c_segment_size += sizeof(struct c_slot);
916 		c_seg_fixed_array_len++;
917 		frag -= cnt * sizeof(struct c_slot);
918 	}
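	/*
	 * NOTE (restating the loop above): cnt segments fit in a 16KB zalloc
	 * run and frag bytes are left over. As long as that leftover can donate
	 * one more struct c_slot to every segment in the run, the inline
	 * (fixed) slot array grows by one slot per iteration, turning
	 * would-be fragmentation into usable slots before the zone element
	 * size is finalized below.
	 */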
919 
920 	compressor_segment_zone = zone_create("compressor_segment",
921 	    c_segment_size, ZC_PGZ_USE_GUARDS | ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
922 
923 	c_segments_busy = FALSE;
924 
925 	c_segments_next_page = (caddr_t)c_segments;
926 	vm_compressor_algorithm_init();
927 
928 	{
929 		host_basic_info_data_t hinfo;
930 		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
931 		size_t bufsize;
932 		char *buf;
933 
934 #define BSD_HOST 1
935 		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
936 
937 		compressor_cpus = hinfo.max_cpus;
938 
939 		bufsize = PAGE_SIZE;
940 		bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
941 		/* For the KDP path */
942 		bufsize += vm_compressor_get_decode_scratch_size();
943 #if CONFIG_FREEZE
944 		bufsize += vm_compressor_get_encode_scratch_size();
945 #endif
946 #if RECORD_THE_COMPRESSED_DATA
947 		bufsize += c_compressed_record_sbuf_size;
948 #endif
949 
950 		if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buf, bufsize,
951 		    PAGE_MASK, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
952 			panic("vm_compressor_init: Unable to allocate %zd bytes", bufsize);
953 		}
954 
955 		/*
956 		 * kdp_compressor_decompressed_page must be page aligned because we access
957 		 * it through the physical aperture by page number.
958 		 */
959 		kdp_compressor_decompressed_page = buf;
960 		kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
961 		kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
962 		buf += PAGE_SIZE;
963 		bufsize -= PAGE_SIZE;
964 
965 		compressor_scratch_bufs = buf;
966 		buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
967 		bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
968 
969 		kdp_compressor_scratch_buf = buf;
970 		buf += vm_compressor_get_decode_scratch_size();
971 		bufsize -= vm_compressor_get_decode_scratch_size();
972 
973 #if CONFIG_FREEZE
974 		freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
975 		buf += vm_compressor_get_encode_scratch_size();
976 		bufsize -= vm_compressor_get_encode_scratch_size();
977 #endif
978 
979 #if RECORD_THE_COMPRESSED_DATA
980 		c_compressed_record_sbuf = buf;
981 		c_compressed_record_cptr = buf;
982 		c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
983 		buf += c_compressed_record_sbuf_size;
984 		bufsize -= c_compressed_record_sbuf_size;
985 #endif
986 		assert(bufsize == 0);
987 	}
988 
989 	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
990 	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
991 		panic("vm_compressor_swap_trigger_thread: create failed");
992 	}
993 	thread_deallocate(thread);
994 
995 	if (vm_pageout_internal_start() != KERN_SUCCESS) {
996 		panic("vm_compressor_init: Failed to start the internal pageout thread.");
997 	}
998 	if (VM_CONFIG_SWAP_IS_PRESENT) {
999 		vm_compressor_swap_init();
1000 	}
1001 
1002 	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1003 		vm_compressor_is_active = 1;
1004 	}
1005 
1006 #if CONFIG_FREEZE
1007 	memorystatus_freeze_enabled = TRUE;
1008 #endif /* CONFIG_FREEZE */
1009 
1010 	vm_compressor_available = 1;
1011 
1012 	vm_page_reactivate_all_throttled();
1013 
1014 	bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1015 }
1016 
1017 
1018 #if VALIDATE_C_SEGMENTS
1019 
1020 static void
1021 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1022 {
1023 	uint16_t        c_indx;
1024 	int32_t         bytes_used;
1025 	uint32_t        c_rounded_size;
1026 	uint32_t        c_size;
1027 	c_slot_t        cs;
1028 
1029 	if (__probable(validate_c_segs == FALSE)) {
1030 		return;
1031 	}
1032 	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1033 		c_indx = c_seg->c_firstemptyslot;
1034 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1035 
1036 		if (cs == NULL) {
1037 			panic("c_seg_validate:  no slot backing c_firstemptyslot");
1038 		}
1039 
1040 		if (cs->c_size) {
1041 			panic("c_seg_validate:  c_firstemptyslot has non-zero size (%d)", cs->c_size);
1042 		}
1043 	}
1044 	bytes_used = 0;
1045 
1046 	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1047 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1048 
1049 		c_size = UNPACK_C_SIZE(cs);
1050 
1051 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1052 
1053 		bytes_used += c_rounded_size;
1054 
1055 #if CHECKSUM_THE_COMPRESSED_DATA
1056 		unsigned csvhash;
1057 		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1058 			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1059 			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1060 		}
1061 #endif
1062 #if POPCOUNT_THE_COMPRESSED_DATA
1063 		unsigned csvpop;
1064 		if (c_size) {
1065 			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1066 			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1067 				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1068 			}
1069 		}
1070 #endif
1071 	}
1072 
1073 	if (bytes_used != c_seg->c_bytes_used) {
1074 		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1075 	}
1076 
1077 	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1078 		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1079 		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1080 	}
1081 
1082 	if (must_be_compact) {
1083 		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1084 			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1085 			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1086 		}
1087 	}
1088 }
1089 
1090 #endif
1091 
1092 
1093 void
1094 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1095 {
1096 	boolean_t       clear_busy = FALSE;
1097 
1098 	if (c_list_lock_held == FALSE) {
1099 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1100 			C_SEG_BUSY(c_seg);
1101 
1102 			lck_mtx_unlock_always(&c_seg->c_lock);
1103 			lck_mtx_lock_spin_always(c_list_lock);
1104 			lck_mtx_lock_spin_always(&c_seg->c_lock);
1105 
1106 			clear_busy = TRUE;
1107 		}
1108 	}
1109 	assert(c_seg->c_state != C_IS_FILLING);
1110 
1111 	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg))) {
1112 		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1113 		c_seg->c_on_minorcompact_q = 1;
1114 		c_minor_count++;
1115 	}
1116 	if (c_list_lock_held == FALSE) {
1117 		lck_mtx_unlock_always(c_list_lock);
1118 	}
1119 
1120 	if (clear_busy == TRUE) {
1121 		C_SEG_WAKEUP_DONE(c_seg);
1122 	}
1123 }
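/*
 * NOTE: the try-lock dance above is the standard pattern for honoring the
 * c_list_lock -> c_seg->c_lock ordering: if c_list_lock cannot be taken
 * without blocking, the segment is marked busy so it cannot disappear, its
 * lock is dropped, both locks are re-taken in the proper order, and the busy
 * bit is cleared on the way out. The same pattern appears in
 * c_seg_move_to_sparse_list() below.
 */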
1124 
1125 
1126 unsigned int c_seg_moved_to_sparse_list = 0;
1127 
1128 void
1129 c_seg_move_to_sparse_list(c_segment_t c_seg)
1130 {
1131 	boolean_t       clear_busy = FALSE;
1132 
1133 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1134 		C_SEG_BUSY(c_seg);
1135 
1136 		lck_mtx_unlock_always(&c_seg->c_lock);
1137 		lck_mtx_lock_spin_always(c_list_lock);
1138 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1139 
1140 		clear_busy = TRUE;
1141 	}
1142 	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1143 
1144 	c_seg_moved_to_sparse_list++;
1145 
1146 	lck_mtx_unlock_always(c_list_lock);
1147 
1148 	if (clear_busy == TRUE) {
1149 		C_SEG_WAKEUP_DONE(c_seg);
1150 	}
1151 }
1152 
1153 
1154 void
1155 c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
1156 {
1157 	c_segment_t c_seg_next;
1158 
1159 	if (queue_empty(qhead)) {
1160 		queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1161 	} else {
1162 		c_seg_next = (c_segment_t)queue_first(qhead);
1163 
1164 		while (TRUE) {
1165 			if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
1166 				queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
1167 				break;
1168 			}
1169 			c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);
1170 
1171 			if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
1172 				queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1173 				break;
1174 			}
1175 		}
1176 	}
1177 }
1178 
1179 
1180 int try_minor_compaction_failed = 0;
1181 int try_minor_compaction_succeeded = 0;
1182 
1183 void
1184 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1185 {
1186 	assert(c_seg->c_on_minorcompact_q);
1187 	/*
1188 	 * c_seg is currently on the delayed minor compaction
1189 	 * queue and we have c_seg locked... if we can get the
1190 	 * c_list_lock w/o blocking (if we blocked we could deadlock
1191 	 * because the lock order is c_list_lock then c_seg's lock)
1192 	 * we'll pull it from the delayed list and free it directly
1193 	 */
1194 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1195 		/*
1196 		 * c_list_lock is held, we need to bail
1197 		 */
1198 		try_minor_compaction_failed++;
1199 
1200 		lck_mtx_unlock_always(&c_seg->c_lock);
1201 	} else {
1202 		try_minor_compaction_succeeded++;
1203 
1204 		C_SEG_BUSY(c_seg);
1205 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1206 	}
1207 }
1208 
1209 
1210 int
1211 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1212 {
1213 	int     c_seg_freed;
1214 
1215 	assert(c_seg->c_busy);
1216 	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1217 
1218 	/*
1219 	 * check for the case that can occur when we are not swapping
1220 	 * and this segment has been major compacted in the past
1221 	 * and moved to the majorcompact q to remove it from further
1222 	 * consideration... if the occupancy falls too low we need
1223 	 * to put it back on the age_q so that it will be considered
1224 	 * in the next major compaction sweep... if we don't do this
1225 	 * we will eventually run into the c_segments_limit
1226 	 */
1227 	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1228 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1229 	}
1230 	if (!c_seg->c_on_minorcompact_q) {
1231 		if (clear_busy == TRUE) {
1232 			C_SEG_WAKEUP_DONE(c_seg);
1233 		}
1234 
1235 		lck_mtx_unlock_always(&c_seg->c_lock);
1236 
1237 		return 0;
1238 	}
1239 	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1240 	c_seg->c_on_minorcompact_q = 0;
1241 	c_minor_count--;
1242 
1243 	lck_mtx_unlock_always(c_list_lock);
1244 
1245 	if (disallow_page_replacement == TRUE) {
1246 		lck_mtx_unlock_always(&c_seg->c_lock);
1247 
1248 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
1249 
1250 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1251 	}
1252 	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1253 
1254 	if (disallow_page_replacement == TRUE) {
1255 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
1256 	}
1257 
1258 	if (need_list_lock == TRUE) {
1259 		lck_mtx_lock_spin_always(c_list_lock);
1260 	}
1261 
1262 	return c_seg_freed;
1263 }
1264 
1265 void
1266 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1267 {
1268 	c_segment_t c_seg = (c_segment_t) wait_event;
1269 
1270 	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1271 	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1272 }
1273 
1274 #if DEVELOPMENT || DEBUG
1275 int
1276 do_cseg_wedge_thread(void)
1277 {
1278 	struct c_segment c_seg;
1279 	c_seg.c_busy_for_thread = current_thread();
1280 
1281 	debug_cseg_wait_event = (event_t) &c_seg;
1282 
1283 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1284 	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1285 
1286 	thread_block(THREAD_CONTINUE_NULL);
1287 
1288 	return 0;
1289 }
1290 
1291 int
1292 do_cseg_unwedge_thread(void)
1293 {
1294 	thread_wakeup(debug_cseg_wait_event);
1295 	debug_cseg_wait_event = NULL;
1296 
1297 	return 0;
1298 }
1299 #endif /* DEVELOPMENT || DEBUG */
1300 
1301 void
1302 c_seg_wait_on_busy(c_segment_t c_seg)
1303 {
1304 	c_seg->c_wanted = 1;
1305 
1306 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1307 	assert_wait((event_t) (c_seg), THREAD_UNINT);
1308 
1309 	lck_mtx_unlock_always(&c_seg->c_lock);
1310 	thread_block(THREAD_CONTINUE_NULL);
1311 }
1312 
1313 #if CONFIG_FREEZE
1314 /*
1315  * We don't have the task lock held while updating the task's
1316  * c_seg queues. We can do that because of the following restrictions:
1317  *
1318  * - SINGLE FREEZER CONTEXT:
1319  *   We 'insert' c_segs into the task list on the task_freeze path.
1320  *   There can only be one such freeze in progress and the task
1321  *   isn't disappearing because we have the VM map lock held throughout
1322  *   and we have a reference on the proc too.
1323  *
1324  * - SINGLE TASK DISOWN CONTEXT:
1325  *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
1326  *   we don't need the task lock but we need the c_list_lock and the
1327  *   compressor master lock (shared). We also hold the individual
1328  *   c_seg locks (exclusive).
1329  *
1330  *   If we either:
1331  *   - can't get the c_seg lock on a try, then we start again because maybe
1332  *   the c_seg is part of a compaction and might get freed. So we can't trust
1333  *   that linkage and need to restart our queue traversal.
1334  *   - OR, we run into a busy c_seg (say being swapped in or free-ing) we
1335  *   drop all locks again and wait and restart our queue traversal.
1336  *
1337  * - The new_owner_task below is currently only the kernel or NULL.
1338  *
1339  */
1340 void
1341 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1342 {
1343 	task_t          owner_task = c_seg->c_task_owner;
1344 	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1345 
1346 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1347 	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1348 
1349 	if (owner_task) {
1350 		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1351 		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1352 		    c_segment_t, c_task_list_next_cseg);
1353 	}
1354 
1355 	if (new_owner_task) {
1356 		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1357 		    c_segment_t, c_task_list_next_cseg);
1358 		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1359 	}
1360 
1361 	c_seg->c_task_owner = new_owner_task;
1362 }
1363 
1364 void
1365 task_disown_frozen_csegs(task_t owner_task)
1366 {
1367 	c_segment_t c_seg = NULL, next_cseg = NULL;
1368 
1369 again:
1370 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
1371 	lck_mtx_lock_spin_always(c_list_lock);
1372 
1373 	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1374 	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1375 	    c_seg = next_cseg) {
1376 		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1377 
1378 		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1379 			lck_mtx_unlock(c_list_lock);
1380 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1381 			goto again;
1382 		}
1383 
1384 		if (c_seg->c_busy) {
1385 			lck_mtx_unlock(c_list_lock);
1386 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1387 
1388 			c_seg_wait_on_busy(c_seg);
1389 
1390 			goto again;
1391 		}
1392 		assert(c_seg->c_task_owner == owner_task);
1393 		c_seg_update_task_owner(c_seg, kernel_task);
1394 		lck_mtx_unlock_always(&c_seg->c_lock);
1395 	}
1396 
1397 	lck_mtx_unlock(c_list_lock);
1398 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
1399 }
1400 #endif /* CONFIG_FREEZE */
1401 
1402 void
1403 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1404 {
1405 	int     old_state = c_seg->c_state;
1406 
1407 #if XNU_TARGET_OS_OSX
1408 #if     DEVELOPMENT || DEBUG
1409 	if (new_state != C_IS_FILLING) {
1410 		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1411 	}
1412 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1413 #endif
1414 #endif /* XNU_TARGET_OS_OSX */
1415 	switch (old_state) {
1416 	case C_IS_EMPTY:
1417 		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1418 
1419 		c_empty_count--;
1420 		break;
1421 
1422 	case C_IS_FILLING:
1423 		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1424 
1425 		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1426 		c_filling_count--;
1427 		break;
1428 
1429 	case C_ON_AGE_Q:
1430 		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1431 		    new_state == C_IS_FREE);
1432 
1433 		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1434 		c_age_count--;
1435 		break;
1436 
1437 	case C_ON_SWAPPEDIN_Q:
1438 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1439 
1440 		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1441 		c_swappedin_count--;
1442 		break;
1443 
1444 	case C_ON_SWAPOUT_Q:
1445 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1446 
1447 #if CONFIG_FREEZE
1448 		if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1449 			c_seg_update_task_owner(c_seg, NULL);
1450 		}
1451 #endif /* CONFIG_FREEZE */
1452 
1453 		queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1454 		thread_wakeup((event_t)&compaction_swapper_running);
1455 		c_swapout_count--;
1456 		break;
1457 
1458 	case C_ON_SWAPIO_Q:
1459 		assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1460 
1461 		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1462 		c_swapio_count--;
1463 		break;
1464 
1465 	case C_ON_SWAPPEDOUT_Q:
1466 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1467 		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1468 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1469 
1470 		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1471 		c_swappedout_count--;
1472 		break;
1473 
1474 	case C_ON_SWAPPEDOUTSPARSE_Q:
1475 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1476 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1477 
1478 		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1479 		c_swappedout_sparse_count--;
1480 		break;
1481 
1482 	case C_ON_MAJORCOMPACT_Q:
1483 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1484 
1485 		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1486 		c_major_count--;
1487 		break;
1488 
1489 	case C_ON_BAD_Q:
1490 		assert(new_state == C_IS_FREE);
1491 
1492 		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1493 		c_bad_count--;
1494 		break;
1495 
1496 	default:
1497 		panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1498 	}
1499 
1500 	switch (new_state) {
1501 	case C_IS_FREE:
1502 		assert(old_state != C_IS_FILLING);
1503 
1504 		break;
1505 
1506 	case C_IS_EMPTY:
1507 		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1508 
1509 		c_empty_count++;
1510 		break;
1511 
1512 	case C_IS_FILLING:
1513 		assert(old_state == C_IS_EMPTY);
1514 
1515 		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1516 		c_filling_count++;
1517 		break;
1518 
1519 	case C_ON_AGE_Q:
1520 		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1521 		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1522 		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1523 
1524 		if (old_state == C_IS_FILLING) {
1525 			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1526 		} else {
1527 			if (!queue_empty(&c_age_list_head)) {
1528 				c_segment_t     c_first;
1529 
1530 				c_first = (c_segment_t)queue_first(&c_age_list_head);
1531 				c_seg->c_creation_ts = c_first->c_creation_ts;
1532 			}
1533 			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1534 		}
1535 		c_age_count++;
1536 		break;
1537 
1538 	case C_ON_SWAPPEDIN_Q:
1539 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1540 
1541 		if (insert_head == TRUE) {
1542 			queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1543 		} else {
1544 			queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1545 		}
1546 		c_swappedin_count++;
1547 		break;
1548 
1549 	case C_ON_SWAPOUT_Q:
1550 		assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1551 
1552 		if (insert_head == TRUE) {
1553 			queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1554 		} else {
1555 			queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1556 		}
1557 		c_swapout_count++;
1558 		break;
1559 
1560 	case C_ON_SWAPIO_Q:
1561 		assert(old_state == C_ON_SWAPOUT_Q);
1562 
1563 		if (insert_head == TRUE) {
1564 			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1565 		} else {
1566 			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1567 		}
1568 		c_swapio_count++;
1569 		break;
1570 
1571 	case C_ON_SWAPPEDOUT_Q:
1572 		assert(old_state == C_ON_SWAPIO_Q);
1573 
1574 		if (insert_head == TRUE) {
1575 			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1576 		} else {
1577 			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1578 		}
1579 		c_swappedout_count++;
1580 		break;
1581 
1582 	case C_ON_SWAPPEDOUTSPARSE_Q:
1583 		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
1584 
1585 		if (insert_head == TRUE) {
1586 			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1587 		} else {
1588 			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1589 		}
1590 
1591 		c_swappedout_sparse_count++;
1592 		break;
1593 
1594 	case C_ON_MAJORCOMPACT_Q:
1595 		assert(old_state == C_ON_AGE_Q);
1596 
1597 		if (insert_head == TRUE) {
1598 			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1599 		} else {
1600 			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1601 		}
1602 		c_major_count++;
1603 		break;
1604 
1605 	case C_ON_BAD_Q:
1606 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1607 
1608 		if (insert_head == TRUE) {
1609 			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1610 		} else {
1611 			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1612 		}
1613 		c_bad_count++;
1614 		break;
1615 
1616 	default:
1617 		panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
1618 	}
1619 	c_seg->c_state = new_state;
1620 }
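/*
 * NOTE: c_seg_switch_state() is the single place a segment moves between the
 * per-state queues declared above. The first switch removes the segment from
 * its old queue and decrements that queue's count; the second inserts it on
 * the new queue and increments the new count (the FREE and EMPTY states are
 * tracked only by count), with asserts restricting which transitions are
 * legal.
 */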
1621 
1622 
1623 
1624 void
1625 c_seg_free(c_segment_t c_seg)
1626 {
1627 	assert(c_seg->c_busy);
1628 
1629 	lck_mtx_unlock_always(&c_seg->c_lock);
1630 	lck_mtx_lock_spin_always(c_list_lock);
1631 	lck_mtx_lock_spin_always(&c_seg->c_lock);
1632 
1633 	c_seg_free_locked(c_seg);
1634 }
1635 
1636 
1637 void
1638 c_seg_free_locked(c_segment_t c_seg)
1639 {
1640 	int             segno;
1641 	int             pages_populated = 0;
1642 	int32_t         *c_buffer = NULL;
1643 	uint64_t        c_swap_handle = 0;
1644 
1645 	assert(c_seg->c_busy);
1646 	assert(c_seg->c_slots_used == 0);
1647 	assert(!c_seg->c_on_minorcompact_q);
1648 	assert(!c_seg->c_busy_swapping);
1649 
1650 	if (c_seg->c_overage_swap == TRUE) {
1651 		c_overage_swapped_count--;
1652 		c_seg->c_overage_swap = FALSE;
1653 	}
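	/*
	 * note whether this segment's data lives in an in-core buffer or
	 * behind an on-disk swap handle; the corresponding resource is
	 * released further down, after both locks have been dropped
	 */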
1654 	if (!(C_SEG_IS_ONDISK(c_seg))) {
1655 		c_buffer = c_seg->c_store.c_buffer;
1656 	} else {
1657 		c_swap_handle = c_seg->c_store.c_swap_handle;
1658 	}
1659 
1660 	c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
1661 
1662 	if (c_buffer) {
1663 		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
1664 		c_seg->c_store.c_buffer = NULL;
1665 	} else {
1666 #if CONFIG_FREEZE
1667 		c_seg_update_task_owner(c_seg, NULL);
1668 #endif /* CONFIG_FREEZE */
1669 
1670 		c_seg->c_store.c_swap_handle = (uint64_t)-1;
1671 	}
1672 
1673 	lck_mtx_unlock_always(&c_seg->c_lock);
1674 
1675 	lck_mtx_unlock_always(c_list_lock);
1676 
1677 	if (c_buffer) {
1678 		if (pages_populated) {
1679 			kernel_memory_depopulate(compressor_map, (vm_offset_t)c_buffer,
1680 			    pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1681 		}
1682 	} else if (c_swap_handle) {
1683 		/*
1684 		 * Free swap space on disk.
1685 		 */
1686 		vm_swap_free(c_swap_handle);
1687 	}
1688 	lck_mtx_lock_spin_always(&c_seg->c_lock);
1689 	/*
1690 	 * c_seg must remain busy until
1691 	 * after the call to vm_swap_free
1692 	 */
1693 	C_SEG_WAKEUP_DONE(c_seg);
1694 	lck_mtx_unlock_always(&c_seg->c_lock);
1695 
1696 	segno = c_seg->c_mysegno;
1697 
1698 	lck_mtx_lock_spin_always(c_list_lock);
1699 	/*
1700 	 * because the c_buffer is now associated with the segno,
1701 	 * we can't put the segno back on the free list until
1702 	 * after we have depopulated the c_buffer range, or
1703 	 * we run the risk of depopulating a range that is
1704 	 * now being used in one of the compressor heads
1705 	 */
1706 	c_segments[segno].c_segno = c_free_segno_head;
1707 	c_free_segno_head = segno;
1708 	c_segment_count--;
1709 
1710 	lck_mtx_unlock_always(c_list_lock);
1711 
1712 	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
1713 
1714 	if (c_seg->c_slot_var_array_len) {
1715 		kfree_data(c_seg->c_slot_var_array,
1716 		    sizeof(struct c_slot) * c_seg->c_slot_var_array_len);
1717 	}
1718 
1719 	zfree(compressor_segment_zone, c_seg);
1720 }
1721 
1722 #if DEVELOPMENT || DEBUG
1723 int c_seg_trim_page_count = 0;
1724 #endif
1725 
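/*
 * c_seg_trim_tail: walk backwards from c_nextslot, discarding trailing
 * empty slots, and pull c_nextoffset / c_populated_offset back to just
 * past the last slot that still holds compressed data
 */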
1726 void
1727 c_seg_trim_tail(c_segment_t c_seg)
1728 {
1729 	c_slot_t        cs;
1730 	uint32_t        c_size;
1731 	uint32_t        c_offset;
1732 	uint32_t        c_rounded_size;
1733 	uint16_t        current_nextslot;
1734 	uint32_t        current_populated_offset;
1735 
1736 	if (c_seg->c_bytes_used == 0) {
1737 		return;
1738 	}
1739 	current_nextslot = c_seg->c_nextslot;
1740 	current_populated_offset = c_seg->c_populated_offset;
1741 
1742 	while (c_seg->c_nextslot) {
1743 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
1744 
1745 		c_size = UNPACK_C_SIZE(cs);
1746 
1747 		if (c_size) {
1748 			if (current_nextslot != c_seg->c_nextslot) {
1749 				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1750 				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1751 
1752 				c_seg->c_nextoffset = c_offset;
1753 				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
1754 				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1755 
1756 				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
1757 					c_seg->c_firstemptyslot = c_seg->c_nextslot;
1758 				}
1759 #if DEVELOPMENT || DEBUG
1760 				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
1761 				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
1762 				    PAGE_SIZE);
1763 #endif
1764 			}
1765 			break;
1766 		}
1767 		c_seg->c_nextslot--;
1768 	}
1769 	assert(c_seg->c_nextslot);
1770 }
1771 
1772 
1773 int
1774 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
1775 {
1776 	c_slot_mapping_t slot_ptr;
1777 	uint32_t        c_offset = 0;
1778 	uint32_t        old_populated_offset;
1779 	uint32_t        c_rounded_size;
1780 	uint32_t        c_size;
1781 	uint16_t        c_indx = 0;
1782 	int             i;
1783 	c_slot_t        c_dst;
1784 	c_slot_t        c_src;
1785 
1786 	assert(c_seg->c_busy);
1787 
1788 #if VALIDATE_C_SEGMENTS
1789 	c_seg_validate(c_seg, FALSE);
1790 #endif
1791 	if (c_seg->c_bytes_used == 0) {
1792 		c_seg_free(c_seg);
1793 		return 1;
1794 	}
1795 	lck_mtx_unlock_always(&c_seg->c_lock);
1796 
1797 	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
1798 		goto done;
1799 	}
1800 
1801 /* TODO: assert first emptyslot's c_size is actually 0 */
1802 
1803 #if DEVELOPMENT || DEBUG
1804 	C_SEG_MAKE_WRITEABLE(c_seg);
1805 #endif
1806 
1807 #if VALIDATE_C_SEGMENTS
1808 	c_seg->c_was_minor_compacted++;
1809 #endif
1810 	c_indx = c_seg->c_firstemptyslot;
1811 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1812 
1813 	old_populated_offset = c_seg->c_populated_offset;
1814 	c_offset = c_dst->c_offset;
1815 
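	/*
	 * slide each remaining live slot down into the hole left by freed
	 * slots, fixing up the slot's backpointer (s_cindx) as we go so
	 * decompression can still locate its data
	 */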
1816 	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
1817 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
1818 
1819 		c_size = UNPACK_C_SIZE(c_src);
1820 
1821 		if (c_size == 0) {
1822 			continue;
1823 		}
1824 
1825 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1826 /* N.B.: This memcpy may be an overlapping copy */
1827 		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
1828 
1829 		cslot_copy(c_dst, c_src);
1830 		c_dst->c_offset = c_offset;
1831 
1832 		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
1833 		slot_ptr->s_cindx = c_indx;
1834 
1835 		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1836 		PACK_C_SIZE(c_src, 0);
1837 		c_indx++;
1838 
1839 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1840 	}
1841 	c_seg->c_firstemptyslot = c_indx;
1842 	c_seg->c_nextslot = c_indx;
1843 	c_seg->c_nextoffset = c_offset;
1844 	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1845 	c_seg->c_bytes_unused = 0;
1846 
1847 #if VALIDATE_C_SEGMENTS
1848 	c_seg_validate(c_seg, TRUE);
1849 #endif
1850 	if (old_populated_offset > c_seg->c_populated_offset) {
1851 		uint32_t        gc_size;
1852 		int32_t         *gc_ptr;
1853 
1854 		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
1855 		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
1856 
1857 		kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size,
1858 		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1859 	}
1860 
1861 #if DEVELOPMENT || DEBUG
1862 	C_SEG_WRITE_PROTECT(c_seg);
1863 #endif
1864 
1865 done:
1866 	if (clear_busy == TRUE) {
1867 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1868 		C_SEG_WAKEUP_DONE(c_seg);
1869 		lck_mtx_unlock_always(&c_seg->c_lock);
1870 	}
1871 	return 0;
1872 }
1873 
1874 
1875 static void
1876 c_seg_alloc_nextslot(c_segment_t c_seg)
1877 {
1878 	struct c_slot   *old_slot_array = NULL;
1879 	struct c_slot   *new_slot_array = NULL;
1880 	int             newlen;
1881 	int             oldlen;
1882 
1883 	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
1884 		return;
1885 	}
1886 
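	/*
	 * the fixed inline slot array is exhausted; allocate (or double) the
	 * variable-length slot array, copy the old entries across while
	 * holding the segment lock, then free the old array
	 */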
1887 	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
1888 		oldlen = c_seg->c_slot_var_array_len;
1889 		old_slot_array = c_seg->c_slot_var_array;
1890 
1891 		if (oldlen == 0) {
1892 			newlen = c_seg_slot_var_array_min_len;
1893 		} else {
1894 			newlen = oldlen * 2;
1895 		}
1896 
1897 		new_slot_array = kalloc_data(sizeof(struct c_slot) * newlen,
1898 		    Z_WAITOK);
1899 
1900 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1901 
1902 		if (old_slot_array) {
1903 			memcpy(new_slot_array, old_slot_array,
1904 			    sizeof(struct c_slot) * oldlen);
1905 		}
1906 
1907 		c_seg->c_slot_var_array_len = newlen;
1908 		c_seg->c_slot_var_array = new_slot_array;
1909 
1910 		lck_mtx_unlock_always(&c_seg->c_lock);
1911 
1912 		kfree_data(old_slot_array, sizeof(struct c_slot) * oldlen);
1913 	}
1914 }
1915 
1916 
1917 #define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)
1918 
1919 struct {
1920 	uint64_t asked_permission;
1921 	uint64_t compactions;
1922 	uint64_t moved_slots;
1923 	uint64_t moved_bytes;
1924 	uint64_t wasted_space_in_swapouts;
1925 	uint64_t count_of_swapouts;
1926 	uint64_t count_of_freed_segs;
1927 	uint64_t bailed_compactions;
1928 	uint64_t bytes_freed_rate_us;
1929 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
1930 
1931 int c_seg_major_compact_stats_now = 0;
1932 
1933 
1934 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((c_seg_bufsize * 90) / 100)
1935 
1936 
1937 boolean_t
1938 c_seg_major_compact_ok(
1939 	c_segment_t c_seg_dst,
1940 	c_segment_t c_seg_src)
1941 {
1942 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
1943 
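	/*
	 * if both segments are already ~90% full (see
	 * C_MAJOR_COMPACTION_SIZE_APPROPRIATE), there is little to be
	 * gained by merging them
	 */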
1944 	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
1945 	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
1946 		return FALSE;
1947 	}
1948 
1949 	if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
1950 		/*
1951 		 * destination segment is full... can't compact
1952 		 */
1953 		return FALSE;
1954 	}
1955 
1956 	return TRUE;
1957 }
1958 
1959 
1960 boolean_t
1961 c_seg_major_compact(
1962 	c_segment_t c_seg_dst,
1963 	c_segment_t c_seg_src)
1964 {
1965 	c_slot_mapping_t slot_ptr;
1966 	uint32_t        c_rounded_size;
1967 	uint32_t        c_size;
1968 	uint16_t        dst_slot;
1969 	int             i;
1970 	c_slot_t        c_dst;
1971 	c_slot_t        c_src;
1972 	boolean_t       keep_compacting = TRUE;
1973 
1974 	/*
1975 	 * segments are not locked but they are both marked c_busy
1976 	 * which keeps c_decompress from working on them...
1977 	 * we can safely allocate new pages, move compressed data
1978 	 * from c_seg_src to c_seg_dst and update both c_segments'
1979 	 * state w/o holding the master lock
1980 	 */
1981 #if DEVELOPMENT || DEBUG
1982 	C_SEG_MAKE_WRITEABLE(c_seg_dst);
1983 #endif
1984 
1985 #if VALIDATE_C_SEGMENTS
1986 	c_seg_dst->c_was_major_compacted++;
1987 	c_seg_src->c_was_major_donor++;
1988 #endif
1989 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
1990 
1991 	dst_slot = c_seg_dst->c_nextslot;
1992 
1993 	for (i = 0; i < c_seg_src->c_nextslot; i++) {
1994 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
1995 
1996 		c_size = UNPACK_C_SIZE(c_src);
1997 
1998 		if (c_size == 0) {
1999 			/* BATCH: move what we have so far; */
2000 			continue;
2001 		}
2002 
2003 		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {
2004 			int     size_to_populate;
2005 
2006 			/* doesn't fit */
2007 			size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2008 
2009 			if (size_to_populate == 0) {
2010 				/* can't fit */
2011 				keep_compacting = FALSE;
2012 				break;
2013 			}
2014 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2015 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2016 			}
2017 
2018 			kernel_memory_populate(compressor_map,
2019 			    (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2020 			    size_to_populate,
2021 			    KMA_COMPRESSOR,
2022 			    VM_KERN_MEMORY_COMPRESSOR);
2023 
2024 			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2025 			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2026 		}
2027 		c_seg_alloc_nextslot(c_seg_dst);
2028 
2029 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2030 
2031 		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
2032 
2033 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2034 
2035 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2036 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += c_size;
2037 
2038 		cslot_copy(c_dst, c_src);
2039 		c_dst->c_offset = c_seg_dst->c_nextoffset;
2040 
2041 		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2042 			c_seg_dst->c_firstemptyslot++;
2043 		}
2044 		c_seg_dst->c_slots_used++;
2045 		c_seg_dst->c_nextslot++;
2046 		c_seg_dst->c_bytes_used += c_rounded_size;
2047 		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2048 
2049 		PACK_C_SIZE(c_src, 0);
2050 
2051 		c_seg_src->c_bytes_used -= c_rounded_size;
2052 		c_seg_src->c_bytes_unused += c_rounded_size;
2053 		c_seg_src->c_firstemptyslot = 0;
2054 
2055 		assert(c_seg_src->c_slots_used);
2056 		c_seg_src->c_slots_used--;
2057 
2058 		if (!c_seg_src->c_swappedin) {
2059 			/* Pessimistically lose swappedin status when non-swappedin pages are added. */
2060 			c_seg_dst->c_swappedin = false;
2061 		}
2062 
2063 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2064 			/* dest segment is now full */
2065 			keep_compacting = FALSE;
2066 			break;
2067 		}
2068 	}
2069 #if DEVELOPMENT || DEBUG
2070 	C_SEG_WRITE_PROTECT(c_seg_dst);
2071 #endif
2072 	if (dst_slot < c_seg_dst->c_nextslot) {
2073 		PAGE_REPLACEMENT_ALLOWED(TRUE);
2074 		/*
2075 		 * we've now locked out c_decompress from
2076 		 * converting the slot passed into it into
2077 		 * a c_segment_t which allows us to use
2078 		 * the backptr to change which c_segment and
2079 		 * index the slot points to
2080 		 */
2081 		while (dst_slot < c_seg_dst->c_nextslot) {
2082 			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2083 
2084 			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2085 			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2086 			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2087 			slot_ptr->s_cindx = dst_slot++;
2088 		}
2089 		PAGE_REPLACEMENT_ALLOWED(FALSE);
2090 	}
2091 	return keep_compacting;
2092 }
2093 
2094 
2095 uint64_t
2096 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2097 {
2098 	uint64_t end_msecs;
2099 	uint64_t start_msecs;
2100 
2101 	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2102 	start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2103 
2104 	return end_msecs - start_msecs;
2105 }
2106 
2107 
2108 
2109 uint32_t compressor_eval_period_in_msecs = 250;
2110 uint32_t compressor_sample_min_in_msecs = 500;
2111 uint32_t compressor_sample_max_in_msecs = 10000;
2112 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2113 uint32_t compressor_thrashing_min_per_10msecs = 20;
2114 
2115 /* When true, reset sample data next chance we get. */
2116 static boolean_t        compressor_need_sample_reset = FALSE;
2117 
2118 
2119 void
2120 compute_swapout_target_age(void)
2121 {
2122 	clock_sec_t     cur_ts_sec;
2123 	clock_nsec_t    cur_ts_nsec;
2124 	uint32_t        min_operations_needed_in_this_sample;
2125 	uint64_t        elapsed_msecs_in_eval;
2126 	uint64_t        elapsed_msecs_in_sample;
2127 	boolean_t       need_eval_reset = FALSE;
2128 
2129 	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2130 
2131 	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2132 
2133 	if (compressor_need_sample_reset ||
2134 	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2135 		compressor_need_sample_reset = TRUE;
2136 		need_eval_reset = TRUE;
2137 		goto done;
2138 	}
2139 	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2140 
2141 	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2142 		goto done;
2143 	}
2144 	need_eval_reset = TRUE;
2145 
2146 	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2147 
2148 	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2149 
2150 	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2151 	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2152 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2153 		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2154 
2155 		swapout_target_age = 0;
2156 
2157 		compressor_need_sample_reset = TRUE;
2158 		need_eval_reset = TRUE;
2159 		goto done;
2160 	}
2161 	last_eval_compression_count = sample_period_compression_count;
2162 	last_eval_decompression_count = sample_period_decompression_count;
2163 
2164 	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2165 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2166 		goto done;
2167 	}
2168 	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2169 		uint64_t        running_total;
2170 		uint64_t        working_target;
2171 		uint64_t        aging_target;
2172 		uint32_t        oldest_age_of_csegs_sampled = 0;
2173 		uint64_t        working_set_approximation = 0;
2174 
2175 		swapout_target_age = 0;
2176 
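		/*
		 * scan the age histogram from the youngest bucket upwards until
		 * ~95% of this sample's decompressions are covered; the bucket we
		 * stop at bounds the age of the compressed working set that is
		 * still being touched
		 */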
2177 		working_target = (sample_period_decompression_count / 100) * 95;                /* 95 percent */
2178 		aging_target = (sample_period_decompression_count / 100) * 1;                   /* 1 percent */
2179 		running_total = 0;
2180 
2181 		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2182 			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2183 
2184 			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2185 
2186 			if (running_total >= working_target) {
2187 				break;
2188 			}
2189 		}
2190 		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2191 			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2192 
2193 			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
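				/*
				 * the working set fits within the compressor pool, so pick a
				 * swapout age: walk from the oldest age bucket toward younger
				 * ones until ~1% of decompressions (aging_target) are covered...
				 * segments older than that are considered cold enough to swap
				 */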
2194 				running_total = overage_decompressions_during_sample_period;
2195 
2196 				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2197 					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2198 
2199 					if (running_total >= aging_target) {
2200 						break;
2201 					}
2202 				}
2203 				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2204 
2205 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2206 			} else {
2207 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2208 			}
2209 		} else {
2210 			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2211 		}
2212 
2213 		compressor_need_sample_reset = TRUE;
2214 		need_eval_reset = TRUE;
2215 	} else {
2216 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2217 	}
2218 done:
2219 	if (compressor_need_sample_reset == TRUE) {
2220 		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2221 		overage_decompressions_during_sample_period = 0;
2222 
2223 		start_of_sample_period_sec = cur_ts_sec;
2224 		start_of_sample_period_nsec = cur_ts_nsec;
2225 		sample_period_decompression_count = 0;
2226 		sample_period_compression_count = 0;
2227 		last_eval_decompression_count = 0;
2228 		last_eval_compression_count = 0;
2229 		compressor_need_sample_reset = FALSE;
2230 	}
2231 	if (need_eval_reset == TRUE) {
2232 		start_of_eval_period_sec = cur_ts_sec;
2233 		start_of_eval_period_nsec = cur_ts_nsec;
2234 	}
2235 }
2236 
2237 
2238 int             compaction_swapper_init_now = 0;
2239 int             compaction_swapper_running = 0;
2240 int             compaction_swapper_awakened = 0;
2241 int             compaction_swapper_abort = 0;
2242 
2243 
2244 #if CONFIG_JETSAM
2245 boolean_t       memorystatus_kill_on_VM_compressor_thrashing(boolean_t);
2246 boolean_t       memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
2247 boolean_t       memorystatus_kill_on_FC_thrashing(boolean_t);
2248 int             compressor_thrashing_induced_jetsam = 0;
2249 int             filecache_thrashing_induced_jetsam = 0;
2250 static boolean_t        vm_compressor_thrashing_detected = FALSE;
2251 #endif /* CONFIG_JETSAM */
2252 
2253 static bool
2254 compressor_swapout_conditions_met(void)
2255 {
2256 	bool should_swap = false;
2257 
2258 	if (COMPRESSOR_NEEDS_TO_SWAP()) {
2259 		should_swap = true;
2260 		vmcs_stats.compressor_swap_threshold_exceeded++;
2261 	}
2262 	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2263 		should_swap = true;
2264 		vmcs_stats.external_q_throttled++;
2265 	}
2266 	if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2267 		should_swap = true;
2268 		vmcs_stats.free_count_below_reserve++;
2269 	}
2270 	return should_swap;
2271 }
2272 
2273 static boolean_t
2274 compressor_needs_to_swap(void)
2275 {
2276 	boolean_t       should_swap = FALSE;
2277 
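	/*
	 * when swap-behind of ripe segments is enabled, trigger a swap pass
	 * as soon as the oldest segment on the age queue has been around
	 * longer than vm_ripe_target_age
	 */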
2278 	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2279 		c_segment_t     c_seg;
2280 		clock_sec_t     now;
2281 		clock_sec_t     age;
2282 		clock_nsec_t    nsec;
2283 
2284 		clock_get_system_nanotime(&now, &nsec);
2285 		age = 0;
2286 
2287 		lck_mtx_lock_spin_always(c_list_lock);
2288 
2289 		if (!queue_empty(&c_age_list_head)) {
2290 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
2291 
2292 			age = now - c_seg->c_creation_ts;
2293 		}
2294 		lck_mtx_unlock_always(c_list_lock);
2295 
2296 		if (age >= vm_ripe_target_age) {
2297 			should_swap = TRUE;
2298 			goto check_if_low_space;
2299 		}
2300 	}
2301 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
2302 		should_swap =  compressor_swapout_conditions_met();
2303 		if (should_swap) {
2304 			goto check_if_low_space;
2305 		}
2306 	}
2307 
2308 #if (XNU_TARGET_OS_OSX && __arm64__)
2309 	/*
2310 	 * Thrashing detection disabled.
2311 	 */
2312 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2313 
2314 	compute_swapout_target_age();
2315 
2316 	if (swapout_target_age) {
2317 		c_segment_t     c_seg;
2318 
2319 		lck_mtx_lock_spin_always(c_list_lock);
2320 
2321 		if (!queue_empty(&c_age_list_head)) {
2322 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
2323 
2324 			if (c_seg->c_creation_ts > swapout_target_age) {
2325 				swapout_target_age = 0;
2326 			}
2327 		}
2328 		lck_mtx_unlock_always(c_list_lock);
2329 	}
2330 #if CONFIG_PHANTOM_CACHE
2331 	if (vm_phantom_cache_check_pressure()) {
2332 		should_swap = TRUE;
2333 	}
2334 #endif
2335 	if (swapout_target_age) {
2336 		should_swap = TRUE;
2337 		vmcs_stats.thrashing_detected++;
2338 	}
2339 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2340 
2341 check_if_low_space:
2342 
2343 #if CONFIG_JETSAM
2344 	if (should_swap || vm_compressor_low_on_space() == TRUE) {
2345 		if (vm_compressor_thrashing_detected == FALSE) {
2346 			vm_compressor_thrashing_detected = TRUE;
2347 
2348 			if (swapout_target_age) {
2349 				/* The compressor is thrashing. */
2350 				memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */);
2351 				compressor_thrashing_induced_jetsam++;
2352 			} else if (vm_compressor_low_on_space() == TRUE) {
2353 				/* The compressor is running low on space. */
2354 				memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
2355 				compressor_thrashing_induced_jetsam++;
2356 			} else {
2357 				memorystatus_kill_on_FC_thrashing(TRUE /* async */);
2358 				filecache_thrashing_induced_jetsam++;
2359 			}
2360 		}
2361 		/*
2362 		 * let the jetsam take precedence over
2363 		 * any major compactions we might have
2364 		 * been able to do... otherwise we run
2365 		 * the risk of doing major compactions
2366 		 * on segments we're about to free up
2367 		 * due to the jetsam activity.
2368 		 */
2369 		should_swap = FALSE;
2370 	}
2371 
2372 #else /* CONFIG_JETSAM */
2373 	if (should_swap && vm_swap_low_on_space()) {
2374 		vm_compressor_take_paging_space_action();
2375 	}
2376 #endif /* CONFIG_JETSAM */
2377 
2378 	if (should_swap == FALSE) {
2379 		/*
2380 		 * vm_compressor_needs_to_major_compact returns true only if we're
2381 		 * about to run out of available compressor segments... in this
2382 		 * case, we absolutely need to run a major compaction even if
2383 		 * we've just kicked off a jetsam or we don't otherwise need to
2384 		 * swap... terminating objects releases
2385 		 * pages back to the uncompressed cache, but does not guarantee
2386 		 * that we will free up even a single compression segment
2387 		 */
2388 		should_swap = vm_compressor_needs_to_major_compact();
2389 		if (should_swap) {
2390 			vmcs_stats.fragmentation_detected++;
2391 		}
2392 	}
2393 
2394 	/*
2395 	 * returning TRUE when swap_supported == FALSE
2396 	 * will cause the major compaction engine to
2397 	 * run, but will not trigger any swapping...
2398 	 * segments that have been major compacted
2399 	 * will be moved to the majorcompact queue
2400 	 */
2401 	return should_swap;
2402 }
2403 
2404 #if CONFIG_JETSAM
2405 /*
2406  * This function is called from the jetsam thread after killing something to
2407  * mitigate thrashing.
2408  *
2409  * We need to restart our thrashing detection heuristics since memory pressure
2410  * has potentially changed significantly, and we don't want to detect on old
2411  * data from before the jetsam.
2412  */
2413 void
2414 vm_thrashing_jetsam_done(void)
2415 {
2416 	vm_compressor_thrashing_detected = FALSE;
2417 
2418 	/* Were we compressor-thrashing or filecache-thrashing? */
2419 	if (swapout_target_age) {
2420 		swapout_target_age = 0;
2421 		compressor_need_sample_reset = TRUE;
2422 	}
2423 #if CONFIG_PHANTOM_CACHE
2424 	else {
2425 		vm_phantom_cache_restart_sample();
2426 	}
2427 #endif
2428 }
2429 #endif /* CONFIG_JETSAM */
2430 
2431 uint32_t vm_wake_compactor_swapper_calls = 0;
2432 uint32_t vm_run_compactor_already_running = 0;
2433 uint32_t vm_run_compactor_empty_minor_q = 0;
2434 uint32_t vm_run_compactor_did_compact = 0;
2435 uint32_t vm_run_compactor_waited = 0;
2436 
2437 void
2438 vm_run_compactor(void)
2439 {
2440 	if (c_segment_count == 0) {
2441 		return;
2442 	}
2443 
2444 	lck_mtx_lock_spin_always(c_list_lock);
2445 
2446 	if (c_minor_count == 0) {
2447 		vm_run_compactor_empty_minor_q++;
2448 
2449 		lck_mtx_unlock_always(c_list_lock);
2450 		return;
2451 	}
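	/*
	 * someone else is already compacting: on multi-processor configs just
	 * return, but when we're restricted to a single processor wait for
	 * that pass to finish instead
	 */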
2452 	if (compaction_swapper_running) {
2453 		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2454 			vm_run_compactor_already_running++;
2455 
2456 			lck_mtx_unlock_always(c_list_lock);
2457 			return;
2458 		}
2459 		vm_run_compactor_waited++;
2460 
2461 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2462 
2463 		lck_mtx_unlock_always(c_list_lock);
2464 
2465 		thread_block(THREAD_CONTINUE_NULL);
2466 
2467 		return;
2468 	}
2469 	vm_run_compactor_did_compact++;
2470 
2471 	fastwake_warmup = FALSE;
2472 	compaction_swapper_running = 1;
2473 
2474 	vm_compressor_do_delayed_compactions(FALSE);
2475 
2476 	compaction_swapper_running = 0;
2477 
2478 	lck_mtx_unlock_always(c_list_lock);
2479 
2480 	thread_wakeup((event_t)&compaction_swapper_running);
2481 }
2482 
2483 
2484 void
2485 vm_wake_compactor_swapper(void)
2486 {
2487 	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2488 		return;
2489 	}
2490 
2491 	if (c_minor_count || vm_compressor_needs_to_major_compact()) {
2492 		lck_mtx_lock_spin_always(c_list_lock);
2493 
2494 		fastwake_warmup = FALSE;
2495 
2496 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2497 			vm_wake_compactor_swapper_calls++;
2498 
2499 			compaction_swapper_awakened = 1;
2500 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2501 		}
2502 		lck_mtx_unlock_always(c_list_lock);
2503 	}
2504 }
2505 
2506 
2507 void
2508 vm_consider_swapping()
2509 {
2510 	c_segment_t     c_seg, c_seg_next;
2511 	clock_sec_t     now;
2512 	clock_nsec_t    nsec;
2513 
2514 	assert(VM_CONFIG_SWAP_IS_PRESENT);
2515 
2516 	lck_mtx_lock_spin_always(c_list_lock);
2517 
2518 	compaction_swapper_abort = 1;
2519 
2520 	while (compaction_swapper_running) {
2521 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2522 
2523 		lck_mtx_unlock_always(c_list_lock);
2524 
2525 		thread_block(THREAD_CONTINUE_NULL);
2526 
2527 		lck_mtx_lock_spin_always(c_list_lock);
2528 	}
2529 	compaction_swapper_abort = 0;
2530 	compaction_swapper_running = 1;
2531 
2532 	vm_swapout_ripe_segments = TRUE;
2533 
2534 	if (!queue_empty(&c_major_list_head)) {
2535 		clock_get_system_nanotime(&now, &nsec);
2536 
2537 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
2538 
2539 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
2540 			if (c_overage_swapped_count >= c_overage_swapped_limit) {
2541 				break;
2542 			}
2543 
2544 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2545 
2546 			if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
2547 				lck_mtx_lock_spin_always(&c_seg->c_lock);
2548 
2549 				c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2550 
2551 				lck_mtx_unlock_always(&c_seg->c_lock);
2552 			}
2553 			c_seg = c_seg_next;
2554 		}
2555 	}
2556 	vm_compressor_compact_and_swap(FALSE);
2557 
2558 	compaction_swapper_running = 0;
2559 
2560 	vm_swapout_ripe_segments = FALSE;
2561 
2562 	lck_mtx_unlock_always(c_list_lock);
2563 
2564 	thread_wakeup((event_t)&compaction_swapper_running);
2565 }
2566 
2567 
2568 void
2569 vm_consider_waking_compactor_swapper(void)
2570 {
2571 	boolean_t       need_wakeup = FALSE;
2572 
2573 	if (c_segment_count == 0) {
2574 		return;
2575 	}
2576 
2577 	if (compaction_swapper_running || compaction_swapper_awakened) {
2578 		return;
2579 	}
2580 
2581 	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2582 		compaction_swapper_init_now = 1;
2583 		need_wakeup = TRUE;
2584 	}
2585 
2586 	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {
2587 		need_wakeup = TRUE;
2588 	} else if (compressor_needs_to_swap()) {
2589 		need_wakeup = TRUE;
2590 	} else if (c_minor_count) {
2591 		uint64_t        total_bytes;
2592 
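		/*
		 * wake up for fragmentation alone: more than 10% of the
		 * compressor pool's resident bytes are currently unused
		 */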
2593 		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;
2594 
2595 		if ((total_bytes - compressor_bytes_used) > total_bytes / 10) {
2596 			need_wakeup = TRUE;
2597 		}
2598 	}
2599 	if (need_wakeup == TRUE) {
2600 		lck_mtx_lock_spin_always(c_list_lock);
2601 
2602 		fastwake_warmup = FALSE;
2603 
2604 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2605 			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2606 
2607 			compaction_swapper_awakened = 1;
2608 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2609 		}
2610 		lck_mtx_unlock_always(c_list_lock);
2611 	}
2612 }
2613 
2614 
2615 #define C_SWAPOUT_LIMIT                 4
2616 #define DELAYED_COMPACTIONS_PER_PASS    30
2617 
2618 void
2619 vm_compressor_do_delayed_compactions(boolean_t flush_all)
2620 {
2621 	c_segment_t     c_seg;
2622 	int             number_compacted = 0;
2623 	boolean_t       needs_to_swap = FALSE;
2624 
2625 
2626 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
2627 
2628 #if XNU_TARGET_OS_OSX
2629 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
2630 #endif /* XNU_TARGET_OS_OSX */
2631 
2632 	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
2633 		c_seg = (c_segment_t)queue_first(&c_minor_list_head);
2634 
2635 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2636 
2637 		if (c_seg->c_busy) {
2638 			lck_mtx_unlock_always(c_list_lock);
2639 			c_seg_wait_on_busy(c_seg);
2640 			lck_mtx_lock_spin_always(c_list_lock);
2641 
2642 			continue;
2643 		}
2644 		C_SEG_BUSY(c_seg);
2645 
2646 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
2647 
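		/*
		 * after each batch of DELAYED_COMPACTIONS_PER_PASS segments,
		 * check whether swapping should take priority; if so, stop
		 * minor-compacting and let the swapout path drain its queue
		 */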
2648 		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
2649 			if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) {
2650 				needs_to_swap = TRUE;
2651 			}
2652 
2653 			number_compacted = 0;
2654 		}
2655 		lck_mtx_lock_spin_always(c_list_lock);
2656 	}
2657 
2658 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
2659 }
2660 
2661 
2662 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT   10
2663 
2664 static void
2665 vm_compressor_age_swapped_in_segments(boolean_t flush_all)
2666 {
2667 	c_segment_t     c_seg;
2668 	clock_sec_t     now;
2669 	clock_nsec_t    nsec;
2670 
2671 	clock_get_system_nanotime(&now, &nsec);
2672 
2673 	while (!queue_empty(&c_swappedin_list_head)) {
2674 		c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);
2675 
2676 		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
2677 			break;
2678 		}
2679 
2680 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2681 
2682 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2683 		c_seg->c_agedin_ts = (uint32_t) now;
2684 
2685 		lck_mtx_unlock_always(&c_seg->c_lock);
2686 	}
2687 }
2688 
2689 
2690 extern  int     vm_num_swap_files;
2691 extern  int     vm_num_pinned_swap_files;
2692 extern  int     vm_swappin_enabled;
2693 
2694 extern  unsigned int    vm_swapfile_total_segs_used;
2695 extern  unsigned int    vm_swapfile_total_segs_alloced;
2696 
2697 
2698 void
2699 vm_compressor_flush(void)
2700 {
2701 	uint64_t        vm_swap_put_failures_at_start;
2702 	wait_result_t   wait_result = 0;
2703 	AbsoluteTime    startTime, endTime;
2704 	clock_sec_t     now_sec;
2705 	clock_nsec_t    now_nsec;
2706 	uint64_t        nsec;
2707 	c_segment_t     c_seg, c_seg_next;
2708 
2709 	HIBLOG("vm_compressor_flush - starting\n");
2710 
2711 	clock_get_uptime(&startTime);
2712 
2713 	lck_mtx_lock_spin_always(c_list_lock);
2714 
2715 	fastwake_warmup = FALSE;
2716 	compaction_swapper_abort = 1;
2717 
2718 	while (compaction_swapper_running) {
2719 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2720 
2721 		lck_mtx_unlock_always(c_list_lock);
2722 
2723 		thread_block(THREAD_CONTINUE_NULL);
2724 
2725 		lck_mtx_lock_spin_always(c_list_lock);
2726 	}
2727 	compaction_swapper_abort = 0;
2728 	compaction_swapper_running = 1;
2729 
2730 	hibernate_flushing = TRUE;
2731 	hibernate_no_swapspace = FALSE;
2732 	hibernate_flush_timed_out = FALSE;
2733 	c_generation_id_flush_barrier = c_generation_id + 1000;
2734 
2735 	clock_get_system_nanotime(&now_sec, &now_nsec);
2736 	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
2737 
2738 	vm_swap_put_failures_at_start = vm_swap_put_failures;
2739 
2740 	/*
2741 	 * We are about to hibernate and so we want all segments flushed to disk.
2742 	 * Segments that are on the major compaction queue won't be considered in
2743 	 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
2744 	 * the ageQ for consideration.
2745 	 */
2746 	if (!queue_empty(&c_major_list_head)) {
2747 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
2748 
2749 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
2750 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2751 			lck_mtx_lock_spin_always(&c_seg->c_lock);
2752 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2753 			lck_mtx_unlock_always(&c_seg->c_lock);
2754 			c_seg = c_seg_next;
2755 		}
2756 	}
2757 	vm_compressor_compact_and_swap(TRUE);
2758 
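	/*
	 * wait for the swapout thread to drain the swapout queue, re-arming
	 * a 5 second timeout on each pass; give up if a wait times out so
	 * hibernation isn't stalled indefinitely
	 */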
2759 	while (!queue_empty(&c_swapout_list_head)) {
2760 		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
2761 
2762 		lck_mtx_unlock_always(c_list_lock);
2763 
2764 		wait_result = thread_block(THREAD_CONTINUE_NULL);
2765 
2766 		lck_mtx_lock_spin_always(c_list_lock);
2767 
2768 		if (wait_result == THREAD_TIMED_OUT) {
2769 			break;
2770 		}
2771 	}
2772 	hibernate_flushing = FALSE;
2773 	compaction_swapper_running = 0;
2774 
2775 	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
2776 		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
2777 		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
2778 	}
2779 
2780 	lck_mtx_unlock_always(c_list_lock);
2781 
2782 	thread_wakeup((event_t)&compaction_swapper_running);
2783 
2784 	clock_get_uptime(&endTime);
2785 	SUB_ABSOLUTETIME(&endTime, &startTime);
2786 	absolutetime_to_nanoseconds(endTime, &nsec);
2787 
2788 	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
2789 	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
2790 }
2791 
2792 
2793 int             compaction_swap_trigger_thread_awakened = 0;
2794 
2795 static void
2796 vm_compressor_swap_trigger_thread(void)
2797 {
2798 	current_thread()->options |= TH_OPT_VMPRIV;
2799 
2800 	/*
2801 	 * compaction_swapper_init_now is set when the first call to
2802 	 * vm_consider_waking_compactor_swapper is made from
2803 	 * vm_pageout_scan... since this function is called upon
2804 	 * thread creation, we want to make sure to delay adjusting
2805 	 * the tuneables until we are awakened via vm_pageout_scan
2806 	 * so that we are at a point where the vm_swapfile_open will
2807 	 * be operating on the correct directory (in case the default
2808 	 * of using the VM volume is overridden by the dynamic_pager)
2809 	 */
2810 	if (compaction_swapper_init_now) {
2811 		vm_compaction_swapper_do_init();
2812 
2813 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
2814 			thread_vm_bind_group_add();
2815 		}
2816 #if CONFIG_THREAD_GROUPS
2817 		thread_group_vm_add();
2818 #endif
2819 		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
2820 		compaction_swapper_init_now = 0;
2821 	}
2822 	lck_mtx_lock_spin_always(c_list_lock);
2823 
2824 	compaction_swap_trigger_thread_awakened++;
2825 	compaction_swapper_awakened = 0;
2826 
2827 	if (compaction_swapper_running == 0) {
2828 		compaction_swapper_running = 1;
2829 
2830 		vm_compressor_compact_and_swap(FALSE);
2831 
2832 		compaction_swapper_running = 0;
2833 	}
2834 	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
2835 
2836 	if (compaction_swapper_running == 0) {
2837 		thread_wakeup((event_t)&compaction_swapper_running);
2838 	}
2839 
2840 	lck_mtx_unlock_always(c_list_lock);
2841 
2842 	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
2843 
2844 	/* NOTREACHED */
2845 }
2846 
2847 
2848 void
2849 vm_compressor_record_warmup_start(void)
2850 {
2851 	c_segment_t     c_seg;
2852 
2853 	lck_mtx_lock_spin_always(c_list_lock);
2854 
2855 	if (first_c_segment_to_warm_generation_id == 0) {
2856 		if (!queue_empty(&c_age_list_head)) {
2857 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
2858 
2859 			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2860 		} else {
2861 			first_c_segment_to_warm_generation_id = 0;
2862 		}
2863 
2864 		fastwake_recording_in_progress = TRUE;
2865 	}
2866 	lck_mtx_unlock_always(c_list_lock);
2867 }
2868 
2869 
2870 void
2871 vm_compressor_record_warmup_end(void)
2872 {
2873 	c_segment_t     c_seg;
2874 
2875 	lck_mtx_lock_spin_always(c_list_lock);
2876 
2877 	if (fastwake_recording_in_progress == TRUE) {
2878 		if (!queue_empty(&c_age_list_head)) {
2879 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
2880 
2881 			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2882 		} else {
2883 			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
2884 		}
2885 
2886 		fastwake_recording_in_progress = FALSE;
2887 
2888 		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2889 	}
2890 	lck_mtx_unlock_always(c_list_lock);
2891 }
2892 
2893 
2894 #define DELAY_TRIM_ON_WAKE_SECS         25
2895 
2896 void
2897 vm_compressor_delay_trim(void)
2898 {
2899 	clock_sec_t     sec;
2900 	clock_nsec_t    nsec;
2901 
2902 	clock_get_system_nanotime(&sec, &nsec);
2903 	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
2904 }
2905 
2906 
2907 void
2908 vm_compressor_do_warmup(void)
2909 {
2910 	lck_mtx_lock_spin_always(c_list_lock);
2911 
2912 	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
2913 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
2914 
2915 		lck_mtx_unlock_always(c_list_lock);
2916 		return;
2917 	}
2918 
2919 	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2920 		fastwake_warmup = TRUE;
2921 
2922 		compaction_swapper_awakened = 1;
2923 		thread_wakeup((event_t)&c_compressor_swap_trigger);
2924 	}
2925 	lck_mtx_unlock_always(c_list_lock);
2926 }
2927 
2928 void
2929 do_fastwake_warmup_all(void)
2930 {
2931 	lck_mtx_lock_spin_always(c_list_lock);
2932 
2933 	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
2934 		lck_mtx_unlock_always(c_list_lock);
2935 		return;
2936 	}
2937 
2938 	fastwake_warmup = TRUE;
2939 
2940 	do_fastwake_warmup(&c_swappedout_list_head, TRUE);
2941 
2942 	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
2943 
2944 	fastwake_warmup = FALSE;
2945 
2946 	lck_mtx_unlock_always(c_list_lock);
2947 }
2948 
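/*
 * do_fastwake_warmup: swap segments from the given swapped-out queue back
 * into memory at reduced I/O priority; when consider_all_cseg is FALSE,
 * only segments inside the recorded warmup generation window are pulled in,
 * and the warmup stops early if free memory drops too low
 */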
2949 void
2950 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
2951 {
2952 	c_segment_t     c_seg = NULL;
2953 	AbsoluteTime    startTime, endTime;
2954 	uint64_t        nsec;
2955 
2956 
2957 	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2958 
2959 	clock_get_uptime(&startTime);
2960 
2961 	lck_mtx_unlock_always(c_list_lock);
2962 
2963 	proc_set_thread_policy(current_thread(),
2964 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
2965 
2966 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
2967 
2968 	lck_mtx_lock_spin_always(c_list_lock);
2969 
2970 	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
2971 		c_seg = (c_segment_t) queue_first(c_queue);
2972 
2973 		if (consider_all_cseg == FALSE) {
2974 			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
2975 			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
2976 				break;
2977 			}
2978 
2979 			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
2980 				break;
2981 			}
2982 		}
2983 
2984 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2985 		lck_mtx_unlock_always(c_list_lock);
2986 
2987 		if (c_seg->c_busy) {
2988 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
2989 			c_seg_wait_on_busy(c_seg);
2990 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
2991 		} else {
2992 			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
2993 				lck_mtx_unlock_always(&c_seg->c_lock);
2994 			}
2995 			c_segment_warmup_count++;
2996 
2997 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
2998 			vm_pageout_io_throttle();
2999 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3000 		}
3001 		lck_mtx_lock_spin_always(c_list_lock);
3002 	}
3003 	lck_mtx_unlock_always(c_list_lock);
3004 
3005 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3006 
3007 	proc_set_thread_policy(current_thread(),
3008 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3009 
3010 	clock_get_uptime(&endTime);
3011 	SUB_ABSOLUTETIME(&endTime, &startTime);
3012 	absolutetime_to_nanoseconds(endTime, &nsec);
3013 
3014 	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3015 
3016 	lck_mtx_lock_spin_always(c_list_lock);
3017 
3018 	if (consider_all_cseg == FALSE) {
3019 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3020 	}
3021 }
3022 
3023 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3024 extern bool     vm_swapout_thread_running;
3025 extern boolean_t        compressor_store_stop_compaction;
3026 
3027 void
3028 vm_compressor_compact_and_swap(boolean_t flush_all)
3029 {
3030 	c_segment_t     c_seg, c_seg_next;
3031 	boolean_t       keep_compacting, switch_state;
3032 	clock_sec_t     now;
3033 	clock_nsec_t    nsec;
3034 	mach_timespec_t start_ts, end_ts;
3035 	unsigned int    number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3036 	uint64_t        bytes_to_free, bytes_freed, delta_usec;
3037 
3038 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3039 
3040 	if (fastwake_warmup == TRUE) {
3041 		uint64_t        starting_warmup_count;
3042 
3043 		starting_warmup_count = c_segment_warmup_count;
3044 
3045 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3046 		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3047 		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3048 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3049 
3050 		fastwake_warmup = FALSE;
3051 	}
3052 
3053 #if (XNU_TARGET_OS_OSX && __arm64__)
3054 	/*
3055 	 * Re-considering major csegs showed benefits on all platforms by
3056 	 * However, on smaller devices, e.g. watch, there was increased power
3057 	 * However, on smaller devices, eg watch, there was increased power
3058 	 * use for the additional compactions. And the turnover in csegs on
3059 	 * those smaller platforms is high enough in the decompression/free
3060 	 * path that we can skip reconsidering them here because we already
3061 	 * consider them for major compaction in those paths.
3062 	 */
3063 	vm_compressor_process_major_segments();
3064 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3065 
3066 	/*
3067 	 * it's possible for the c_age_list_head to be empty if we
3068 	 * hit our limits for growing the compressor pool and we subsequently
3069 	 * hibernated... on the next hibernation we could see the queue as
3070 	 * empty and not proceed even though we have a bunch of segments on
3071 	 * the swapped in queue that need to be dealt with.
3072 	 */
3073 	vm_compressor_do_delayed_compactions(flush_all);
3074 
3075 	vm_compressor_age_swapped_in_segments(flush_all);
3076 
3077 	/*
3078 	 * we only need to grab the timestamp once per
3079 	 * invocation of this function since the
3080 	 * timescale we're interested in is measured
3081 	 * in days
3082 	 */
3083 	clock_get_system_nanotime(&now, &nsec);
3084 
3085 	start_ts.tv_sec = (int) now;
3086 	start_ts.tv_nsec = nsec;
3087 	delta_usec = 0;
3088 	number_considered = 0;
3089 	wanted_cseg_found = 0;
3090 	number_yields = 0;
3091 	bytes_to_free = 0;
3092 	bytes_freed = 0;
3093 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3094 
3095 	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3096 		if (hibernate_flushing == TRUE) {
3097 			clock_sec_t     sec;
3098 
3099 			if (hibernate_should_abort()) {
3100 				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3101 				break;
3102 			}
3103 			if (hibernate_no_swapspace == TRUE) {
3104 				HIBLOG("vm_compressor_flush - out of swap space\n");
3105 				break;
3106 			}
3107 			if (vm_swap_files_pinned() == FALSE) {
3108 				HIBLOG("vm_compressor_flush - unpinned swap files\n");
3109 				break;
3110 			}
3111 			if (hibernate_in_progress_with_pinned_swap == TRUE &&
3112 			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3113 				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3114 				break;
3115 			}
3116 			clock_get_system_nanotime(&sec, &nsec);
3117 
3118 			if (sec > hibernate_flushing_deadline) {
3119 				hibernate_flush_timed_out = TRUE;
3120 				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3121 				break;
3122 			}
3123 		}
3124 		if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3125 			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3126 
3127 			if (!vm_swapout_thread_running) {
3128 				thread_wakeup((event_t)&c_swapout_list_head);
3129 			}
3130 
3131 			lck_mtx_unlock_always(c_list_lock);
3132 
3133 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3134 
3135 			thread_block(THREAD_CONTINUE_NULL);
3136 
3137 			lck_mtx_lock_spin_always(c_list_lock);
3138 		}
3139 		/*
3140 		 * Minor compactions
3141 		 */
3142 		vm_compressor_do_delayed_compactions(flush_all);
3143 
3144 		vm_compressor_age_swapped_in_segments(flush_all);
3145 
3146 		if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3147 			/*
3148 			 * we timed out on the above thread_block
3149 			 * let's loop around and try again
3150 			 * the timeout allows us to continue
3151 			 * to do minor compactions to make
3152 			 * more memory available
3153 			 */
3154 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3155 
3156 			continue;
3157 		}
3158 
3159 		/*
3160 		 * Swap out segments?
3161 		 */
3162 		if (flush_all == FALSE) {
3163 			boolean_t       needs_to_swap;
3164 
3165 			lck_mtx_unlock_always(c_list_lock);
3166 
3167 			needs_to_swap = compressor_needs_to_swap();
3168 
3169 			lck_mtx_lock_spin_always(c_list_lock);
3170 
3171 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3172 
3173 			if (needs_to_swap == FALSE) {
3174 				break;
3175 			}
3176 		}
3177 		if (queue_empty(&c_age_list_head)) {
3178 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3179 			break;
3180 		}
3181 		c_seg = (c_segment_t) queue_first(&c_age_list_head);
3182 
3183 		assert(c_seg->c_state == C_ON_AGE_Q);
3184 
3185 		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3186 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3187 			break;
3188 		}
3189 
3190 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3191 
3192 		if (c_seg->c_busy) {
3193 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3194 
3195 			lck_mtx_unlock_always(c_list_lock);
3196 			c_seg_wait_on_busy(c_seg);
3197 			lck_mtx_lock_spin_always(c_list_lock);
3198 
3199 			continue;
3200 		}
3201 		C_SEG_BUSY(c_seg);
3202 
3203 		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3204 			/*
3205 			 * found an empty c_segment and freed it
3206 			 * so go grab the next guy in the queue
3207 			 */
3208 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3209 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3210 			continue;
3211 		}
3212 		/*
3213 		 * Major compaction
3214 		 */
3215 		keep_compacting = TRUE;
3216 		switch_state = TRUE;
3217 
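		/*
		 * keep pulling data from successive neighbors on the age queue
		 * into c_seg until it fills up, a candidate can't be used, or
		 * our own segment is wanted by someone else
		 */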
3218 		while (keep_compacting == TRUE) {
3219 			assert(c_seg->c_busy);
3220 
3221 			/* look for another segment to consolidate */
3222 
3223 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3224 
3225 			if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) {
3226 				break;
3227 			}
3228 
3229 			assert(c_seg_next->c_state == C_ON_AGE_Q);
3230 
3231 			number_considered++;
3232 
3233 			if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3234 				break;
3235 			}
3236 
3237 			lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3238 
3239 			if (c_seg_next->c_busy) {
3240 				/*
3241 				 * We are going to block for our neighbor.
3242 				 * If our c_seg is wanted, we should unbusy
3243 				 * it because we don't know how long we might
3244 				 * have to block here.
3245 				 */
3246 				if (c_seg->c_wanted) {
3247 					lck_mtx_unlock_always(&c_seg_next->c_lock);
3248 					switch_state = FALSE;
3249 					c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3250 					wanted_cseg_found++;
3251 					break;
3252 				}
3253 
3254 				lck_mtx_unlock_always(c_list_lock);
3255 
3256 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3257 
3258 				c_seg_wait_on_busy(c_seg_next);
3259 				lck_mtx_lock_spin_always(c_list_lock);
3260 
3261 				continue;
3262 			}
3263 			/* grab that segment */
3264 			C_SEG_BUSY(c_seg_next);
3265 
3266 			bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3267 			if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3268 				/*
3269 				 * found an empty c_segment and freed it
3270 				 * so we can't continue to use c_seg_next
3271 				 */
3272 				bytes_freed += bytes_to_free;
3273 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3274 				continue;
3275 			}
3276 
3277 			/* unlock the list ... */
3278 			lck_mtx_unlock_always(c_list_lock);
3279 
3280 			/* do the major compaction */
3281 
3282 			keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3283 
3284 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3285 
3286 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3287 
3288 			lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3289 			/*
3290 			 * run a minor compaction on the donor segment
3291 			 * since we pulled at least some of its
3292 			 * data into our target...  if we've emptied
3293 			 * it, now is a good time to free it, which
3294 			 * c_seg_minor_compaction_and_unlock also takes care of
3295 			 *
3296 			 * by passing TRUE, we ask for c_busy to be cleared
3297 			 * and c_wanted to be taken care of
3298 			 */
3299 			bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3300 			if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3301 				bytes_freed += bytes_to_free;
3302 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3303 			} else {
3304 				bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3305 				bytes_freed += bytes_to_free;
3306 			}
3307 
3308 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3309 
3310 			/* relock the list */
3311 			lck_mtx_lock_spin_always(c_list_lock);
3312 
3313 			if (c_seg->c_wanted) {
3314 				/*
3315 				 * Our c_seg is in demand. Let's
3316 				 * unbusy it and wakeup the waiters
3317 				 * instead of continuing the compaction
3318 				 * because we could be in this loop
3319 				 * for a while.
3320 				 */
3321 				switch_state = FALSE;
3322 				wanted_cseg_found++;
3323 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3324 				break;
3325 			}
3326 		} /* major compaction */
3327 
3328 		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, wanted_cseg_found, 0);
3329 
3330 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3331 
3332 		assert(c_seg->c_busy);
3333 		assert(!c_seg->c_on_minorcompact_q);
3334 
3335 		if (switch_state) {
3336 			if (VM_CONFIG_SWAP_IS_ACTIVE) {
3337 				int new_state = C_ON_SWAPOUT_Q;
3338 
3339 #if (XNU_TARGET_OS_OSX && __arm64__)
3340 				if (flush_all == false && compressor_swapout_conditions_met() == false) {
3341 					new_state = C_ON_MAJORCOMPACT_Q;
3342 				}
3343 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3344 
3345 				if (new_state == C_ON_SWAPOUT_Q) {
3346 					/*
3347 					 * This mode of putting a generic c_seg on the swapout list is
3348 					 * only supported when we have general swapping enabled
3349 					 */
3350 					clock_sec_t lnow;
3351 					clock_nsec_t lnsec;
3352 					clock_get_system_nanotime(&lnow, &lnsec);
3353 					if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
3354 						vmcs_stats.unripe_under_30s++;
3355 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
3356 						vmcs_stats.unripe_under_60s++;
3357 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
3358 						vmcs_stats.unripe_under_300s++;
3359 					}
3360 				}
3361 
3362 				c_seg_switch_state(c_seg, new_state, FALSE);
3363 			} else {
3364 				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
3365 					assert(VM_CONFIG_SWAP_IS_PRESENT);
3366 					/*
3367 					 * we are running compressor sweeps with swap-behind;
3368 					 * make sure the c_seg has aged enough before swapping it
3369 					 * out...
3370 					 */
3371 					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
3372 						c_seg->c_overage_swap = TRUE;
3373 						c_overage_swapped_count++;
3374 						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3375 					}
3376 				}
3377 			}
3378 			if (c_seg->c_state == C_ON_AGE_Q) {
3379 				/*
3380 				 * this c_seg didn't get moved to the swapout queue
3381 				 * so we need to move it out of the way...
3382 				 * we just did a major compaction on it so put it
3383 				 * on that queue
3384 				 */
3385 				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
3386 			} else {
3387 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
3388 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
3389 			}
3390 		}
3391 
3392 		C_SEG_WAKEUP_DONE(c_seg);
3393 
3394 		lck_mtx_unlock_always(&c_seg->c_lock);
3395 
3396 		if (c_swapout_count) {
3397 			/*
3398 			 * We don't pause/yield here because we will either
3399 			 * yield below or at the top of the loop with the
3400 			 * assert_wait_timeout.
3401 			 */
3402 			if (!vm_swapout_thread_running) {
3403 				thread_wakeup((event_t)&c_swapout_list_head);
3404 			}
3405 		}
3406 
3407 		if (number_considered >= yield_after_considered_per_pass) {
3408 			if (wanted_cseg_found) {
3409 				/*
3410 				 * We stopped major compactions on a c_seg
3411 				 * that is wanted. We don't know the priority
3412 				 * of the waiter unfortunately but we are at
3413 				 * of the waiter, unfortunately, but we are at
3414 				 * the waiter is a critical system daemon or
3415 				 * UI thread, let's give up the CPU in case
3416 				 * the system is running a few CPU intensive
3417 				 * tasks.
3418 				 */
3419 				lck_mtx_unlock_always(c_list_lock);
3420 
3421 				mutex_pause(2); /* 100us yield */
3422 
3423 				number_yields++;
3424 
3425 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
3426 
3427 				lck_mtx_lock_spin_always(c_list_lock);
3428 			}
3429 
3430 			number_considered = 0;
3431 			wanted_cseg_found = 0;
3432 		}
3433 	}
3434 	clock_get_system_nanotime(&now, &nsec);
3435 	end_ts.tv_sec = (int) now;
3436 	end_ts.tv_nsec = nsec;
3437 
3438 	SUB_MACH_TIMESPEC(&end_ts, &start_ts);
3439 
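	/*
	 * Convert the elapsed time to microseconds and back out the time spent
	 * in the deliberate yields above (each mutex_pause(2) is ~100us), so the
	 * bytes-freed rate below reflects time actually spent compacting.
	 */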
3440 	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
3441 
3442 	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
3443 
3444 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
3445 
3446 	if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
3447 		c_seg_major_compact_stats_now = 0;
3448 	} else {
3449 		c_seg_major_compact_stats_now++;
3450 	}
3451 
3452 	assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
3453 
3454 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3455 }
3456 
3457 
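/*
 * Return the segment currently being filled for this compression context,
 * allocating and initializing a new one if *current_chead is NULL.  Growing
 * the c_segments array (one page of headers at a time) and populating more
 * of the segment's buffer happen here as needed.  Returns NULL if the
 * segment count or compressed-page count has hit its limit.  On success,
 * returns with the c_seg lock held and PAGE_REPLACEMENT_DISALLOWED(TRUE).
 */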
3458 static c_segment_t
3459 c_seg_allocate(c_segment_t *current_chead)
3460 {
3461 	c_segment_t     c_seg;
3462 	int             min_needed;
3463 	int             size_to_populate;
3464 
3465 #if XNU_TARGET_OS_OSX
3466 	if (vm_compressor_low_on_space()) {
3467 		vm_compressor_take_paging_space_action();
3468 	}
3469 #endif /* XNU_TARGET_OS_OSX */
3470 
3471 	if ((c_seg = *current_chead) == NULL) {
3472 		uint32_t        c_segno;
3473 
3474 		lck_mtx_lock_spin_always(c_list_lock);
3475 
3476 		while (c_segments_busy == TRUE) {
3477 			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
3478 
3479 			lck_mtx_unlock_always(c_list_lock);
3480 
3481 			thread_block(THREAD_CONTINUE_NULL);
3482 
3483 			lck_mtx_lock_spin_always(c_list_lock);
3484 		}
3485 		if (c_free_segno_head == (uint32_t)-1) {
3486 			uint32_t        c_segments_available_new;
3487 			uint32_t        compressed_pages;
3488 
3489 #if CONFIG_FREEZE
3490 			if (freezer_incore_cseg_acct) {
3491 				compressed_pages = c_segment_pages_compressed_incore;
3492 			} else {
3493 				compressed_pages = c_segment_pages_compressed;
3494 			}
3495 #else
3496 			compressed_pages = c_segment_pages_compressed;
3497 #endif /* CONFIG_FREEZE */
3498 
3499 			if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
3500 				lck_mtx_unlock_always(c_list_lock);
3501 
3502 				return NULL;
3503 			}
3504 			c_segments_busy = TRUE;
3505 			lck_mtx_unlock_always(c_list_lock);
3506 
3507 			kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
3508 			    PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
3509 			c_segments_next_page += PAGE_SIZE;
3510 
3511 			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
3512 
3513 			if (c_segments_available_new > c_segments_limit) {
3514 				c_segments_available_new = c_segments_limit;
3515 			}
3516 
3517 			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
3518 				c_segments[c_segno - 1].c_segno = c_segno;
3519 			}
3520 
3521 			lck_mtx_lock_spin_always(c_list_lock);
3522 
3523 			c_segments[c_segno - 1].c_segno = c_free_segno_head;
3524 			c_free_segno_head = c_segments_available;
3525 			c_segments_available = c_segments_available_new;
3526 
3527 			c_segments_busy = FALSE;
3528 			thread_wakeup((event_t) (&c_segments_busy));
3529 		}
3530 		c_segno = c_free_segno_head;
3531 		assert(c_segno >= 0 && c_segno < c_segments_limit);
3532 
3533 		c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
3534 
3535 		/*
3536 		 * do the rest of the bookkeeping now while we're still behind
3537 		 * the list lock and grab our generation id now into a local
3538 		 * so that we can install it once we have the c_seg allocated
3539 		 */
3540 		c_segment_count++;
3541 		if (c_segment_count > c_segment_count_max) {
3542 			c_segment_count_max = c_segment_count;
3543 		}
3544 
3545 		lck_mtx_unlock_always(c_list_lock);
3546 
3547 		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
3548 
3549 		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
3550 
3551 		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
3552 
3553 		c_seg->c_state = C_IS_EMPTY;
3554 		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
3555 		c_seg->c_mysegno = c_segno;
3556 
3557 		lck_mtx_lock_spin_always(c_list_lock);
3558 		c_empty_count++;
3559 		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
3560 		c_segments[c_segno].c_seg = c_seg;
3561 		assert(c_segments[c_segno].c_segno > c_segments_available);
3562 		lck_mtx_unlock_always(c_list_lock);
3563 
3564 		*current_chead = c_seg;
3565 
3566 #if DEVELOPMENT || DEBUG
3567 		C_SEG_MAKE_WRITEABLE(c_seg);
3568 #endif
3569 	}
3570 	c_seg_alloc_nextslot(c_seg);
3571 
3572 	size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
3573 
3574 	if (size_to_populate) {
3575 		min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
3576 
3577 		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
3578 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
3579 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
3580 			}
3581 
3582 			OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
3583 
3584 			kernel_memory_populate(compressor_map,
3585 			    (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
3586 			    size_to_populate,
3587 			    KMA_COMPRESSOR,
3588 			    VM_KERN_MEMORY_COMPRESSOR);
3589 		} else {
3590 			size_to_populate = 0;
3591 		}
3592 	}
3593 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
3594 
3595 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3596 
3597 	if (size_to_populate) {
3598 		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
3599 	}
3600 
3601 	return c_seg;
3602 }
3603 
3604 #if DEVELOPMENT || DEBUG
3605 #if CONFIG_FREEZE
3606 extern boolean_t memorystatus_freeze_to_memory;
3607 #endif /* CONFIG_FREEZE */
3608 #endif /* DEVELOPMENT || DEBUG */
3609 
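/*
 * Finish off the segment this context has been filling: depopulate any
 * whole pages left unused at the tail of the buffer, write-protect the
 * buffer on DEVELOPMENT/DEBUG kernels, pick the queue the segment should
 * live on (C_ON_AGE_Q normally, C_ON_SWAPOUT_Q for the freezer and
 * darkwake paths), stamp its creation time and generation id, and clear
 * *current_chead so the next compression allocates a fresh segment.
 */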
3610 static void
3611 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
3612 {
3613 	uint32_t        unused_bytes;
3614 	uint32_t        offset_to_depopulate;
3615 	int             new_state = C_ON_AGE_Q;
3616 	clock_sec_t     sec;
3617 	clock_nsec_t    nsec;
3618 	boolean_t       head_insert = FALSE;
3619 
3620 	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
3621 
3622 	if (unused_bytes) {
3623 		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
3624 
3625 		/*
3626 		 *  release the extra physical page(s) at the end of the segment
3627 		 */
3628 		lck_mtx_unlock_always(&c_seg->c_lock);
3629 
3630 		kernel_memory_depopulate(
3631 			compressor_map,
3632 			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
3633 			unused_bytes,
3634 			KMA_COMPRESSOR,
3635 			VM_KERN_MEMORY_COMPRESSOR);
3636 
3637 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3638 
3639 		c_seg->c_populated_offset = offset_to_depopulate;
3640 	}
3641 	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
3642 
3643 #if DEVELOPMENT || DEBUG
3644 	{
3645 		boolean_t       c_seg_was_busy = FALSE;
3646 
3647 		if (!c_seg->c_busy) {
3648 			C_SEG_BUSY(c_seg);
3649 		} else {
3650 			c_seg_was_busy = TRUE;
3651 		}
3652 
3653 		lck_mtx_unlock_always(&c_seg->c_lock);
3654 
3655 		C_SEG_WRITE_PROTECT(c_seg);
3656 
3657 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3658 
3659 		if (c_seg_was_busy == FALSE) {
3660 			C_SEG_WAKEUP_DONE(c_seg);
3661 		}
3662 	}
3663 #endif
3664 
3665 #if CONFIG_FREEZE
3666 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
3667 	    VM_CONFIG_SWAP_IS_PRESENT &&
3668 	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
3669 #if DEVELOPMENT || DEBUG
3670 	    && !memorystatus_freeze_to_memory
3671 #endif /* DEVELOPMENT || DEBUG */
3672 	    ) {
3673 		new_state = C_ON_SWAPOUT_Q;
3674 	}
3675 #endif /* CONFIG_FREEZE */
3676 
3677 	if (vm_darkwake_mode == TRUE) {
3678 		new_state = C_ON_SWAPOUT_Q;
3679 		head_insert = TRUE;
3680 	}
3681 
3682 	clock_get_system_nanotime(&sec, &nsec);
3683 	c_seg->c_creation_ts = (uint32_t)sec;
3684 
3685 	lck_mtx_lock_spin_always(c_list_lock);
3686 
3687 	c_seg->c_generation_id = c_generation_id++;
3688 	c_seg_switch_state(c_seg, new_state, head_insert);
3689 
3690 #if CONFIG_FREEZE
3691 	if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3692 		/*
3693 		 * darkwake and freezer can't co-exist;
3694 		 * we'll need to fix this accounting as a start.
3695 		 */
3696 		assert(vm_darkwake_mode == FALSE);
3697 		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
3698 		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
3699 	}
3700 #endif /* CONFIG_FREEZE */
3701 
3702 	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3703 #if CONFIG_FREEZE
3704 		assert(c_seg->c_task_owner == NULL);
3705 #endif /* CONFIG_FREEZE */
3706 		c_seg_need_delayed_compaction(c_seg, TRUE);
3707 	}
3708 
3709 	lck_mtx_unlock_always(c_list_lock);
3710 
3711 	if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3712 		/*
3713 		 * Darkwake and Freeze configs always
3714 		 * wake up the swapout thread because
3715 		 * the compactor thread that normally handles
3716 		 * it may not be running as much in these
3717 		 * configs.
3718 		 */
3719 		thread_wakeup((event_t)&c_swapout_list_head);
3720 	}
3721 
3722 	*current_chead = NULL;
3723 }
3724 
3725 
3726 #if (XNU_TARGET_OS_OSX && __arm64__)
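/*
 * Move every segment parked on the major-compaction queue back onto the
 * age queue so the compact-and-swap sweep will reconsider it for major
 * compaction and/or swapout.
 */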
3727 static void
3728 vm_compressor_process_major_segments(void)
3729 {
3730 	c_segment_t c_seg = NULL, c_seg_next = NULL;
3731 	if (!queue_empty(&c_major_list_head)) {
3732 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
3733 
3734 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3735 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3736 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3737 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3738 			lck_mtx_unlock_always(&c_seg->c_lock);
3739 			c_seg = c_seg_next;
3740 		}
3741 	}
3742 }
3743 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3744 
3745 /*
3746  * returns with c_seg locked
3747  */
3748 void
3749 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
3750 {
3751 	clock_sec_t     sec;
3752 	clock_nsec_t    nsec;
3753 
3754 	clock_get_system_nanotime(&sec, &nsec);
3755 
3756 	lck_mtx_lock_spin_always(c_list_lock);
3757 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3758 
3759 	assert(c_seg->c_busy_swapping);
3760 	assert(c_seg->c_busy);
3761 
3762 	c_seg->c_busy_swapping = 0;
3763 
3764 	if (c_seg->c_overage_swap == TRUE) {
3765 		c_overage_swapped_count--;
3766 		c_seg->c_overage_swap = FALSE;
3767 	}
3768 	if (has_data == TRUE) {
3769 		if (age_on_swapin_q == TRUE) {
3770 			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
3771 		} else {
3772 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3773 		}
3774 
3775 		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3776 			c_seg_need_delayed_compaction(c_seg, TRUE);
3777 		}
3778 	} else {
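		/*
		 * The swap-in produced no data (e.g. vm_swap_get failed), so clear
		 * the buffer pointer and park the segment on the bad queue.
		 */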
3779 		c_seg->c_store.c_buffer = (int32_t*) NULL;
3780 		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
3781 
3782 		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
3783 	}
3784 	c_seg->c_swappedin_ts = (uint32_t)sec;
3785 	c_seg->c_swappedin = true;
3786 
3787 	lck_mtx_unlock_always(c_list_lock);
3788 }
3789 
3790 
3791 
3792 /*
3793  * c_seg has to be locked and is returned locked if the c_seg isn't freed
3794  * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
3795  * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
3796  */
3797 
3798 int
3799 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
3800 {
3801 	vm_offset_t     addr = 0;
3802 	uint32_t        io_size = 0;
3803 	uint64_t        f_offset;
3804 	thread_pri_floor_t token;
3805 
3806 	assert(C_SEG_IS_ONDISK(c_seg));
3807 
3808 #if !CHECKSUM_THE_SWAP
3809 	c_seg_trim_tail(c_seg);
3810 #endif
3811 	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
3812 	f_offset = c_seg->c_store.c_swap_handle;
3813 
3814 	C_SEG_BUSY(c_seg);
3815 	c_seg->c_busy_swapping = 1;
3816 
3817 	/*
3818 	 * This thread is likely going to block for I/O.
3819 	 * Make sure it is ready to run when the I/O completes because
3820 	 * it needs to clear the busy bit on the c_seg so that other
3821 	 * waiting threads can make progress too.
3822 	 */
3823 	token = thread_priority_floor_start();
3824 	lck_mtx_unlock_always(&c_seg->c_lock);
3825 
3826 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3827 
3828 	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
3829 	c_seg->c_store.c_buffer = (int32_t*) addr;
3830 
3831 	kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3832 
3833 	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
3834 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3835 
3836 		kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3837 
3838 		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
3839 	} else {
3840 #if ENCRYPTED_SWAP
3841 		vm_swap_decrypt(c_seg);
3842 #endif /* ENCRYPTED_SWAP */
3843 
3844 #if CHECKSUM_THE_SWAP
3845 		if (c_seg->cseg_swap_size != io_size) {
3846 			panic("swapin size doesn't match swapout size");
3847 		}
3848 
3849 		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
3850 			panic("c_seg_swapin - Swap hash mismatch");
3851 		}
3852 #endif /* CHECKSUM_THE_SWAP */
3853 
3854 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3855 
3856 		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
3857 
3858 #if CONFIG_FREEZE
3859 		/*
3860 		 * c_seg_swapin_requeue() returns with the c_seg lock held.
3861 		 */
3862 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
3863 			assert(c_seg->c_busy);
3864 
3865 			lck_mtx_unlock_always(&c_seg->c_lock);
3866 			lck_mtx_lock_spin_always(c_list_lock);
3867 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3868 		}
3869 
3870 		if (c_seg->c_task_owner) {
3871 			c_seg_update_task_owner(c_seg, NULL);
3872 		}
3873 
3874 		lck_mtx_unlock_always(c_list_lock);
3875 
3876 		OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
3877 #endif /* CONFIG_FREEZE */
3878 
3879 		OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
3880 
3881 		if (force_minor_compaction == TRUE) {
3882 			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
3883 				/*
3884 				 * c_seg was completely empty so it was freed,
3885 				 * so be careful not to reference it again
3886 				 *
3887 				 * Drop the boost so that the thread priority
3888 				 * is returned back to where it is supposed to be.
3889 				 */
3890 				thread_priority_floor_end(&token);
3891 				return 1;
3892 			}
3893 
3894 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3895 		}
3896 	}
3897 	C_SEG_WAKEUP_DONE(c_seg);
3898 
3899 	/*
3900 	 * Drop the boost so that the thread priority
3901 	 * is returned back to where it is supposed to be.
3902 	 */
3903 	thread_priority_floor_end(&token);
3904 
3905 	return 0;
3906 }
3907 
3908 
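/*
 * Single-value (SV) hash support: pages that compress down to one repeated
 * 32-bit value aren't stored in a c_segment at all.  Instead they share an
 * entry in c_segment_sv_hash_table, where each entry packs the value and a
 * reference count into a single 64-bit record updated lock-free via
 * compare-and-swap.  Dropping the last reference decrements the in-hash
 * population count; the entry itself is reused by future inserts.
 */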
3909 static void
3910 c_segment_sv_hash_drop_ref(int hash_indx)
3911 {
3912 	struct c_sv_hash_entry o_sv_he, n_sv_he;
3913 
3914 	while (1) {
3915 		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
3916 
3917 		n_sv_he.he_ref = o_sv_he.he_ref - 1;
3918 		n_sv_he.he_data = o_sv_he.he_data;
3919 
3920 		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
3921 			if (n_sv_he.he_ref == 0) {
3922 				OSAddAtomic(-1, &c_segment_svp_in_hash);
3923 			}
3924 			break;
3925 		}
3926 	}
3927 }
3928 
3929 
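/*
 * Take (or add) a reference on the hash entry for 'data', probing linearly
 * from its home bucket for up to C_SV_HASH_MAX_MISS slots.  Returns the
 * entry index on success, or -1 if no matching or free slot was found, in
 * which case the caller falls back to storing the 4-byte value in a
 * regular segment slot.
 */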
3930 static int
3931 c_segment_sv_hash_insert(uint32_t data)
3932 {
3933 	int             hash_sindx;
3934 	int             misses;
3935 	struct c_sv_hash_entry o_sv_he, n_sv_he;
3936 	boolean_t       got_ref = FALSE;
3937 
3938 	if (data == 0) {
3939 		OSAddAtomic(1, &c_segment_svp_zero_compressions);
3940 	} else {
3941 		OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
3942 	}
3943 
3944 	hash_sindx = data & C_SV_HASH_MASK;
3945 
3946 	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
3947 		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3948 
3949 		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
3950 			n_sv_he.he_ref = o_sv_he.he_ref + 1;
3951 			n_sv_he.he_data = data;
3952 
3953 			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
3954 				if (n_sv_he.he_ref == 1) {
3955 					OSAddAtomic(1, &c_segment_svp_in_hash);
3956 				}
3957 				got_ref = TRUE;
3958 				break;
3959 			}
3960 			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3961 		}
3962 		if (got_ref == TRUE) {
3963 			break;
3964 		}
3965 		hash_sindx++;
3966 
3967 		if (hash_sindx == C_SV_HASH_SIZE) {
3968 			hash_sindx = 0;
3969 		}
3970 	}
3971 	if (got_ref == FALSE) {
3972 		return -1;
3973 	}
3974 
3975 	return hash_sindx;
3976 }
3977 
3978 
3979 #if RECORD_THE_COMPRESSED_DATA
3980 
3981 static void
3982 c_compressed_record_data(char *src, int c_size)
3983 {
3984 	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
3985 		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
3986 	}
3987 
3988 	*(int *)((void *)c_compressed_record_cptr) = c_size;
3989 
3990 	c_compressed_record_cptr += 4;
3991 
3992 	memcpy(c_compressed_record_cptr, src, c_size);
3993 	c_compressed_record_cptr += c_size;
3994 }
3995 #endif
3996 
3997 
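/*
 * Compress one page from 'src' into the segment this context is filling.
 * Incompressible pages are stored as a raw PAGE_SIZE copy; pages that are a
 * single repeated 32-bit value are diverted to the SV hash (or stored as a
 * 4-byte slot if the hash is full).  The slot mapping is filled in with
 * segno + 1 and the slot index.  Returns 0 on success, or 1 if a segment
 * could not be allocated.
 */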
3998 static int
3999 c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
4000 {
4001 	int             c_size = -1;
4002 	int             c_rounded_size = 0;
4003 	int             max_csize;
4004 	c_slot_t        cs;
4005 	c_segment_t     c_seg;
4006 
4007 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4008 retry:
4009 	if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
4010 		return 1;
4011 	}
4012 	/*
4013 	 * returns with c_seg lock held
4014 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4015 	 * c_nextslot has been allocated and
4016 	 * c_store.c_buffer populated
4017 	 */
4018 	assert(c_seg->c_state == C_IS_FILLING);
4019 
4020 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4021 
4022 	C_SLOT_ASSERT_PACKABLE(slot_ptr);
4023 	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4024 
4025 	cs->c_offset = c_seg->c_nextoffset;
4026 
4027 	max_csize = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4028 
4029 	if (max_csize > PAGE_SIZE) {
4030 		max_csize = PAGE_SIZE;
4031 	}
4032 
4033 #if CHECKSUM_THE_DATA
4034 	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4035 #endif
4036 	boolean_t incomp_copy = FALSE;
4037 	int max_csize_adj = (max_csize - 4);
4038 
4039 	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4040 #if defined(__arm__) || defined(__arm64__)
4041 		uint16_t ccodec = CINVALID;
4042 		uint32_t inline_popcount;
4043 		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4044 			c_size = metacompressor((const uint8_t *) src,
4045 			    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4046 			    max_csize_adj, &ccodec,
4047 			    scratch_buf, &incomp_copy, &inline_popcount);
4048 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
4049 			cs->c_inline_popcount = inline_popcount;
4050 #else
4051 			assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4052 #endif
4053 
4054 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4055 			if (c_size > max_csize_adj) {
4056 				c_size = -1;
4057 			}
4058 #endif
4059 		} else {
4060 			c_size = -1;
4061 		}
4062 		assert(ccodec == CCWK || ccodec == CCLZ4);
4063 		cs->c_codec = ccodec;
4064 #endif
4065 	} else {
4066 #if defined(__arm__) || defined(__arm64__)
4067 		cs->c_codec = CCWK;
4068 #endif
4069 #if defined(__arm64__)
4070 		__unreachable_ok_push
4071 		if (PAGE_SIZE == 4096) {
4072 			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4073 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4074 		} else {
4075 			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4076 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4077 		}
4078 		__unreachable_ok_pop
4079 #else
4080 		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4081 		    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4082 #endif
4083 	}
4084 	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4085 	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4086 
4087 	if (c_size == -1) {
4088 		if (max_csize < PAGE_SIZE) {
4089 			c_current_seg_filled(c_seg, current_chead);
4090 			assert(*current_chead == NULL);
4091 
4092 			lck_mtx_unlock_always(&c_seg->c_lock);
4093 			/* TODO: it may be worth requiring codecs to distinguish
4094 			 * between incompressible inputs and failures due to
4095 			 * budget exhaustion.
4096 			 */
4097 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
4098 			goto retry;
4099 		}
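		/*
		 * The codec couldn't shrink this page but a full page still fits
		 * in this segment, so store it as an uncompressed copy.
		 */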
4100 		c_size = PAGE_SIZE;
4101 
4102 		if (incomp_copy == FALSE) {
4103 			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4104 		}
4105 
4106 		OSAddAtomic(1, &c_segment_noncompressible_pages);
4107 	} else if (c_size == 0) {
4108 		int             hash_index;
4109 
4110 		/*
4111 		 * special case - this is a page completely full of a single 32 bit value
4112 		 */
4113 		hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
4114 
4115 		if (hash_index != -1) {
4116 			slot_ptr->s_cindx = hash_index;
4117 			slot_ptr->s_cseg = C_SV_CSEG_ID;
4118 
4119 			OSAddAtomic(1, &c_segment_svp_hash_succeeded);
4120 #if RECORD_THE_COMPRESSED_DATA
4121 			c_compressed_record_data(src, 4);
4122 #endif
4123 			goto sv_compression;
4124 		}
4125 		c_size = 4;
4126 
4127 		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4128 
4129 		OSAddAtomic(1, &c_segment_svp_hash_failed);
4130 	}
4131 
4132 #if RECORD_THE_COMPRESSED_DATA
4133 	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4134 #endif
4135 #if CHECKSUM_THE_COMPRESSED_DATA
4136 	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4137 #endif
4138 #if POPCOUNT_THE_COMPRESSED_DATA
4139 	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4140 #endif
4141 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4142 
4143 	PACK_C_SIZE(cs, c_size);
4144 	c_seg->c_bytes_used += c_rounded_size;
4145 	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4146 	c_seg->c_slots_used++;
4147 
4148 	slot_ptr->s_cindx = c_seg->c_nextslot++;
4149 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
4150 	slot_ptr->s_cseg = c_seg->c_mysegno + 1;
4151 
4152 sv_compression:
4153 	if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4154 		c_current_seg_filled(c_seg, current_chead);
4155 		assert(*current_chead == NULL);
4156 	}
4157 	lck_mtx_unlock_always(&c_seg->c_lock);
4158 
4159 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4160 
4161 #if RECORD_THE_COMPRESSED_DATA
4162 	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4163 		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4164 		c_compressed_record_cptr = c_compressed_record_sbuf;
4165 	}
4166 #endif
4167 	if (c_size) {
4168 		OSAddAtomic64(c_size, &c_segment_compressed_bytes);
4169 		OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
4170 	}
4171 	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
4172 
4173 	OSAddAtomic(1, &c_segment_pages_compressed);
4174 #if CONFIG_FREEZE
4175 	OSAddAtomic(1, &c_segment_pages_compressed_incore);
4176 #endif /* CONFIG_FREEZE */
4177 	OSAddAtomic(1, &sample_period_compression_count);
4178 
4179 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4180 
4181 	return 0;
4182 }
4183 
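/*
 * Fill a page with a repeating 32-bit pattern, using the fastest fill
 * primitive available for the platform (memset_word on x86_64,
 * fill32_dczva/fill32_nt on arm64, an unrolled loop elsewhere).
 */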
4184 static inline void
4185 sv_decompress(int32_t *ddst, int32_t pattern)
4186 {
4187 //	assert(__builtin_constant_p(PAGE_SIZE) != 0);
4188 #if defined(__x86_64__)
4189 	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
4190 #elif defined(__arm64__)
4191 	assert((PAGE_SIZE % 128) == 0);
4192 	if (pattern == 0) {
4193 		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
4194 	} else {
4195 		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
4196 	}
4197 #else
4198 	size_t          i;
4199 
4200 	/* Unroll the pattern fill loop 4x to encourage the
4201 	 * compiler to emit NEON stores, cf.
4202 	 * <rdar://problem/25839866> Loop autovectorization
4203 	 * anomalies.
4204 	 */
4205 	/* We use separate loops for each PAGE_SIZE
4206 	 * to allow the autovectorizer to engage, as PAGE_SIZE
4207 	 * may not be a constant.
4208 	 */
4209 
4210 	__unreachable_ok_push
4211 	if (PAGE_SIZE == 4096) {
4212 		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
4213 			*ddst++ = pattern;
4214 			*ddst++ = pattern;
4215 			*ddst++ = pattern;
4216 			*ddst++ = pattern;
4217 		}
4218 	} else {
4219 		assert(PAGE_SIZE == 16384);
4220 		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
4221 			*ddst++ = pattern;
4222 			*ddst++ = pattern;
4223 			*ddst++ = pattern;
4224 			*ddst++ = pattern;
4225 		}
4226 	}
4227 	__unreachable_ok_pop
4228 #endif
4229 }
4230 
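/*
 * Decompress (or simply free, when dst is NULL) the slot described by
 * slot_ptr.  Swaps the owning segment back in if it is on disk, honors
 * C_KEEP / C_DONT_BLOCK / C_KDP, and handles the post-free bookkeeping
 * (minor compaction, sparse-segment lists).  *zeroslot tells the caller
 * whether to clear the slot mapping.  Return values mirror the summary in
 * vm_compressor_get(): 0 on success, 1 if a swap-in was required, -1 on
 * failure, -2 when C_DONT_BLOCK (or debugger mode) couldn't proceed.
 */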
4231 static int
4232 c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
4233 {
4234 	c_slot_t        cs;
4235 	c_segment_t     c_seg;
4236 	uint32_t        c_segno;
4237 	uint16_t        c_indx;
4238 	int             c_rounded_size;
4239 	uint32_t        c_size;
4240 	int             retval = 0;
4241 	boolean_t       need_unlock = TRUE;
4242 	boolean_t       consider_defragmenting = FALSE;
4243 	boolean_t       kdp_mode = FALSE;
4244 
4245 	if (__improbable(flags & C_KDP)) {
4246 		if (not_in_kdp) {
4247 			panic("C_KDP passed to decompress page from outside of debugger context");
4248 		}
4249 
4250 		assert((flags & C_KEEP) == C_KEEP);
4251 		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
4252 
4253 		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
4254 			return -2;
4255 		}
4256 
4257 		kdp_mode = TRUE;
4258 		*zeroslot = 0;
4259 	}
4260 
4261 ReTry:
4262 	if (__probable(!kdp_mode)) {
4263 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4264 	} else {
4265 		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
4266 			return -2;
4267 		}
4268 	}
4269 
4270 #if HIBERNATION
4271 	/*
4272 	 * if hibernation is enabled, it indicates (via a call
4273 	 * to 'vm_decompressor_lock') that no further
4274 	 * decompressions are allowed once it reaches
4275 	 * the point of flushing all of the currently dirty
4276 	 * anonymous memory through the compressor and out
4277 	 * to disk... in this state we allow freeing of compressed
4278 	 * pages and must honor the C_DONT_BLOCK case
4279 	 */
4280 	if (__improbable(dst && decompressions_blocked == TRUE)) {
4281 		if (flags & C_DONT_BLOCK) {
4282 			if (__probable(!kdp_mode)) {
4283 				PAGE_REPLACEMENT_DISALLOWED(FALSE);
4284 			}
4285 
4286 			*zeroslot = 0;
4287 			return -2;
4288 		}
4289 		/*
4290 		 * it's safe to atomically assert and block behind the
4291 		 * lock held in shared mode because "decompressions_blocked" is
4292 		 * only set and cleared and the thread_wakeup done when the lock
4293 		 * only set and cleared, and the thread_wakeup issued, while the lock
4294 		 */
4295 		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
4296 
4297 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
4298 
4299 		thread_block(THREAD_CONTINUE_NULL);
4300 
4301 		goto ReTry;
4302 	}
4303 #endif
4304 	/* s_cseg is actually "segno+1" */
4305 	c_segno = slot_ptr->s_cseg - 1;
4306 
4307 	if (__improbable(c_segno >= c_segments_available)) {
4308 		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
4309 		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
4310 	}
4311 
4312 	if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
4313 		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
4314 		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
4315 	}
4316 
4317 	c_seg = c_segments[c_segno].c_seg;
4318 
4319 	if (__probable(!kdp_mode)) {
4320 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4321 	} else {
4322 		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
4323 			return -2;
4324 		}
4325 	}
4326 
4327 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4328 
4329 	if (dst == NULL && c_seg->c_busy_swapping) {
4330 		assert(c_seg->c_busy);
4331 
4332 		goto bypass_busy_check;
4333 	}
4334 	if (flags & C_DONT_BLOCK) {
4335 		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
4336 			*zeroslot = 0;
4337 
4338 			retval = -2;
4339 			goto done;
4340 		}
4341 	}
4342 	if (c_seg->c_busy) {
4343 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
4344 
4345 		c_seg_wait_on_busy(c_seg);
4346 
4347 		goto ReTry;
4348 	}
4349 bypass_busy_check:
4350 
4351 	c_indx = slot_ptr->s_cindx;
4352 
4353 	if (__improbable(c_indx >= c_seg->c_nextslot)) {
4354 		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4355 		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4356 	}
4357 
4358 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4359 
4360 	c_size = UNPACK_C_SIZE(cs);
4361 
4362 	if (__improbable(c_size == 0)) {
4363 		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4364 		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4365 	}
4366 
4367 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4368 
4369 	if (dst) {
4370 		uint32_t        age_of_cseg;
4371 		clock_sec_t     cur_ts_sec;
4372 		clock_nsec_t    cur_ts_nsec;
4373 
4374 		if (C_SEG_IS_ONDISK(c_seg)) {
4375 #if CONFIG_FREEZE
4376 			if (freezer_incore_cseg_acct) {
4377 				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
4378 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
4379 					lck_mtx_unlock_always(&c_seg->c_lock);
4380 
4381 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4382 
4383 					goto ReTry;
4384 				}
4385 
4386 				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
4387 				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
4388 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
4389 					lck_mtx_unlock_always(&c_seg->c_lock);
4390 
4391 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4392 
4393 					goto ReTry;
4394 				}
4395 			}
4396 #endif /* CONFIG_FREEZE */
4397 			assert(kdp_mode == FALSE);
4398 			retval = c_seg_swapin(c_seg, FALSE, TRUE);
4399 			assert(retval == 0);
4400 
4401 			retval = 1;
4402 		}
4403 		if (c_seg->c_state == C_ON_BAD_Q) {
4404 			assert(c_seg->c_store.c_buffer == NULL);
4405 			*zeroslot = 0;
4406 
4407 			retval = -1;
4408 			goto done;
4409 		}
4410 
4411 #if POPCOUNT_THE_COMPRESSED_DATA
4412 		unsigned csvpop;
4413 		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
4414 		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
4415 			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
4416 		}
4417 #endif
4418 
4419 #if CHECKSUM_THE_COMPRESSED_DATA
4420 		unsigned csvhash;
4421 		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
4422 			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
4423 		}
4424 #endif
4425 		if (c_rounded_size == PAGE_SIZE) {
4426 			/*
4427 			 * page wasn't compressible... just copy it out
4428 			 */
4429 			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
4430 		} else if (c_size == 4) {
4431 			int32_t         data;
4432 			int32_t         *dptr;
4433 
4434 			/*
4435 			 * page was populated with a single value
4436 			 * that didn't fit into our fast hash
4437 			 * so we packed it in as a single non-compressed value
4438 			 * that we need to populate the page with
4439 			 */
4440 			dptr = (int32_t *)(uintptr_t)dst;
4441 			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
4442 			sv_decompress(dptr, data);
4443 		} else {
4444 			uint32_t        my_cpu_no;
4445 			char            *scratch_buf;
4446 
4447 			if (__probable(!kdp_mode)) {
4448 				/*
4449 				 * we're behind the c_seg lock held in spin mode
4450 				 * which means pre-emption is disabled... therefore
4451 				 * the following sequence is atomic and safe
4452 				 */
4453 				my_cpu_no = cpu_number();
4454 
4455 				assert(my_cpu_no < compressor_cpus);
4456 
4457 				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
4458 			} else {
4459 				scratch_buf = kdp_compressor_scratch_buf;
4460 			}
4461 
4462 			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4463 #if defined(__arm__) || defined(__arm64__)
4464 				uint16_t c_codec = cs->c_codec;
4465 				uint32_t inline_popcount;
4466 				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4467 				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
4468 					retval = -1;
4469 				} else {
4470 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
4471 					if (inline_popcount != cs->c_inline_popcount) {
4472 						/*
4473 						 * The codec choice in compression and
4474 						 * decompression must agree, so there
4475 						 * should never be a disagreement in
4476 						 * whether an inline population count
4477 						 * was performed.
4478 						 */
4479 						assert(inline_popcount != C_SLOT_NO_POPCOUNT);
4480 						assert(cs->c_inline_popcount != C_SLOT_NO_POPCOUNT);
4481 						printf("decompression failure from physical region %llx+%05x: popcount mismatch (%d != %d)\n",
4482 						    (unsigned long long)kvtophys((uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset]), c_size,
4483 						    inline_popcount,
4484 						    cs->c_inline_popcount);
4485 						retval = -1;
4486 					}
4487 #else
4488 					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4489 #endif /* __APPLE_WKDM_POPCNT_EXTENSIONS__ */
4490 				}
4491 #endif
4492 			} else {
4493 #if defined(__arm64__)
4494 				__unreachable_ok_push
4495 				if (PAGE_SIZE == 4096) {
4496 					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4497 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4498 				} else {
4499 					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4500 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4501 				}
4502 				__unreachable_ok_pop
4503 #else
4504 				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4505 				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4506 #endif
4507 			}
4508 		}
4509 
4510 #if CHECKSUM_THE_DATA
4511 		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
4512 #if     defined(__arm__) || defined(__arm64__)
4513 			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
4514 			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
4515 #else
4516 			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
4517 #endif
4518 		}
4519 #endif
4520 		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
4521 			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
4522 
4523 			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
4524 			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
4525 				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
4526 			} else {
4527 				OSAddAtomic(1, &overage_decompressions_during_sample_period);
4528 			}
4529 
4530 			OSAddAtomic(1, &sample_period_decompression_count);
4531 		}
4532 	}
4533 #if CONFIG_FREEZE
4534 	else {
4535 		/*
4536 		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
4537 		 */
4538 		if (C_SEG_IS_ONDISK(c_seg)) {
4539 			/*
4540 			 * The compression sweep feature will push out anonymous pages to disk
4541 			 * without going through the freezer path and so those c_segs, while
4542 			 * swapped out, won't have an owner.
4543 			 */
4544 			if (c_seg->c_task_owner) {
4545 				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
4546 			}
4547 
4548 			/*
4549 			 * We are freeing a page in swap without swapping it in. We bump the in-core
4550 			 * count here to simulate a swapin of a page so that we can accurately
4551 			 * decrement it below.
4552 			 */
4553 			OSAddAtomic(1, &c_segment_pages_compressed_incore);
4554 		}
4555 	}
4556 #endif /* CONFIG_FREEZE */
4557 
4558 	if (flags & C_KEEP) {
4559 		*zeroslot = 0;
4560 		goto done;
4561 	}
4562 	assert(kdp_mode == FALSE);
4563 
4564 	c_seg->c_bytes_unused += c_rounded_size;
4565 	c_seg->c_bytes_used -= c_rounded_size;
4566 
4567 	assert(c_seg->c_slots_used);
4568 	c_seg->c_slots_used--;
4569 	if (dst && c_seg->c_swappedin) {
4570 		task_t task = current_task();
4571 		if (task) {
4572 			ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
4573 		}
4574 	}
4575 
4576 	PACK_C_SIZE(cs, 0);
4577 
4578 	if (c_indx < c_seg->c_firstemptyslot) {
4579 		c_seg->c_firstemptyslot = c_indx;
4580 	}
4581 
4582 	OSAddAtomic(-1, &c_segment_pages_compressed);
4583 #if CONFIG_FREEZE
4584 	OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4585 	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
4586 #endif /* CONFIG_FREEZE */
4587 
4588 	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
4589 		/*
4590 		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
4591 		 * free of a compressed page (i.e. dst == NULL)
4592 		 */
4593 		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
4594 	}
4595 	if (c_seg->c_busy_swapping) {
4596 		/*
4597 		 * bypass case for c_busy_swapping...
4598 		 * let the swapin/swapout paths deal with putting
4599 		 * the c_seg on the minor compaction queue if needed
4600 		 */
4601 		assert(c_seg->c_busy);
4602 		goto done;
4603 	}
4604 	assert(!c_seg->c_busy);
4605 
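	/*
	 * Post-free housekeeping: if the segment is now empty, release its
	 * backing pages (or move an on-disk segment to the sparse list);
	 * otherwise consider queueing it for delayed minor compaction.
	 */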
4606 	if (c_seg->c_state != C_IS_FILLING) {
4607 		if (c_seg->c_bytes_used == 0) {
4608 			if (!(C_SEG_IS_ONDISK(c_seg))) {
4609 				int     pages_populated;
4610 
4611 				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
4612 				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4613 
4614 				if (pages_populated) {
4615 					assert(c_seg->c_state != C_ON_BAD_Q);
4616 					assert(c_seg->c_store.c_buffer != NULL);
4617 
4618 					C_SEG_BUSY(c_seg);
4619 					lck_mtx_unlock_always(&c_seg->c_lock);
4620 
4621 					kernel_memory_depopulate(compressor_map,
4622 					    (vm_offset_t) c_seg->c_store.c_buffer,
4623 					    pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
4624 
4625 					lck_mtx_lock_spin_always(&c_seg->c_lock);
4626 					C_SEG_WAKEUP_DONE(c_seg);
4627 				}
4628 				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) {
4629 					c_seg_need_delayed_compaction(c_seg, FALSE);
4630 				}
4631 			} else {
4632 				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
4633 					c_seg_move_to_sparse_list(c_seg);
4634 					consider_defragmenting = TRUE;
4635 				}
4636 			}
4637 		} else if (c_seg->c_on_minorcompact_q) {
4638 			assert(c_seg->c_state != C_ON_BAD_Q);
4639 			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
4640 
4641 			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
4642 				c_seg_try_minor_compaction_and_unlock(c_seg);
4643 				need_unlock = FALSE;
4644 			}
4645 		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
4646 			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
4647 			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4648 				c_seg_need_delayed_compaction(c_seg, FALSE);
4649 			}
4650 		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
4651 			c_seg_move_to_sparse_list(c_seg);
4652 			consider_defragmenting = TRUE;
4653 		}
4654 	}
4655 done:
4656 	if (__improbable(kdp_mode)) {
4657 		return retval;
4658 	}
4659 
4660 	if (need_unlock == TRUE) {
4661 		lck_mtx_unlock_always(&c_seg->c_lock);
4662 	}
4663 
4664 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4665 
4666 	if (consider_defragmenting == TRUE) {
4667 		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
4668 	}
4669 
4670 #if !XNU_TARGET_OS_OSX
4671 	if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) {
4672 		vm_wake_compactor_swapper();
4673 	}
4674 #endif /* !XNU_TARGET_OS_OSX */
4675 
4676 	return retval;
4677 }
4678 
4679 
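/*
 * Decompress the page described by 'slot' into physical page 'pn'.
 * Single-value pages are reconstructed directly from the SV hash without
 * touching a segment; everything else goes through c_decompress_page().
 */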
4680 int
4681 vm_compressor_get(ppnum_t pn, int *slot, int flags)
4682 {
4683 	c_slot_mapping_t  slot_ptr;
4684 	char    *dst;
4685 	int     zeroslot = 1;
4686 	int     retval;
4687 
4688 	dst = pmap_map_compressor_page(pn);
4689 	slot_ptr = (c_slot_mapping_t)slot;
4690 
4691 	assert(dst != NULL);
4692 
4693 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4694 		int32_t         data;
4695 		int32_t         *dptr;
4696 
4697 		/*
4698 		 * page was populated with a single value
4699 		 * that found a home in our hash table
4700 		 * grab that value from the hash and use it
4701 		 * to populate the page we're decompressing into
4702 		 */
4703 		dptr = (int32_t *)(uintptr_t)dst;
4704 		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
4705 		sv_decompress(dptr, data);
4706 		if (!(flags & C_KEEP)) {
4707 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4708 
4709 			OSAddAtomic(-1, &c_segment_pages_compressed);
4710 #if CONFIG_FREEZE
4711 			OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4712 			assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4713 #endif /* CONFIG_FREEZE */
4714 			*slot = 0;
4715 		}
4716 		if (data) {
4717 			OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
4718 		} else {
4719 			OSAddAtomic(1, &c_segment_svp_zero_decompressions);
4720 		}
4721 
4722 		pmap_unmap_compressor_page(pn, dst);
4723 		return 0;
4724 	}
4725 
4726 	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
4727 
4728 	/*
4729 	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
4730 	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
4731 	 */
4732 	if (zeroslot) {
4733 		*slot = 0;
4734 	}
4735 
4736 	pmap_unmap_compressor_page(pn, dst);
4737 
4738 	/*
4739 	 * returns 0 if we successfully decompressed a page from a segment already in memory
4740 	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
4741 	 * returns -1 if we encountered an error swapping in the segment - decompression failed
4742 	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
4743 	 */
4744 	return retval;
4745 }
4746 
4747 #if DEVELOPMENT || DEBUG
4748 
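/*
 * Test-only helper (DEVELOPMENT/DEBUG): flip one bit in the stored
 * compressed data for this slot so that decompression of the page later
 * fails its integrity checks (see decompression_failure.c).  SV-compressed
 * and on-disk slots are skipped.
 */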
4749 void
4750 vm_compressor_inject_error(int *slot)
4751 {
4752 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
4753 
4754 	/* No error detection for single-value compression. */
4755 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4756 		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
4757 		return;
4758 	}
4759 
4760 	/* s_cseg is actually "segno+1" */
4761 	const uint32_t c_segno = slot_ptr->s_cseg - 1;
4762 
4763 	assert(c_segno < c_segments_available);
4764 	assert(c_segments[c_segno].c_segno >= c_segments_available);
4765 
4766 	const c_segment_t c_seg = c_segments[c_segno].c_seg;
4767 
4768 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4769 
4770 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4771 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4772 
4773 	const uint16_t c_indx = slot_ptr->s_cindx;
4774 	assert(c_indx < c_seg->c_nextslot);
4775 
4776 	/*
4777 	 * To safely make this segment temporarily writable, we need to mark
4778 	 * the segment busy, which allows us to release the segment lock.
4779 	 */
4780 	while (c_seg->c_busy) {
4781 		c_seg_wait_on_busy(c_seg);
4782 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4783 	}
4784 	C_SEG_BUSY(c_seg);
4785 
4786 	bool already_writable = (c_seg->c_state == C_IS_FILLING);
4787 	if (!already_writable) {
4788 		/*
4789 		 * Protection update must be performed preemptibly, so temporarily drop
4790 		 * the lock. Having set c_busy will prevent most other concurrent
4791 		 * operations.
4792 		 */
4793 		lck_mtx_unlock_always(&c_seg->c_lock);
4794 		C_SEG_MAKE_WRITEABLE(c_seg);
4795 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4796 	}
4797 
4798 	/*
4799 	 * Once we've released the lock following our c_state == C_IS_FILLING check,
4800 	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
4801 	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
4802 	 * can detect this by re-checking after we've reobtained the lock.
4803 	 */
4804 	if (already_writable && c_seg->c_state != C_IS_FILLING) {
4805 		lck_mtx_unlock_always(&c_seg->c_lock);
4806 		C_SEG_MAKE_WRITEABLE(c_seg);
4807 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4808 		already_writable = false;
4809 		/* Segment can't be freed while c_busy is set. */
4810 		assert(c_seg->c_state != C_IS_FILLING);
4811 	}
4812 
4813 	/*
4814 	 * Skip if the segment is on disk. This check can only be performed after
4815 	 * the final acquisition of the segment lock before we attempt to write to
4816 	 * the segment.
4817 	 */
4818 	if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
4819 		c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4820 		int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
4821 		/* assume that the compressed data holds at least one int32_t */
4822 		assert(UNPACK_C_SIZE(cs) > sizeof(*data));
4823 		/*
4824 		 * This bit is known to be in the payload of a MISS packet resulting from
4825 		 * the pattern used in the test pattern from decompression_failure.c.
4826 		 * Flipping it should result in many corrupted bits in the test page.
4827 		 */
4828 		data[0] ^= 0x00000100;
4829 	}
4830 
4831 	if (!already_writable) {
4832 		lck_mtx_unlock_always(&c_seg->c_lock);
4833 		C_SEG_WRITE_PROTECT(c_seg);
4834 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4835 	}
4836 
4837 	C_SEG_WAKEUP_DONE(c_seg);
4838 	lck_mtx_unlock_always(&c_seg->c_lock);
4839 
4840 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4841 }
4842 
4843 #endif /* DEVELOPMENT || DEBUG */
4844 
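/*
 * Release the compressed copy behind 'slot' without decompressing it.
 * Single-value slots just drop their hash reference; everything else goes
 * through c_decompress_page() with a NULL destination.
 */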
4845 int
4846 vm_compressor_free(int *slot, int flags)
4847 {
4848 	c_slot_mapping_t  slot_ptr;
4849 	int     zeroslot = 1;
4850 	int     retval;
4851 
4852 	assert(flags == 0 || flags == C_DONT_BLOCK);
4853 
4854 	slot_ptr = (c_slot_mapping_t)slot;
4855 
4856 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4857 		c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4858 		OSAddAtomic(-1, &c_segment_pages_compressed);
4859 #if CONFIG_FREEZE
4860 		OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4861 		assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4862 #endif /* CONFIG_FREEZE */
4863 
4864 		*slot = 0;
4865 		return 0;
4866 	}
4867 	retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
4868 	/*
4869 	 * returns 0 if we successfully freed the specified compressed page
4870 	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
4871 	 */
4872 
4873 	if (retval == 0) {
4874 		*slot = 0;
4875 	} else {
4876 		assert(retval == -2);
4877 	}
4878 
4879 	return retval;
4880 }
4881 
4882 
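/*
 * Compress the contents of physical page 'pn' into the current segment for
 * this context, recording where it landed in 'slot'.  Thin wrapper that
 * maps the page and calls c_compress_page().
 */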
4883 int
4884 vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
4885 {
4886 	char    *src;
4887 	int     retval;
4888 
4889 	src = pmap_map_compressor_page(pn);
4890 	assert(src != NULL);
4891 
4892 	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
4893 	pmap_unmap_compressor_page(pn, src);
4894 
4895 	return retval;
4896 }
4897 
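/*
 * Move a slot mapping from *src_slot_p to *dst_slot_p: repoint the owning
 * c_slot's packed back-pointer at the new location (waiting out a busy
 * segment first) and clear the source.  SV slots have no segment to update
 * and are simply copied.
 */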
4898 void
4899 vm_compressor_transfer(
4900 	int     *dst_slot_p,
4901 	int     *src_slot_p)
4902 {
4903 	c_slot_mapping_t        dst_slot, src_slot;
4904 	c_segment_t             c_seg;
4905 	uint16_t                c_indx;
4906 	c_slot_t                cs;
4907 
4908 	src_slot = (c_slot_mapping_t) src_slot_p;
4909 
4910 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
4911 		*dst_slot_p = *src_slot_p;
4912 		*src_slot_p = 0;
4913 		return;
4914 	}
4915 	dst_slot = (c_slot_mapping_t) dst_slot_p;
4916 Retry:
4917 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4918 	/* get segment for src_slot */
4919 	c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
4920 	/* lock segment */
4921 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4922 	/* wait if it's busy */
4923 	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
4924 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
4925 		c_seg_wait_on_busy(c_seg);
4926 		goto Retry;
4927 	}
4928 	/* find the c_slot */
4929 	c_indx = src_slot->s_cindx;
4930 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4931 	/* point the c_slot back to dst_slot instead of src_slot */
4932 	C_SLOT_ASSERT_PACKABLE(dst_slot);
4933 	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
4934 	/* transfer */
4935 	*dst_slot_p = *src_slot_p;
4936 	*src_slot_p = 0;
4937 	lck_mtx_unlock_always(&c_seg->c_lock);
4938 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4939 }
4940 
4941 #if defined(__arm64__)
4942 extern clock_sec_t             vm_swapfile_last_failed_to_create_ts;
4943 __attribute__((noreturn))
4944 void
4945 vm_panic_hibernate_write_image_failed(int err)
4946 {
4947 	panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d\n",
4948 	    err,
4949 	    VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
4950 	    c_age_count, c_major_count, c_minor_count, c_swapout_count, c_swappedout_sparse_count,
4951 	    vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
4952 	    (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out);
4953 }
4954 #endif /*(__arm64__)*/
4955 
4956 #if CONFIG_FREEZE
4957 
4958 int     freezer_finished_filling = 0;
4959 
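/*
 * vm_compressor_finished_filling(): called once the freezer is done adding
 * pages to the chain head; seals the in-flight C_IS_FILLING segment (if any)
 * via c_current_seg_filled() and leaves *current_chead NULL.
 */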
4960 void
4961 vm_compressor_finished_filling(
4962 	void    **current_chead)
4963 {
4964 	c_segment_t     c_seg;
4965 
4966 	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
4967 		return;
4968 	}
4969 
4970 	assert(c_seg->c_state == C_IS_FILLING);
4971 
4972 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4973 
4974 	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
4975 
4976 	lck_mtx_unlock_always(&c_seg->c_lock);
4977 
4978 	freezer_finished_filling++;
4979 }
4980 
4981 
4982 /*
4983  * This routine is used to transfer the compressed chunks from
4984  * the c_seg/cindx pointed to by slot_p into a new c_seg headed
4985  * by the current_chead and a new cindx within that c_seg.
4986  *
4987  * Currently, this routine is only used by the "freezer backed by
4988  * compressor with swap" mode to create a series of c_segs that
4989  * only contain compressed data belonging to one task. So, we
4990  * move a task's previously compressed data into a set of new
4991  * c_segs which will also hold the task's yet to be compressed data.
4992  */
4993 
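/*
 * A minimal sketch of how this routine is driven (hypothetical, not the
 * actual freezer code): each of the task's compressed slots is relocated
 * into the task-private chain of c_segs, then the last partially filled
 * segment is sealed.
 *
 *	void *freezer_chead = NULL;
 *	int *slot;
 *
 *	while ((slot = next_compressed_slot_for_task(task)) != NULL) {	// hypothetical helper, for illustration only
 *		if (vm_compressor_relocate(&freezer_chead, slot) != KERN_SUCCESS) {
 *			break;	// e.g. KERN_RESOURCE_SHORTAGE: out of compression segments
 *		}
 *	}
 *	vm_compressor_finished_filling(&freezer_chead);
 */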
4994 kern_return_t
4995 vm_compressor_relocate(
4996 	void            **current_chead,
4997 	int             *slot_p)
4998 {
4999 	c_slot_mapping_t        slot_ptr;
5000 	c_slot_mapping_t        src_slot;
5001 	uint32_t                c_rounded_size;
5002 	uint32_t                c_size;
5003 	uint16_t                dst_slot;
5004 	c_slot_t                c_dst;
5005 	c_slot_t                c_src;
5006 	uint16_t                c_indx;
5007 	c_segment_t             c_seg_dst = NULL;
5008 	c_segment_t             c_seg_src = NULL;
5009 	kern_return_t           kr = KERN_SUCCESS;
5010 
5011 
5012 	src_slot = (c_slot_mapping_t) slot_p;
5013 
5014 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
5015 		/*
5016 		 * no need to relocate... this is a page full of a single
5017 		 * value which is hashed to a single entry not contained
5018 		 * in a c_segment_t
5019 		 */
5020 		return kr;
5021 	}
5022 
5023 Relookup_dst:
5024 	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
5025 	/*
5026 	 * returns with c_seg lock held
5027 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5028 	 * c_nextslot has been allocated and
5029 	 * c_store.c_buffer populated
5030 	 */
5031 	if (c_seg_dst == NULL) {
5032 		/*
5033 		 * Out of compression segments?
5034 		 */
5035 		kr = KERN_RESOURCE_SHORTAGE;
5036 		goto out;
5037 	}
5038 
5039 	assert(c_seg_dst->c_busy == 0);
5040 
5041 	C_SEG_BUSY(c_seg_dst);
5042 
5043 	dst_slot = c_seg_dst->c_nextslot;
5044 
5045 	lck_mtx_unlock_always(&c_seg_dst->c_lock);
5046 
5047 Relookup_src:
5048 	c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
5049 
5050 	assert(c_seg_dst != c_seg_src);
5051 
5052 	lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5053 
5054 	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5055 	    c_seg_src->c_state == C_IS_FILLING) {
5056 		/*
5057 		 * Skip this page if:
5058 		 * a) the src c_seg is already on-disk (or on its way there).
5059 		 *    A "thaw" can mark a process as eligible for
5060 		 *    another freeze cycle without bringing any of
5061 		 *    its swapped-out c_segs back from disk (because
5062 		 *    that is done on-demand).
5063 		 *    Or, this page may be mapped elsewhere in the task's map,
5064 		 *    and we may have marked it for swap already.
5065 		 *
5066 		 * b) the src c_seg is being filled by the compressor
5067 		 *    thread. We don't want the added latency of waiting for
5068 		 *    this c_seg in the freeze path, so we skip it.
5069 		 */
5070 
5071 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5072 
5073 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5074 
5075 		c_seg_src = NULL;
5076 
5077 		goto out;
5078 	}
5079 
5080 	if (c_seg_src->c_busy) {
5081 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5082 		c_seg_wait_on_busy(c_seg_src);
5083 
5084 		c_seg_src = NULL;
5085 
5086 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5087 
5088 		goto Relookup_src;
5089 	}
5090 
5091 	C_SEG_BUSY(c_seg_src);
5092 
5093 	lck_mtx_unlock_always(&c_seg_src->c_lock);
5094 
5095 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5096 
5097 	/* find the c_slot */
5098 	c_indx = src_slot->s_cindx;
5099 
5100 	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5101 
5102 	c_size = UNPACK_C_SIZE(c_src);
5103 
5104 	assert(c_size);
5105 
5106 	if (c_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5107 		/*
5108 		 * The destination segment is full. We need a new one.
5109 		 */
5110 
5111 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5112 
5113 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5114 		C_SEG_WAKEUP_DONE(c_seg_src);
5115 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5116 
5117 		c_seg_src = NULL;
5118 
5119 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5120 
5121 		assert(c_seg_dst->c_busy);
5122 		assert(c_seg_dst->c_state == C_IS_FILLING);
5123 		assert(!c_seg_dst->c_on_minorcompact_q);
5124 
5125 		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5126 		assert(*current_chead == NULL);
5127 
5128 		C_SEG_WAKEUP_DONE(c_seg_dst);
5129 
5130 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5131 
5132 		c_seg_dst = NULL;
5133 
5134 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5135 
5136 		goto Relookup_dst;
5137 	}
5138 
5139 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5140 
5141 	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
5142 	/*
5143 	 * Is platform alignment actually necessary since wkdm aligns its output?
5144 	 */
5145 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5146 
5147 	cslot_copy(c_dst, c_src);
5148 	c_dst->c_offset = c_seg_dst->c_nextoffset;
5149 
5150 	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5151 		c_seg_dst->c_firstemptyslot++;
5152 	}
5153 
5154 	c_seg_dst->c_slots_used++;
5155 	c_seg_dst->c_nextslot++;
5156 	c_seg_dst->c_bytes_used += c_rounded_size;
5157 	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5158 
5159 
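	/* mark the source slot empty and credit its bytes back to the source segment */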
5160 	PACK_C_SIZE(c_src, 0);
5161 
5162 	c_seg_src->c_bytes_used -= c_rounded_size;
5163 	c_seg_src->c_bytes_unused += c_rounded_size;
5164 
5165 	assert(c_seg_src->c_slots_used);
5166 	c_seg_src->c_slots_used--;
5167 
5168 	if (!c_seg_src->c_swappedin) {
5169 		/* Pessimistically lose swappedin status when non-swappedin pages are added. */
5170 		c_seg_dst->c_swappedin = false;
5171 	}
5172 
5173 	if (c_indx < c_seg_src->c_firstemptyslot) {
5174 		c_seg_src->c_firstemptyslot = c_indx;
5175 	}
5176 
5177 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
5178 
5179 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5180 	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
5181 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
5182 	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
5183 	slot_ptr->s_cindx = dst_slot;
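	/* lookups later recover the segment via c_segments[s_cseg - 1] */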
5184 
5185 	PAGE_REPLACEMENT_ALLOWED(FALSE);
5186 
5187 out:
5188 	if (c_seg_src) {
5189 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5190 
5191 		C_SEG_WAKEUP_DONE(c_seg_src);
5192 
5193 		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
5194 			if (!c_seg_src->c_on_minorcompact_q) {
5195 				c_seg_need_delayed_compaction(c_seg_src, FALSE);
5196 			}
5197 		}
5198 
5199 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5200 	}
5201 
5202 	if (c_seg_dst) {
5203 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5204 
5205 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5206 
5207 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
5208 			/*
5209 			 * We are nearing or have exceeded this segment's slot and offset capacity.
5210 			 */
5211 			assert(c_seg_dst->c_busy);
5212 			assert(c_seg_dst->c_state == C_IS_FILLING);
5213 			assert(!c_seg_dst->c_on_minorcompact_q);
5214 
5215 			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5216 			assert(*current_chead == NULL);
5217 		}
5218 
5219 		C_SEG_WAKEUP_DONE(c_seg_dst);
5220 
5221 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5222 
5223 		c_seg_dst = NULL;
5224 
5225 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5226 	}
5227 
5228 	return kr;
5229 }
5230 #endif /* CONFIG_FREEZE */
5231