1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_compressor.h>
30
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache.h>
33 #endif
34
35 #include <vm/vm_map.h>
36 #include <vm/vm_pageout.h>
37 #include <vm/memory_object.h>
38 #include <vm/vm_compressor_algorithms.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_protos.h>
41 #include <mach/mach_host.h> /* for host_info() */
42 #if DEVELOPMENT || DEBUG
43 #include <kern/hvg_hypercall.h>
44 #endif
45 #include <kern/ledger.h>
46 #include <kern/policy_internal.h>
47 #include <kern/thread_group.h>
48 #include <san/kasan.h>
49
50 #if defined(__x86_64__)
51 #include <i386/misc_protos.h>
52 #endif
53 #if defined(__arm64__)
54 #include <arm/machine_routines.h>
55 #endif
56
57 #include <IOKit/IOHibernatePrivate.h>
58
59 TUNABLE(uint32_t, c_seg_bufsize, "vm_compressor_segment_buffer_size", (1024 * 256));
60 uint32_t c_seg_max_pages, c_seg_off_limit, c_seg_allocsize, c_seg_slot_var_array_min_len;
61
62 extern boolean_t vm_darkwake_mode;
63 extern zone_t vm_page_zone;
64
65 #if DEVELOPMENT || DEBUG
66 /* sysctl defined in bsd/dev/arm64/sysctl.c */
67 int do_cseg_wedge_thread(void);
68 int do_cseg_unwedge_thread(void);
69 static event_t debug_cseg_wait_event = NULL;
70 #endif /* DEVELOPMENT || DEBUG */
71
72 #if CONFIG_FREEZE
73 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
74 void task_disown_frozen_csegs(task_t owner_task);
75 #endif /* CONFIG_FREEZE */
76
77 #if POPCOUNT_THE_COMPRESSED_DATA
78 boolean_t popcount_c_segs = TRUE;
79
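/*
 * Debug helper: compute the population count (number of set bits) of a
 * buffer of 'sz' bytes, walking it in 16-byte, then 4-byte, then 1-byte
 * strides. Used to validate compressed data when POPCOUNT_THE_COMPRESSED_DATA
 * is configured; returns a sentinel value when popcount validation has been
 * disabled via popcount_c_segs.
 */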
80 static inline uint32_t
81 vmc_pop(uintptr_t ins, int sz)
82 {
83 uint32_t rv = 0;
84
85 if (__probable(popcount_c_segs == FALSE)) {
86 return 0xDEAD707C;
87 }
88
89 while (sz >= 16) {
90 uint32_t rv1, rv2;
91 uint64_t *ins64 = (uint64_t *) ins;
92 uint64_t *ins642 = (uint64_t *) (ins + 8);
93 rv1 = __builtin_popcountll(*ins64);
94 rv2 = __builtin_popcountll(*ins642);
95 rv += rv1 + rv2;
96 sz -= 16;
97 ins += 16;
98 }
99
100 while (sz >= 4) {
101 uint32_t *ins32 = (uint32_t *) ins;
102 rv += __builtin_popcount(*ins32);
103 sz -= 4;
104 ins += 4;
105 }
106
107 while (sz > 0) {
108 char *ins8 = (char *)ins;
109 rv += __builtin_popcount(*ins8);
110 sz--;
111 ins++;
112 }
113 return rv;
114 }
115 #endif
116
117 #if VALIDATE_C_SEGMENTS
118 boolean_t validate_c_segs = TRUE;
119 #endif
120 /*
121 * vm_compressor_mode has a hierarchy of control to set its value.
122 * boot-args are checked first, then device-tree, and finally
123 * the default value that is defined below. See vm_fault_init() for
124 * the boot-arg & device-tree code.
125 */
126
127 #if !XNU_TARGET_OS_OSX
128
129 #if CONFIG_FREEZE
130 int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
131 struct freezer_context freezer_context_global;
132 #else /* CONFIG_FREEZE */
133 int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
134 #endif /* CONFIG_FREEZE */
135
136 #else /* !XNU_TARGET_OS_OSX */
137 int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
138
139 #endif /* !XNU_TARGET_OS_OSX */
140
141 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
142 int vm_compressor_is_active = 0;
143 int vm_compressor_available = 0;
144
145 extern uint64_t vm_swap_get_max_configured_space(void);
146 extern void vm_pageout_io_throttle(void);
147
148 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
149 extern unsigned int hash_string(char *cp, int len);
150 static unsigned int vmc_hash(char *, int);
151 boolean_t checksum_c_segs = TRUE;
152
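/*
 * Checksum helper: thin wrapper around hash_string() used to checksum
 * uncompressed and/or compressed slot data. Returns a sentinel value
 * when checksumming has been disabled via checksum_c_segs.
 */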
153 unsigned int
154 vmc_hash(char *cp, int len)
155 {
156 if (__probable(checksum_c_segs == FALSE)) {
157 return 0xDEAD7A37;
158 }
159 return hash_string(cp, len);
160 }
161 #endif
162
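/*
 * A page stored uncompressed (its "compressed" size is a full page) is
 * encoded as PAGE_SIZE - 1, presumably because the slot's c_size field is
 * too narrow to represent PAGE_SIZE directly; UNPACK_C_SIZE reverses the
 * mapping. For example, PACK_C_SIZE(cs, PAGE_SIZE) followed by
 * UNPACK_C_SIZE(cs) yields PAGE_SIZE, while any smaller size round-trips
 * unchanged.
 */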
163 #define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
164 #define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
165
166
167 struct c_sv_hash_entry {
168 union {
169 struct {
170 uint32_t c_sv_he_ref;
171 uint32_t c_sv_he_data;
172 } c_sv_he;
173 uint64_t c_sv_he_record;
174 } c_sv_he_un;
175 };
176
177 #define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref
178 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
179 #define he_record c_sv_he_un.c_sv_he_record
180
181 #define C_SV_HASH_MAX_MISS 32
182 #define C_SV_HASH_SIZE ((1 << 10))
183 #define C_SV_HASH_MASK ((1 << 10) - 1)
184 #define C_SV_CSEG_ID ((1 << 22) - 1)
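/*
 * The c_sv hash tracks "single value" pages -- pages whose contents are one
 * repeated 32-bit pattern (most commonly all zeroes). Rather than occupying
 * space in a compression segment, such pages share an entry in
 * c_segment_sv_hash_table: he_data holds the repeated value and he_ref the
 * number of compressed pages referencing it. C_SV_CSEG_ID is the reserved
 * segment id that marks a slot as backed by this hash rather than by a real
 * c_segment.
 */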
185
186
187 union c_segu {
188 c_segment_t c_seg;
189 uintptr_t c_segno;
190 };
191
192 #define C_SLOT_ASSERT_PACKABLE(ptr) \
193 VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
194
195 #define C_SLOT_PACK_PTR(ptr) \
196 VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
197
198 #define C_SLOT_UNPACK_PTR(cslot) \
199 (c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
200
201 /* for debugging purposes */
202 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
203 VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
204
205 uint32_t c_segment_count = 0;
206 uint32_t c_segment_count_max = 0;
207
208 uint64_t c_generation_id = 0;
209 uint64_t c_generation_id_flush_barrier;
210
211
212 #define HIBERNATE_FLUSHING_SECS_TO_COMPLETE 120
213
214 boolean_t hibernate_no_swapspace = FALSE;
215 boolean_t hibernate_flush_timed_out = FALSE;
216 clock_sec_t hibernate_flushing_deadline = 0;
217
218
219 #if RECORD_THE_COMPRESSED_DATA
220 char *c_compressed_record_sbuf;
221 char *c_compressed_record_ebuf;
222 char *c_compressed_record_cptr;
223 #endif
224
225
226 queue_head_t c_age_list_head;
227 queue_head_t c_swappedin_list_head;
228 queue_head_t c_swapout_list_head;
229 queue_head_t c_swapio_list_head;
230 queue_head_t c_swappedout_list_head;
231 queue_head_t c_swappedout_sparse_list_head;
232 queue_head_t c_major_list_head;
233 queue_head_t c_filling_list_head;
234 queue_head_t c_bad_list_head;
235
236 uint32_t c_age_count = 0;
237 uint32_t c_swappedin_count = 0;
238 uint32_t c_swapout_count = 0;
239 uint32_t c_swapio_count = 0;
240 uint32_t c_swappedout_count = 0;
241 uint32_t c_swappedout_sparse_count = 0;
242 uint32_t c_major_count = 0;
243 uint32_t c_filling_count = 0;
244 uint32_t c_empty_count = 0;
245 uint32_t c_bad_count = 0;
246
247
248 queue_head_t c_minor_list_head;
249 uint32_t c_minor_count = 0;
250
251 int c_overage_swapped_count = 0;
252 int c_overage_swapped_limit = 0;
253
254 int c_seg_fixed_array_len;
255 union c_segu *c_segments;
256 vm_offset_t c_buffers;
257 vm_size_t c_buffers_size;
258 caddr_t c_segments_next_page;
259 boolean_t c_segments_busy;
260 uint32_t c_segments_available;
261 uint32_t c_segments_limit;
262 uint32_t c_segments_nearing_limit;
263
264 uint32_t c_segment_svp_in_hash;
265 uint32_t c_segment_svp_hash_succeeded;
266 uint32_t c_segment_svp_hash_failed;
267 uint32_t c_segment_svp_zero_compressions;
268 uint32_t c_segment_svp_nonzero_compressions;
269 uint32_t c_segment_svp_zero_decompressions;
270 uint32_t c_segment_svp_nonzero_decompressions;
271
272 uint32_t c_segment_noncompressible_pages;
273
274 uint32_t c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor */
275 #if CONFIG_FREEZE
276 int32_t c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
277 uint32_t c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
278 #endif /* CONFIG_FREEZE */
279
280 uint32_t c_segment_pages_compressed_limit;
281 uint32_t c_segment_pages_compressed_nearing_limit;
282 uint32_t c_free_segno_head = (uint32_t)-1;
283
284 uint32_t vm_compressor_minorcompact_threshold_divisor = 10;
285 uint32_t vm_compressor_majorcompact_threshold_divisor = 10;
286 uint32_t vm_compressor_unthrottle_threshold_divisor = 10;
287 uint32_t vm_compressor_catchup_threshold_divisor = 10;
288
289 uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0;
290 uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0;
291 uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0;
292 uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0;
293
294 #define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu))
295
296 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
297 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
298 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
299
300 boolean_t decompressions_blocked = FALSE;
301
302 zone_t compressor_segment_zone;
303 int c_compressor_swap_trigger = 0;
304
305 uint32_t compressor_cpus;
306 char *compressor_scratch_bufs;
307 char *kdp_compressor_scratch_buf;
308 char *kdp_compressor_decompressed_page;
309 addr64_t kdp_compressor_decompressed_page_paddr;
310 ppnum_t kdp_compressor_decompressed_page_ppnum;
311
312 clock_sec_t start_of_sample_period_sec = 0;
313 clock_nsec_t start_of_sample_period_nsec = 0;
314 clock_sec_t start_of_eval_period_sec = 0;
315 clock_nsec_t start_of_eval_period_nsec = 0;
316 uint32_t sample_period_decompression_count = 0;
317 uint32_t sample_period_compression_count = 0;
318 uint32_t last_eval_decompression_count = 0;
319 uint32_t last_eval_compression_count = 0;
320
321 #define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30)
322
323 boolean_t vm_swapout_ripe_segments = FALSE;
324 uint32_t vm_ripe_target_age = (60 * 60 * 48);
325
326 uint32_t swapout_target_age = 0;
327 uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
328 uint32_t overage_decompressions_during_sample_period = 0;
329
330
331 void do_fastwake_warmup(queue_head_t *, boolean_t);
332 boolean_t fastwake_warmup = FALSE;
333 boolean_t fastwake_recording_in_progress = FALSE;
334 clock_sec_t dont_trim_until_ts = 0;
335
336 uint64_t c_segment_warmup_count;
337 uint64_t first_c_segment_to_warm_generation_id = 0;
338 uint64_t last_c_segment_to_warm_generation_id = 0;
339 boolean_t hibernate_flushing = FALSE;
340
341 int64_t c_segment_input_bytes __attribute__((aligned(8))) = 0;
342 int64_t c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
343 int64_t compressor_bytes_used __attribute__((aligned(8))) = 0;
344
345
346 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8)));
347
348 static boolean_t compressor_needs_to_swap(void);
349 static void vm_compressor_swap_trigger_thread(void);
350 static void vm_compressor_do_delayed_compactions(boolean_t);
351 static void vm_compressor_process_major_segments(void);
352 static void vm_compressor_compact_and_swap(boolean_t);
353 static void vm_compressor_age_swapped_in_segments(boolean_t);
354
355 struct vm_compressor_swapper_stats vmcs_stats;
356
357 #if XNU_TARGET_OS_OSX
358 static void vm_compressor_take_paging_space_action(void);
359 #endif /* XNU_TARGET_OS_OSX */
360
361 void compute_swapout_target_age(void);
362
363 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
364 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
365
366 int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
367 int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
368 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
369
370 void c_seg_move_to_sparse_list(c_segment_t);
371 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
372
373 uint64_t vm_available_memory(void);
374 uint64_t vm_compressor_pages_compressed(void);
375 uint32_t vm_compressor_pool_size(void);
376
377 /*
378 * indicate the need to do a major compaction if
379 * the overall set of in-use compression segments
380 * becomes sparse... on systems that support pressure
381 * driven swapping, this will also cause swapouts to
382 * be initiated.
383 */
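/*
 * Concretely: once we have accumulated a meaningful number of segments,
 * compare the page capacity of the in-core segments against the number of
 * pages the compressor pool actually holds; if the unused capacity exceeds
 * roughly 1/8th of the in-core capacity, the set is considered sparse.
 */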
384 static inline boolean_t
385 vm_compressor_needs_to_major_compact()
386 {
387 uint32_t incore_seg_count;
388
389 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
390
391 if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
392 ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
393 ((incore_seg_count / 8) * c_seg_max_pages)) {
394 return 1;
395 }
396 return 0;
397 }
398
399
400 uint64_t
401 vm_available_memory(void)
402 {
403 return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
404 }
405
406
407 uint32_t
408 vm_compressor_pool_size(void)
409 {
410 return VM_PAGE_COMPRESSOR_COUNT;
411 }
412
413 uint64_t
414 vm_compressor_pages_compressed(void)
415 {
416 return c_segment_pages_compressed * PAGE_SIZE_64;
417 }
418
419
420 boolean_t
421 vm_compressor_low_on_space(void)
422 {
423 #if CONFIG_FREEZE
424 uint64_t incore_seg_count;
425 uint32_t incore_compressed_pages;
426 if (freezer_incore_cseg_acct) {
427 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
428 incore_compressed_pages = c_segment_pages_compressed_incore;
429 } else {
430 incore_seg_count = c_segment_count;
431 incore_compressed_pages = c_segment_pages_compressed;
432 }
433
434 if ((incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ||
435 (incore_seg_count > c_segments_nearing_limit)) {
436 return TRUE;
437 }
438 #else /* CONFIG_FREEZE */
439 if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
440 (c_segment_count > c_segments_nearing_limit)) {
441 return TRUE;
442 }
443 #endif /* CONFIG_FREEZE */
444 return FALSE;
445 }
446
447
448 boolean_t
449 vm_compressor_out_of_space(void)
450 {
451 #if CONFIG_FREEZE
452 uint64_t incore_seg_count;
453 uint32_t incore_compressed_pages;
454 if (freezer_incore_cseg_acct) {
455 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
456 incore_compressed_pages = c_segment_pages_compressed_incore;
457 } else {
458 incore_seg_count = c_segment_count;
459 incore_compressed_pages = c_segment_pages_compressed;
460 }
461
462 if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
463 (incore_seg_count > c_segments_incore_limit)) {
464 return TRUE;
465 }
466 #else /* CONFIG_FREEZE */
467 if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
468 (c_segment_count >= c_segments_limit)) {
469 return TRUE;
470 }
471 #endif /* CONFIG_FREEZE */
472 return FALSE;
473 }
474
475
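/*
 * Decide whether a task should be throttled for its compressor usage: when
 * swap is active and we are either low on compressor space or past the hard
 * throttle limit, throttle any task whose compressed footprint exceeds a
 * quarter of all pages currently held by the compressor.
 */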
476 int
477 vm_wants_task_throttled(task_t task)
478 {
479 ledger_amount_t compressed;
480 if (task == kernel_task) {
481 return 0;
482 }
483
484 if (VM_CONFIG_SWAP_IS_ACTIVE) {
485 if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
486 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
487 compressed >>= VM_MAP_PAGE_SHIFT(task->map);
488 if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
489 return 1;
490 }
491 }
492 }
493 return 0;
494 }
495
496
497 #if DEVELOPMENT || DEBUG
498 /*
499 * On compressor/swap exhaustion, kill the largest process regardless of
500 * its chosen process policy.
501 */
502 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
503 #endif /* DEVELOPMENT || DEBUG */
504
505 #if XNU_TARGET_OS_OSX
506
507 static uint32_t no_paging_space_action_in_progress = 0;
508 extern void memorystatus_send_low_swap_note(void);
509
510 static void
511 vm_compressor_take_paging_space_action(void)
512 {
513 if (no_paging_space_action_in_progress == 0) {
514 if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
515 if (no_paging_space_action()) {
516 #if DEVELOPMENT || DEBUG
517 if (kill_on_no_paging_space) {
518 /*
519 * Since we are choosing to always kill a process, we don't need the
520 * "out of application memory" dialog box in this mode. And, hence we won't
521 * send the knote.
522 */
523 no_paging_space_action_in_progress = 0;
524 return;
525 }
526 #endif /* DEVELOPMENT || DEBUG */
527 memorystatus_send_low_swap_note();
528 }
529
530 no_paging_space_action_in_progress = 0;
531 }
532 }
533 }
534 #endif /* XNU_TARGET_OS_OSX */
535
536
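/*
 * vm_decompressor_lock()/vm_decompressor_unlock() gate decompression while
 * page replacement is quiesced: the lock sets decompressions_blocked and the
 * unlock clears it and wakes any thread sleeping on &decompressions_blocked
 * (decompress paths are expected to wait on that event while the flag is set).
 */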
537 void
538 vm_decompressor_lock(void)
539 {
540 PAGE_REPLACEMENT_ALLOWED(TRUE);
541
542 decompressions_blocked = TRUE;
543
544 PAGE_REPLACEMENT_ALLOWED(FALSE);
545 }
546
547 void
548 vm_decompressor_unlock(void)
549 {
550 PAGE_REPLACEMENT_ALLOWED(TRUE);
551
552 decompressions_blocked = FALSE;
553
554 PAGE_REPLACEMENT_ALLOWED(FALSE);
555
556 thread_wakeup((event_t)&decompressions_blocked);
557 }
558
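/*
 * Copy a slot's metadata (compressed size, packed back-pointer to the owning
 * mapping, codec and any validation fields) from csrc to cdst. The compressed
 * bytes themselves are not copied here; callers such as the minor-compaction
 * path move the data separately and then fix up the destination offset.
 */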
559 static inline void
560 cslot_copy(c_slot_t cdst, c_slot_t csrc)
561 {
562 #if CHECKSUM_THE_DATA
563 cdst->c_hash_data = csrc->c_hash_data;
564 #endif
565 #if CHECKSUM_THE_COMPRESSED_DATA
566 cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
567 #endif
568 #if POPCOUNT_THE_COMPRESSED_DATA
569 cdst->c_pop_cdata = csrc->c_pop_cdata;
570 #endif
571 cdst->c_size = csrc->c_size;
572 cdst->c_packed_ptr = csrc->c_packed_ptr;
573 #if defined(__arm__) || defined(__arm64__)
574 cdst->c_codec = csrc->c_codec;
575 #endif
576 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
577 cdst->c_inline_popcount = csrc->c_inline_popcount;
578 #endif
579 }
580
581 vm_map_t compressor_map;
582 uint64_t compressor_pool_max_size;
583 uint64_t compressor_pool_size;
584 uint32_t compressor_pool_multiplier;
585
586 #if DEVELOPMENT || DEBUG
587 /*
588 * Compressor segments are write-protected in development/debug
589 * kernels to help debug memory corruption.
590 * In cases where performance is a concern, this can be disabled
591 * via the boot-arg "-disable_cseg_write_protection".
592 */
593 boolean_t write_protect_c_segs = TRUE;
594 int vm_compressor_test_seg_wp;
595 uint32_t vm_ktrace_enabled;
596 #endif /* DEVELOPMENT || DEBUG */
597
598 #if (XNU_TARGET_OS_OSX && __arm64__)
599
600 #include <IOKit/IOPlatformExpert.h>
601 #include <sys/random.h>
602
603 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
604 static thread_call_t csegbufsz_experiment_thread_call;
605
606 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
607 static void
608 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
609 {
610 // Wait for NVRAM to be writable
611 if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
612 printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
613 }
614
615 if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
616 printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
617 }
618 thread_call_free(csegbufsz_experiment_thread_call);
619 }
620
621 static void
622 erase_csegbufsz_experiment_property_async()
623 {
624 csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
625 erase_csegbufsz_experiment_property,
626 NULL,
627 THREAD_CALL_PRIORITY_LOW
628 );
629 if (csegbufsz_experiment_thread_call == NULL) {
630 printf("csegbufsize_experiment_property: Unable to allocate thread call.");
631 } else {
632 thread_call_enter(csegbufsz_experiment_thread_call);
633 }
634 }
635
636 static void
637 cleanup_csegbufsz_experiment(__unused void *arg0)
638 {
639 char nvram = 0;
640 unsigned int len = sizeof(nvram);
641 if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
642 erase_csegbufsz_experiment_property_async();
643 }
644 }
645
646 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
647 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
648
649 void
650 vm_compressor_init(void)
651 {
652 thread_t thread;
653 int attempts = 1;
654 kern_return_t retval = KERN_SUCCESS;
655 vm_offset_t start_addr = 0;
656 vm_size_t c_segments_arr_size = 0, compressor_submap_size = 0;
657 vm_map_kernel_flags_t vmk_flags;
658 #if RECORD_THE_COMPRESSED_DATA
659 vm_size_t c_compressed_record_sbuf_size = 0;
660 #endif /* RECORD_THE_COMPRESSED_DATA */
661
662 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
663 char bootarg_name[32];
664 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
665
666 #if DEVELOPMENT || DEBUG
667 if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
668 write_protect_c_segs = FALSE;
669 }
670 int vmcval = 1;
671 PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
672
673 if (kern_feature_override(KF_COMPRSV_OVRD)) {
674 vmcval = 0;
675 }
676 if (vmcval == 0) {
677 #if POPCOUNT_THE_COMPRESSED_DATA
678 popcount_c_segs = FALSE;
679 #endif
680 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
681 checksum_c_segs = FALSE;
682 #endif
683 #if VALIDATE_C_SEGMENTS
684 validate_c_segs = FALSE;
685 #endif
686 write_protect_c_segs = FALSE;
687 }
688 #endif /* DEVELOPMENT || DEBUG */
689
690 #if CONFIG_FREEZE
691 if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
692 freezer_incore_cseg_acct = FALSE;
693 }
694 #endif /* CONFIG_FREEZE */
695
696 assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
697
698 #if !XNU_TARGET_OS_OSX
699 vm_compressor_minorcompact_threshold_divisor = 20;
700 vm_compressor_majorcompact_threshold_divisor = 30;
701 vm_compressor_unthrottle_threshold_divisor = 40;
702 vm_compressor_catchup_threshold_divisor = 60;
703 #else /* !XNU_TARGET_OS_OSX */
704 if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
705 vm_compressor_minorcompact_threshold_divisor = 11;
706 vm_compressor_majorcompact_threshold_divisor = 13;
707 vm_compressor_unthrottle_threshold_divisor = 20;
708 vm_compressor_catchup_threshold_divisor = 35;
709 } else {
710 vm_compressor_minorcompact_threshold_divisor = 20;
711 vm_compressor_majorcompact_threshold_divisor = 25;
712 vm_compressor_unthrottle_threshold_divisor = 35;
713 vm_compressor_catchup_threshold_divisor = 50;
714 }
715 #endif /* !XNU_TARGET_OS_OSX */
716
717 queue_init(&c_bad_list_head);
718 queue_init(&c_age_list_head);
719 queue_init(&c_minor_list_head);
720 queue_init(&c_major_list_head);
721 queue_init(&c_filling_list_head);
722 queue_init(&c_swapout_list_head);
723 queue_init(&c_swapio_list_head);
724 queue_init(&c_swappedin_list_head);
725 queue_init(&c_swappedout_list_head);
726 queue_init(&c_swappedout_sparse_list_head);
727
728 c_free_segno_head = -1;
729 c_segments_available = 0;
730
731 if (vm_compression_limit) {
732 compressor_pool_size = ptoa_64(vm_compression_limit);
733 }
734
735 compressor_pool_max_size = C_SEG_MAX_LIMIT;
736 compressor_pool_max_size *= c_seg_bufsize;
737
738 #if XNU_TARGET_OS_OSX
739
740 if (vm_compression_limit == 0) {
741 if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
742 compressor_pool_size = 16ULL * max_mem;
743 } else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
744 compressor_pool_size = 8ULL * max_mem;
745 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
746 compressor_pool_size = 4ULL * max_mem;
747 } else {
748 compressor_pool_size = 2ULL * max_mem;
749 }
750 }
751 if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
752 compressor_pool_multiplier = 1;
753 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
754 compressor_pool_multiplier = 2;
755 } else {
756 compressor_pool_multiplier = 4;
757 }
758
759 #elif defined(__arm__)
760
761 #define VM_RESERVE_SIZE (1024 * 1024 * 256)
762 #define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450)
763
764 if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) {
765 compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE;
766 }
767
768 if (vm_compression_limit == 0) {
769 compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
770 }
771 compressor_pool_multiplier = 1;
772
773 #elif defined(__arm64__) && defined(XNU_TARGET_OS_WATCH)
774
775 /*
776 * On M9 watches the compressor can grow large and lead to churn in the
777 * working set, resulting in audio drops. Setting a cap on the compressor
778 * size favors reclaiming unused memory sitting in the idle band via
779 * jetsams.
780 */
781
782 #define COMPRESSOR_CAP_PERCENTAGE 37ULL
783
784 if (compressor_pool_max_size > max_mem) {
785 compressor_pool_max_size = max_mem;
786 }
787
788 if (vm_compression_limit == 0) {
789 compressor_pool_size = (max_mem * COMPRESSOR_CAP_PERCENTAGE) / 100ULL;
790 }
791 compressor_pool_multiplier = 1;
792
793 #else
794
795 if (compressor_pool_max_size > max_mem) {
796 compressor_pool_max_size = max_mem;
797 }
798
799 if (vm_compression_limit == 0) {
800 compressor_pool_size = max_mem;
801 }
802 compressor_pool_multiplier = 1;
803 #endif
804 if (compressor_pool_size > compressor_pool_max_size) {
805 compressor_pool_size = compressor_pool_max_size;
806 }
807
808 c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
809 c_seg_slot_var_array_min_len = c_seg_max_pages;
810
811 #if !defined(__x86_64__)
812 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
813 c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
814 #else
815 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
816 c_seg_allocsize = c_seg_bufsize;
817 #endif /* !defined(__x86_64__) */
818
819 try_again:
820 c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
821 c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
822
823 c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
824
825 if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
826 #if defined(XNU_TARGET_OS_WATCH)
827 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
828 #else
829 if (!vm_compression_limit) {
830 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
831 }
832 #endif
833 }
834
835 c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
836
837 #if CONFIG_FREEZE
838 /*
839 * Our in-core limits are based on the size of the compressor pool.
840 * The c_segments_nearing_limit is also based on the compressor pool
841 * size and calculated above.
842 */
843 c_segments_incore_limit = c_segments_limit;
844
845 if (freezer_incore_cseg_acct) {
846 /*
847 * Add enough segments to track all frozen c_segs that can be stored in swap.
848 */
849 c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
850 }
851 #endif
852 /*
853 * Submap needs space for:
854 * - c_segments
855 * - c_buffers
856 * - swap reclamations -- c_seg_bufsize
857 */
858 c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
859 c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
860
861 compressor_submap_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
862
863 #if RECORD_THE_COMPRESSED_DATA
864 c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
865 compressor_submap_size += c_compressed_record_sbuf_size;
866 #endif /* RECORD_THE_COMPRESSED_DATA */
867
868 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
869 vmk_flags.vmkf_permanent = TRUE;
870 retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
871 FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR,
872 &compressor_map);
873
874 if (retval != KERN_SUCCESS) {
875 if (++attempts > 3) {
876 panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
877 }
878
879 compressor_pool_size = compressor_pool_size / 2;
880
881 kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
882 goto try_again;
883 }
884 if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments),
885 (sizeof(union c_segu) * c_segments_limit), 0,
886 KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
887 panic("vm_compressor_init: kernel_memory_allocate failed - c_segments");
888 }
889 if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0,
890 KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
891 panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers");
892 }
893
894 #if DEVELOPMENT || DEBUG
895 hvg_hcall_set_coredump_data();
896 #endif
897
898 /*
899 * Pick a good size that will minimize fragmentation in zalloc
900 * by minimizing the fragmentation in a 16k run.
901 *
902 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
903 * making the fragmentation in a 4k page terrible. Using 16k for all
904 * systems matches zalloc() and will minimize fragmentation.
905 */
906 uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
907 uint32_t cnt = (16 << 10) / c_segment_size;
908 uint32_t frag = (16 << 10) % c_segment_size;
909
910 c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
911
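/*
 * While the leftover bytes in a 16k run can still donate at least one c_slot
 * to each of the 'cnt' segments that fit in that run, grow every segment by
 * one inline slot instead of leaving that space stranded.
 */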
912 while (cnt * sizeof(struct c_slot) < frag) {
913 c_segment_size += sizeof(struct c_slot);
914 c_seg_fixed_array_len++;
915 frag -= cnt * sizeof(struct c_slot);
916 }
917
918 compressor_segment_zone = zone_create("compressor_segment",
919 c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
920
921 c_segments_busy = FALSE;
922
923 c_segments_next_page = (caddr_t)c_segments;
924 vm_compressor_algorithm_init();
925
926 {
927 host_basic_info_data_t hinfo;
928 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
929 size_t bufsize;
930 char *buf;
931
932 #define BSD_HOST 1
933 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
934
935 compressor_cpus = hinfo.max_cpus;
936
937 bufsize = PAGE_SIZE;
938 bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
939 /* For the KDP path */
940 bufsize += vm_compressor_get_decode_scratch_size();
941 #if CONFIG_FREEZE
942 bufsize += vm_compressor_get_encode_scratch_size();
943 #endif
944 #if RECORD_THE_COMPRESSED_DATA
945 bufsize += c_compressed_record_sbuf_size;
946 #endif
947
948 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buf, bufsize,
949 PAGE_MASK, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
950 panic("vm_compressor_init: Unable to allocate %zd bytes", bufsize);
951 }
952
953 /*
954 * kdp_compressor_decompressed_page must be page aligned because we access
955 * it through the physical aperture by page number.
956 */
957 kdp_compressor_decompressed_page = buf;
958 kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
959 kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
960 buf += PAGE_SIZE;
961 bufsize -= PAGE_SIZE;
962
963 compressor_scratch_bufs = buf;
964 buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
965 bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
966
967 kdp_compressor_scratch_buf = buf;
968 buf += vm_compressor_get_decode_scratch_size();
969 bufsize -= vm_compressor_get_decode_scratch_size();
970
971 #if CONFIG_FREEZE
972 freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
973 buf += vm_compressor_get_encode_scratch_size();
974 bufsize -= vm_compressor_get_encode_scratch_size();
975 #endif
976
977 #if RECORD_THE_COMPRESSED_DATA
978 c_compressed_record_sbuf = buf;
979 c_compressed_record_cptr = buf;
980 c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
981 buf += c_compressed_record_sbuf_size;
982 bufsize -= c_compressed_record_sbuf_size;
983 #endif
984 assert(bufsize == 0);
985 }
986
987 if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
988 BASEPRI_VM, &thread) != KERN_SUCCESS) {
989 panic("vm_compressor_swap_trigger_thread: create failed");
990 }
991 thread_deallocate(thread);
992
993 if (vm_pageout_internal_start() != KERN_SUCCESS) {
994 panic("vm_compressor_init: Failed to start the internal pageout thread.");
995 }
996 if (VM_CONFIG_SWAP_IS_PRESENT) {
997 vm_compressor_swap_init();
998 }
999
1000 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1001 vm_compressor_is_active = 1;
1002 }
1003
1004 #if CONFIG_FREEZE
1005 memorystatus_freeze_enabled = TRUE;
1006 #endif /* CONFIG_FREEZE */
1007
1008 vm_compressor_available = 1;
1009
1010 vm_page_reactivate_all_throttled();
1011
1012 bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1013 }
1014
1015
1016 #if VALIDATE_C_SEGMENTS
1017
1018 static void
1019 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1020 {
1021 uint16_t c_indx;
1022 int32_t bytes_used;
1023 uint32_t c_rounded_size;
1024 uint32_t c_size;
1025 c_slot_t cs;
1026
1027 if (__probable(validate_c_segs == FALSE)) {
1028 return;
1029 }
1030 if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1031 c_indx = c_seg->c_firstemptyslot;
1032 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1033
1034 if (cs == NULL) {
1035 panic("c_seg_validate: no slot backing c_firstemptyslot");
1036 }
1037
1038 if (cs->c_size) {
1039 panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)", cs->c_size);
1040 }
1041 }
1042 bytes_used = 0;
1043
1044 for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1045 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1046
1047 c_size = UNPACK_C_SIZE(cs);
1048
1049 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1050
1051 bytes_used += c_rounded_size;
1052
1053 #if CHECKSUM_THE_COMPRESSED_DATA
1054 unsigned csvhash;
1055 if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1056 addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1057 panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1058 }
1059 #endif
1060 #if POPCOUNT_THE_COMPRESSED_DATA
1061 unsigned csvpop;
1062 if (c_size) {
1063 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1064 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1065 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1066 }
1067 }
1068 #endif
1069 }
1070
1071 if (bytes_used != c_seg->c_bytes_used) {
1072 panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1073 }
1074
1075 if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1076 panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1077 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1078 }
1079
1080 if (must_be_compact) {
1081 if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1082 panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1083 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1084 }
1085 }
1086 }
1087
1088 #endif
1089
1090
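/*
 * Queue a segment for delayed minor compaction so the space freed by dead
 * slots is reclaimed later. If the caller does not already hold the
 * c_list_lock we may have to drop and retake the segment lock to acquire it
 * in the proper order, marking the segment busy across that window.
 */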
1091 void
1092 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1093 {
1094 boolean_t clear_busy = FALSE;
1095
1096 if (c_list_lock_held == FALSE) {
1097 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1098 C_SEG_BUSY(c_seg);
1099
1100 lck_mtx_unlock_always(&c_seg->c_lock);
1101 lck_mtx_lock_spin_always(c_list_lock);
1102 lck_mtx_lock_spin_always(&c_seg->c_lock);
1103
1104 clear_busy = TRUE;
1105 }
1106 }
1107 assert(c_seg->c_state != C_IS_FILLING);
1108
1109 if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg))) {
1110 queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1111 c_seg->c_on_minorcompact_q = 1;
1112 c_minor_count++;
1113 }
1114 if (c_list_lock_held == FALSE) {
1115 lck_mtx_unlock_always(c_list_lock);
1116 }
1117
1118 if (clear_busy == TRUE) {
1119 C_SEG_WAKEUP_DONE(c_seg);
1120 }
1121 }
1122
1123
1124 unsigned int c_seg_moved_to_sparse_list = 0;
1125
1126 void
1127 c_seg_move_to_sparse_list(c_segment_t c_seg)
1128 {
1129 boolean_t clear_busy = FALSE;
1130
1131 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1132 C_SEG_BUSY(c_seg);
1133
1134 lck_mtx_unlock_always(&c_seg->c_lock);
1135 lck_mtx_lock_spin_always(c_list_lock);
1136 lck_mtx_lock_spin_always(&c_seg->c_lock);
1137
1138 clear_busy = TRUE;
1139 }
1140 c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1141
1142 c_seg_moved_to_sparse_list++;
1143
1144 lck_mtx_unlock_always(c_list_lock);
1145
1146 if (clear_busy == TRUE) {
1147 C_SEG_WAKEUP_DONE(c_seg);
1148 }
1149 }
1150
1151
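/*
 * Insert a segment into an age-ordered queue, keeping the queue sorted by
 * ascending c_generation_id (oldest generation first).
 */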
1152 void
1153 c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
1154 {
1155 c_segment_t c_seg_next;
1156
1157 if (queue_empty(qhead)) {
1158 queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1159 } else {
1160 c_seg_next = (c_segment_t)queue_first(qhead);
1161
1162 while (TRUE) {
1163 if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
1164 queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
1165 break;
1166 }
1167 c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);
1168
1169 if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
1170 queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1171 break;
1172 }
1173 }
1174 }
1175 }
1176
1177
1178 int try_minor_compaction_failed = 0;
1179 int try_minor_compaction_succeeded = 0;
1180
1181 void
1182 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1183 {
1184 assert(c_seg->c_on_minorcompact_q);
1185 /*
1186 * c_seg is currently on the delayed minor compaction
1187 * queue and we have c_seg locked... if we can get the
1188 * c_list_lock w/o blocking (if we blocked we could deadlock
1189 * because the lock order is c_list_lock then c_seg's lock)
1190 * we'll pull it from the delayed list and free it directly
1191 */
1192 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1193 /*
1194 * c_list_lock is held, we need to bail
1195 */
1196 try_minor_compaction_failed++;
1197
1198 lck_mtx_unlock_always(&c_seg->c_lock);
1199 } else {
1200 try_minor_compaction_succeeded++;
1201
1202 C_SEG_BUSY(c_seg);
1203 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1204 }
1205 }
1206
1207
1208 int
1209 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1210 {
1211 int c_seg_freed;
1212
1213 assert(c_seg->c_busy);
1214 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1215
1216 /*
1217 * check for the case that can occur when we are not swapping
1218 * and this segment has been major compacted in the past
1219 * and moved to the majorcompact q to remove it from further
1220 * consideration... if the occupancy falls too low we need
1221 * to put it back on the age_q so that it will be considered
1222 * in the next major compaction sweep... if we don't do this
1223 * we will eventually run into the c_segments_limit
1224 */
1225 if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1226 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1227 }
1228 if (!c_seg->c_on_minorcompact_q) {
1229 if (clear_busy == TRUE) {
1230 C_SEG_WAKEUP_DONE(c_seg);
1231 }
1232
1233 lck_mtx_unlock_always(&c_seg->c_lock);
1234
1235 return 0;
1236 }
1237 queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1238 c_seg->c_on_minorcompact_q = 0;
1239 c_minor_count--;
1240
1241 lck_mtx_unlock_always(c_list_lock);
1242
1243 if (disallow_page_replacement == TRUE) {
1244 lck_mtx_unlock_always(&c_seg->c_lock);
1245
1246 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1247
1248 lck_mtx_lock_spin_always(&c_seg->c_lock);
1249 }
1250 c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1251
1252 if (disallow_page_replacement == TRUE) {
1253 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1254 }
1255
1256 if (need_list_lock == TRUE) {
1257 lck_mtx_lock_spin_always(c_list_lock);
1258 }
1259
1260 return c_seg_freed;
1261 }
1262
1263 void
1264 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1265 {
1266 c_segment_t c_seg = (c_segment_t) wait_event;
1267
1268 waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1269 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1270 }
1271
1272 #if DEVELOPMENT || DEBUG
1273 int
1274 do_cseg_wedge_thread(void)
1275 {
1276 struct c_segment c_seg;
1277 c_seg.c_busy_for_thread = current_thread();
1278
1279 debug_cseg_wait_event = (event_t) &c_seg;
1280
1281 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1282 assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1283
1284 thread_block(THREAD_CONTINUE_NULL);
1285
1286 return 0;
1287 }
1288
1289 int
1290 do_cseg_unwedge_thread(void)
1291 {
1292 thread_wakeup(debug_cseg_wait_event);
1293 debug_cseg_wait_event = NULL;
1294
1295 return 0;
1296 }
1297 #endif /* DEVELOPMENT || DEBUG */
1298
1299 void
1300 c_seg_wait_on_busy(c_segment_t c_seg)
1301 {
1302 c_seg->c_wanted = 1;
1303
1304 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1305 assert_wait((event_t) (c_seg), THREAD_UNINT);
1306
1307 lck_mtx_unlock_always(&c_seg->c_lock);
1308 thread_block(THREAD_CONTINUE_NULL);
1309 }
1310
1311 #if CONFIG_FREEZE
1312 /*
1313 * We don't have the task lock held while updating the task's
1314 * c_seg queues. We can do that because of the following restrictions:
1315 *
1316 * - SINGLE FREEZER CONTEXT:
1317 * We 'insert' c_segs into the task list on the task_freeze path.
1318 * There can only be one such freeze in progress and the task
1319 * isn't disappearing because we have the VM map lock held throughout
1320 * and we have a reference on the proc too.
1321 *
1322 * - SINGLE TASK DISOWN CONTEXT:
1323 * We 'disown' c_segs of a task ONLY from the task_terminate context. So
1324 * we don't need the task lock but we need the c_list_lock and the
1325 * compressor master lock (shared). We also hold the individual
1326 * c_seg locks (exclusive).
1327 *
1328 * If we either:
1329 * - can't get the c_seg lock on a try, then we start again because maybe
1330 * the c_seg is part of a compaction and might get freed. So we can't trust
1331 * that linkage and need to restart our queue traversal.
1332 * - OR, we run into a busy c_seg (say being swapped in or freed), we
1333 * drop all locks again and wait and restart our queue traversal.
1334 *
1335 * - The new_owner_task below is currently only the kernel or NULL.
1336 *
1337 */
1338 void
1339 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1340 {
1341 task_t owner_task = c_seg->c_task_owner;
1342 uint64_t uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1343
1344 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1345 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1346
1347 if (owner_task) {
1348 task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1349 queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1350 c_segment_t, c_task_list_next_cseg);
1351 }
1352
1353 if (new_owner_task) {
1354 queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1355 c_segment_t, c_task_list_next_cseg);
1356 task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1357 }
1358
1359 c_seg->c_task_owner = new_owner_task;
1360 }
1361
1362 void
1363 task_disown_frozen_csegs(task_t owner_task)
1364 {
1365 c_segment_t c_seg = NULL, next_cseg = NULL;
1366
1367 again:
1368 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1369 lck_mtx_lock_spin_always(c_list_lock);
1370
1371 for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1372 !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1373 c_seg = next_cseg) {
1374 next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1375
1376 if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1377 lck_mtx_unlock(c_list_lock);
1378 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1379 goto again;
1380 }
1381
1382 if (c_seg->c_busy) {
1383 lck_mtx_unlock(c_list_lock);
1384 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1385
1386 c_seg_wait_on_busy(c_seg);
1387
1388 goto again;
1389 }
1390 assert(c_seg->c_task_owner == owner_task);
1391 c_seg_update_task_owner(c_seg, kernel_task);
1392 lck_mtx_unlock_always(&c_seg->c_lock);
1393 }
1394
1395 lck_mtx_unlock(c_list_lock);
1396 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1397 }
1398 #endif /* CONFIG_FREEZE */
1399
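/*
 * Move a segment from its current state to new_state: remove it from the
 * queue associated with the old state, adjust the per-queue counters, then
 * insert it (at the head if insert_head is TRUE) on the queue for the new
 * state. The asserts encode which transitions are legal. Callers hold the
 * c_list_lock, and the segment lock except when transitioning into
 * C_IS_FILLING.
 */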
1400 void
1401 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1402 {
1403 int old_state = c_seg->c_state;
1404
1405 #if XNU_TARGET_OS_OSX
1406 #if DEVELOPMENT || DEBUG
1407 if (new_state != C_IS_FILLING) {
1408 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1409 }
1410 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1411 #endif
1412 #endif /* XNU_TARGET_OS_OSX */
1413 switch (old_state) {
1414 case C_IS_EMPTY:
1415 assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1416
1417 c_empty_count--;
1418 break;
1419
1420 case C_IS_FILLING:
1421 assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1422
1423 queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1424 c_filling_count--;
1425 break;
1426
1427 case C_ON_AGE_Q:
1428 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1429 new_state == C_IS_FREE);
1430
1431 queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1432 c_age_count--;
1433 break;
1434
1435 case C_ON_SWAPPEDIN_Q:
1436 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1437
1438 queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1439 c_swappedin_count--;
1440 break;
1441
1442 case C_ON_SWAPOUT_Q:
1443 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1444
1445 #if CONFIG_FREEZE
1446 if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1447 c_seg_update_task_owner(c_seg, NULL);
1448 }
1449 #endif /* CONFIG_FREEZE */
1450
1451 queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1452 thread_wakeup((event_t)&compaction_swapper_running);
1453 c_swapout_count--;
1454 break;
1455
1456 case C_ON_SWAPIO_Q:
1457 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1458
1459 queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1460 c_swapio_count--;
1461 break;
1462
1463 case C_ON_SWAPPEDOUT_Q:
1464 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1465 new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1466 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1467
1468 queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1469 c_swappedout_count--;
1470 break;
1471
1472 case C_ON_SWAPPEDOUTSPARSE_Q:
1473 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1474 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1475
1476 queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1477 c_swappedout_sparse_count--;
1478 break;
1479
1480 case C_ON_MAJORCOMPACT_Q:
1481 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1482
1483 queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1484 c_major_count--;
1485 break;
1486
1487 case C_ON_BAD_Q:
1488 assert(new_state == C_IS_FREE);
1489
1490 queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1491 c_bad_count--;
1492 break;
1493
1494 default:
1495 panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1496 }
1497
1498 switch (new_state) {
1499 case C_IS_FREE:
1500 assert(old_state != C_IS_FILLING);
1501
1502 break;
1503
1504 case C_IS_EMPTY:
1505 assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1506
1507 c_empty_count++;
1508 break;
1509
1510 case C_IS_FILLING:
1511 assert(old_state == C_IS_EMPTY);
1512
1513 queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1514 c_filling_count++;
1515 break;
1516
1517 case C_ON_AGE_Q:
1518 assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1519 old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1520 old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1521
1522 if (old_state == C_IS_FILLING) {
1523 queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1524 } else {
1525 if (!queue_empty(&c_age_list_head)) {
1526 c_segment_t c_first;
1527
1528 c_first = (c_segment_t)queue_first(&c_age_list_head);
1529 c_seg->c_creation_ts = c_first->c_creation_ts;
1530 }
1531 queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1532 }
1533 c_age_count++;
1534 break;
1535
1536 case C_ON_SWAPPEDIN_Q:
1537 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1538
1539 if (insert_head == TRUE) {
1540 queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1541 } else {
1542 queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1543 }
1544 c_swappedin_count++;
1545 break;
1546
1547 case C_ON_SWAPOUT_Q:
1548 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1549
1550 if (insert_head == TRUE) {
1551 queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1552 } else {
1553 queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1554 }
1555 c_swapout_count++;
1556 break;
1557
1558 case C_ON_SWAPIO_Q:
1559 assert(old_state == C_ON_SWAPOUT_Q);
1560
1561 if (insert_head == TRUE) {
1562 queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1563 } else {
1564 queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1565 }
1566 c_swapio_count++;
1567 break;
1568
1569 case C_ON_SWAPPEDOUT_Q:
1570 assert(old_state == C_ON_SWAPIO_Q);
1571
1572 if (insert_head == TRUE) {
1573 queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1574 } else {
1575 queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1576 }
1577 c_swappedout_count++;
1578 break;
1579
1580 case C_ON_SWAPPEDOUTSPARSE_Q:
1581 assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
1582
1583 if (insert_head == TRUE) {
1584 queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1585 } else {
1586 queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1587 }
1588
1589 c_swappedout_sparse_count++;
1590 break;
1591
1592 case C_ON_MAJORCOMPACT_Q:
1593 assert(old_state == C_ON_AGE_Q);
1594
1595 if (insert_head == TRUE) {
1596 queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1597 } else {
1598 queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1599 }
1600 c_major_count++;
1601 break;
1602
1603 case C_ON_BAD_Q:
1604 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1605
1606 if (insert_head == TRUE) {
1607 queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1608 } else {
1609 queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1610 }
1611 c_bad_count++;
1612 break;
1613
1614 default:
1615 panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
1616 }
1617 c_seg->c_state = new_state;
1618 }
1619
1620
1621
1622 void
1623 c_seg_free(c_segment_t c_seg)
1624 {
1625 assert(c_seg->c_busy);
1626
1627 lck_mtx_unlock_always(&c_seg->c_lock);
1628 lck_mtx_lock_spin_always(c_list_lock);
1629 lck_mtx_lock_spin_always(&c_seg->c_lock);
1630
1631 c_seg_free_locked(c_seg);
1632 }
1633
1634
1635 void
1636 c_seg_free_locked(c_segment_t c_seg)
1637 {
1638 int segno;
1639 int pages_populated = 0;
1640 int32_t *c_buffer = NULL;
1641 uint64_t c_swap_handle = 0;
1642
1643 assert(c_seg->c_busy);
1644 assert(c_seg->c_slots_used == 0);
1645 assert(!c_seg->c_on_minorcompact_q);
1646 assert(!c_seg->c_busy_swapping);
1647
1648 if (c_seg->c_overage_swap == TRUE) {
1649 c_overage_swapped_count--;
1650 c_seg->c_overage_swap = FALSE;
1651 }
1652 if (!(C_SEG_IS_ONDISK(c_seg))) {
1653 c_buffer = c_seg->c_store.c_buffer;
1654 } else {
1655 c_swap_handle = c_seg->c_store.c_swap_handle;
1656 }
1657
1658 c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
1659
1660 if (c_buffer) {
1661 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
1662 c_seg->c_store.c_buffer = NULL;
1663 } else {
1664 #if CONFIG_FREEZE
1665 c_seg_update_task_owner(c_seg, NULL);
1666 #endif /* CONFIG_FREEZE */
1667
1668 c_seg->c_store.c_swap_handle = (uint64_t)-1;
1669 }
1670
1671 lck_mtx_unlock_always(&c_seg->c_lock);
1672
1673 lck_mtx_unlock_always(c_list_lock);
1674
1675 if (c_buffer) {
1676 if (pages_populated) {
1677 kernel_memory_depopulate(compressor_map, (vm_offset_t)c_buffer,
1678 pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1679 }
1680 } else if (c_swap_handle) {
1681 /*
1682 * Free swap space on disk.
1683 */
1684 vm_swap_free(c_swap_handle);
1685 }
1686 lck_mtx_lock_spin_always(&c_seg->c_lock);
1687 /*
1688 * c_seg must remain busy until
1689 * after the call to vm_swap_free
1690 */
1691 C_SEG_WAKEUP_DONE(c_seg);
1692 lck_mtx_unlock_always(&c_seg->c_lock);
1693
1694 segno = c_seg->c_mysegno;
1695
1696 lck_mtx_lock_spin_always(c_list_lock);
1697 /*
1698 * because the c_buffer is now associated with the segno,
1699 * we can't put the segno back on the free list until
1700 * after we have depopulated the c_buffer range, or
1701 * we run the risk of depopulating a range that is
1702 * now being used in one of the compressor heads
1703 */
1704 c_segments[segno].c_segno = c_free_segno_head;
1705 c_free_segno_head = segno;
1706 c_segment_count--;
1707
1708 lck_mtx_unlock_always(c_list_lock);
1709
1710 lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
1711
1712 if (c_seg->c_slot_var_array_len) {
1713 kfree_data(c_seg->c_slot_var_array,
1714 sizeof(struct c_slot) * c_seg->c_slot_var_array_len);
1715 }
1716
1717 zfree(compressor_segment_zone, c_seg);
1718 }
1719
1720 #if DEVELOPMENT || DEBUG
1721 int c_seg_trim_page_count = 0;
1722 #endif
1723
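/*
 * Trim unused slots from the tail of a segment: walk backwards from
 * c_nextslot past any trailing empty slots, then shrink c_nextoffset and
 * c_populated_offset down to just cover the last slot that still holds data.
 */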
1724 void
1725 c_seg_trim_tail(c_segment_t c_seg)
1726 {
1727 c_slot_t cs;
1728 uint32_t c_size;
1729 uint32_t c_offset;
1730 uint32_t c_rounded_size;
1731 uint16_t current_nextslot;
1732 uint32_t current_populated_offset;
1733
1734 if (c_seg->c_bytes_used == 0) {
1735 return;
1736 }
1737 current_nextslot = c_seg->c_nextslot;
1738 current_populated_offset = c_seg->c_populated_offset;
1739
1740 while (c_seg->c_nextslot) {
1741 cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
1742
1743 c_size = UNPACK_C_SIZE(cs);
1744
1745 if (c_size) {
1746 if (current_nextslot != c_seg->c_nextslot) {
1747 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1748 c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1749
1750 c_seg->c_nextoffset = c_offset;
1751 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
1752 ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1753
1754 if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
1755 c_seg->c_firstemptyslot = c_seg->c_nextslot;
1756 }
1757 #if DEVELOPMENT || DEBUG
1758 c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
1759 round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
1760 PAGE_SIZE);
1761 #endif
1762 }
1763 break;
1764 }
1765 c_seg->c_nextslot--;
1766 }
1767 assert(c_seg->c_nextslot);
1768 }
1769
1770
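/*
 * Minor compaction: slide the remaining compressed blocks of a single
 * c_segment down over the holes left by freed slots, then depopulate any
 * buffer pages no longer needed at the tail.  If the segment holds no data
 * it is freed outright.  Returns 1 if the segment was freed, 0 otherwise.
 * The caller must have marked the segment busy; the segment lock is dropped
 * before returning, and busy is cleared only when clear_busy is TRUE.
 */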
1771 int
1772 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
1773 {
1774 c_slot_mapping_t slot_ptr;
1775 uint32_t c_offset = 0;
1776 uint32_t old_populated_offset;
1777 uint32_t c_rounded_size;
1778 uint32_t c_size;
1779 uint16_t c_indx = 0;
1780 int i;
1781 c_slot_t c_dst;
1782 c_slot_t c_src;
1783
1784 assert(c_seg->c_busy);
1785
1786 #if VALIDATE_C_SEGMENTS
1787 c_seg_validate(c_seg, FALSE);
1788 #endif
1789 if (c_seg->c_bytes_used == 0) {
1790 c_seg_free(c_seg);
1791 return 1;
1792 }
1793 lck_mtx_unlock_always(&c_seg->c_lock);
1794
1795 if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
1796 goto done;
1797 }
1798
1799 /* TODO: assert first emptyslot's c_size is actually 0 */
1800
1801 #if DEVELOPMENT || DEBUG
1802 C_SEG_MAKE_WRITEABLE(c_seg);
1803 #endif
1804
1805 #if VALIDATE_C_SEGMENTS
1806 c_seg->c_was_minor_compacted++;
1807 #endif
1808 c_indx = c_seg->c_firstemptyslot;
1809 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1810
1811 old_populated_offset = c_seg->c_populated_offset;
1812 c_offset = c_dst->c_offset;
1813
1814 for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
1815 c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
1816
1817 c_size = UNPACK_C_SIZE(c_src);
1818
1819 if (c_size == 0) {
1820 continue;
1821 }
1822
1823 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1824 /* N.B.: This memcpy may be an overlapping copy */
1825 memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
1826
1827 cslot_copy(c_dst, c_src);
1828 c_dst->c_offset = c_offset;
1829
1830 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
1831 slot_ptr->s_cindx = c_indx;
1832
1833 c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1834 PACK_C_SIZE(c_src, 0);
1835 c_indx++;
1836
1837 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1838 }
1839 c_seg->c_firstemptyslot = c_indx;
1840 c_seg->c_nextslot = c_indx;
1841 c_seg->c_nextoffset = c_offset;
1842 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1843 c_seg->c_bytes_unused = 0;
1844
1845 #if VALIDATE_C_SEGMENTS
1846 c_seg_validate(c_seg, TRUE);
1847 #endif
1848 if (old_populated_offset > c_seg->c_populated_offset) {
1849 uint32_t gc_size;
1850 int32_t *gc_ptr;
1851
1852 gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
1853 gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
1854
1855 kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size,
1856 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1857 }
1858
1859 #if DEVELOPMENT || DEBUG
1860 C_SEG_WRITE_PROTECT(c_seg);
1861 #endif
1862
1863 done:
1864 if (clear_busy == TRUE) {
1865 lck_mtx_lock_spin_always(&c_seg->c_lock);
1866 C_SEG_WAKEUP_DONE(c_seg);
1867 lck_mtx_unlock_always(&c_seg->c_lock);
1868 }
1869 return 0;
1870 }
1871
1872
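/*
 * Make sure a c_slot exists for c_seg->c_nextslot.  Slots beyond the fixed
 * array live in a variable-length array that is doubled in size (starting
 * at c_seg_slot_var_array_min_len) whenever it fills up.
 */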
1873 static void
1874 c_seg_alloc_nextslot(c_segment_t c_seg)
1875 {
1876 struct c_slot *old_slot_array = NULL;
1877 struct c_slot *new_slot_array = NULL;
1878 int newlen;
1879 int oldlen;
1880
1881 if (c_seg->c_nextslot < c_seg_fixed_array_len) {
1882 return;
1883 }
1884
1885 if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
1886 oldlen = c_seg->c_slot_var_array_len;
1887 old_slot_array = c_seg->c_slot_var_array;
1888
1889 if (oldlen == 0) {
1890 newlen = c_seg_slot_var_array_min_len;
1891 } else {
1892 newlen = oldlen * 2;
1893 }
1894
1895 new_slot_array = kalloc_data(sizeof(struct c_slot) * newlen,
1896 Z_WAITOK);
1897
1898 lck_mtx_lock_spin_always(&c_seg->c_lock);
1899
1900 if (old_slot_array) {
1901 memcpy(new_slot_array, old_slot_array,
1902 sizeof(struct c_slot) * oldlen);
1903 }
1904
1905 c_seg->c_slot_var_array_len = newlen;
1906 c_seg->c_slot_var_array = new_slot_array;
1907
1908 lck_mtx_unlock_always(&c_seg->c_lock);
1909
1910 kfree_data(old_slot_array, sizeof(struct c_slot) * oldlen);
1911 }
1912 }
1913
1914
1915 #define C_SEG_MAJOR_COMPACT_STATS_MAX (30)
1916
1917 struct {
1918 uint64_t asked_permission;
1919 uint64_t compactions;
1920 uint64_t moved_slots;
1921 uint64_t moved_bytes;
1922 uint64_t wasted_space_in_swapouts;
1923 uint64_t count_of_swapouts;
1924 uint64_t count_of_freed_segs;
1925 uint64_t bailed_compactions;
1926 uint64_t bytes_freed_rate_us;
1927 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
1928
1929 int c_seg_major_compact_stats_now = 0;
1930
1931
1932 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((c_seg_bufsize * 90) / 100)
1933
1934
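/*
 * Quick check used by the compact-and-swap loop: is it worth major
 * compacting c_seg_src into c_seg_dst?  Declines when both segments are
 * already ~90% full, or when the destination is out of offsets or slots.
 */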
1935 boolean_t
1936 c_seg_major_compact_ok(
1937 c_segment_t c_seg_dst,
1938 c_segment_t c_seg_src)
1939 {
1940 c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
1941
1942 if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
1943 c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
1944 return FALSE;
1945 }
1946
1947 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
1948 /*
1949 * destination segment is full... can't compact
1950 */
1951 return FALSE;
1952 }
1953
1954 return TRUE;
1955 }
1956
1957
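/*
 * Major compaction: move compressed blocks from c_seg_src into c_seg_dst,
 * populating additional destination pages as needed and repointing the
 * per-slot back pointers at the new segment and index.  Returns TRUE if
 * the destination can still accept more data, FALSE once it is full.
 */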
1958 boolean_t
1959 c_seg_major_compact(
1960 c_segment_t c_seg_dst,
1961 c_segment_t c_seg_src)
1962 {
1963 c_slot_mapping_t slot_ptr;
1964 uint32_t c_rounded_size;
1965 uint32_t c_size;
1966 uint16_t dst_slot;
1967 int i;
1968 c_slot_t c_dst;
1969 c_slot_t c_src;
1970 boolean_t keep_compacting = TRUE;
1971
1972 /*
1973 * segments are not locked but they are both marked c_busy
1974 * which keeps c_decompress from working on them...
1975 * we can safely allocate new pages, move compressed data
1976 * from c_seg_src to c_seg_dst and update both c_segments'
1977 * state w/o holding the master lock
1978 */
1979 #if DEVELOPMENT || DEBUG
1980 C_SEG_MAKE_WRITEABLE(c_seg_dst);
1981 #endif
1982
1983 #if VALIDATE_C_SEGMENTS
1984 c_seg_dst->c_was_major_compacted++;
1985 c_seg_src->c_was_major_donor++;
1986 #endif
1987 c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
1988
1989 dst_slot = c_seg_dst->c_nextslot;
1990
1991 for (i = 0; i < c_seg_src->c_nextslot; i++) {
1992 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
1993
1994 c_size = UNPACK_C_SIZE(c_src);
1995
1996 if (c_size == 0) {
1997 /* BATCH: move what we have so far; */
1998 continue;
1999 }
2000
2001 if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {
2002 int size_to_populate;
2003
2004 /* doesn't fit */
2005 size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2006
2007 if (size_to_populate == 0) {
2008 /* can't fit */
2009 keep_compacting = FALSE;
2010 break;
2011 }
2012 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2013 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2014 }
2015
2016 kernel_memory_populate(compressor_map,
2017 (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2018 size_to_populate,
2019 KMA_COMPRESSOR,
2020 VM_KERN_MEMORY_COMPRESSOR);
2021
2022 c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2023 assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2024 }
2025 c_seg_alloc_nextslot(c_seg_dst);
2026
2027 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2028
2029 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
2030
2031 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2032
2033 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2034 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += c_size;
2035
2036 cslot_copy(c_dst, c_src);
2037 c_dst->c_offset = c_seg_dst->c_nextoffset;
2038
2039 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2040 c_seg_dst->c_firstemptyslot++;
2041 }
2042 c_seg_dst->c_slots_used++;
2043 c_seg_dst->c_nextslot++;
2044 c_seg_dst->c_bytes_used += c_rounded_size;
2045 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2046
2047 PACK_C_SIZE(c_src, 0);
2048
2049 c_seg_src->c_bytes_used -= c_rounded_size;
2050 c_seg_src->c_bytes_unused += c_rounded_size;
2051 c_seg_src->c_firstemptyslot = 0;
2052
2053 assert(c_seg_src->c_slots_used);
2054 c_seg_src->c_slots_used--;
2055
2056 if (!c_seg_src->c_swappedin) {
2057 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
2058 c_seg_dst->c_swappedin = false;
2059 }
2060
2061 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2062 /* dest segment is now full */
2063 keep_compacting = FALSE;
2064 break;
2065 }
2066 }
2067 #if DEVELOPMENT || DEBUG
2068 C_SEG_WRITE_PROTECT(c_seg_dst);
2069 #endif
2070 if (dst_slot < c_seg_dst->c_nextslot) {
2071 PAGE_REPLACEMENT_ALLOWED(TRUE);
2072 /*
2073 * we've now locked out c_decompress from
2074 * converting the slot passed into it into
2075 * a c_segment_t which allows us to use
2076 * the backptr to change which c_segment and
2077 * index the slot points to
2078 */
2079 while (dst_slot < c_seg_dst->c_nextslot) {
2080 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2081
2082 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2083 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2084 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2085 slot_ptr->s_cindx = dst_slot++;
2086 }
2087 PAGE_REPLACEMENT_ALLOWED(FALSE);
2088 }
2089 return keep_compacting;
2090 }
2091
2092
2093 uint64_t
2094 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2095 {
2096 uint64_t end_msecs;
2097 uint64_t start_msecs;
2098
2099 end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2100 start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2101
2102 return end_msecs - start_msecs;
2103 }
2104
2105
2106
2107 uint32_t compressor_eval_period_in_msecs = 250;
2108 uint32_t compressor_sample_min_in_msecs = 500;
2109 uint32_t compressor_sample_max_in_msecs = 10000;
2110 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2111 uint32_t compressor_thrashing_min_per_10msecs = 20;
2112
2113 /* When true, reset sample data next chance we get. */
2114 static boolean_t compressor_need_sample_reset = FALSE;
2115
2116
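/*
 * Thrashing detection: once enough compression/decompression activity has
 * accumulated in the current evaluation window, examine the age histogram
 * of recent decompressions.  When the bulk of that working set still fits
 * in the compressor pool, pick an age covering roughly the oldest 1% of
 * recent decompressions and publish it in swapout_target_age, making
 * segments older than that candidates for swapout.  Sample and evaluation
 * state is reset via compressor_need_sample_reset / need_eval_reset.
 */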
2117 void
2118 compute_swapout_target_age(void)
2119 {
2120 clock_sec_t cur_ts_sec;
2121 clock_nsec_t cur_ts_nsec;
2122 uint32_t min_operations_needed_in_this_sample;
2123 uint64_t elapsed_msecs_in_eval;
2124 uint64_t elapsed_msecs_in_sample;
2125 boolean_t need_eval_reset = FALSE;
2126
2127 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2128
2129 elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2130
2131 if (compressor_need_sample_reset ||
2132 elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2133 compressor_need_sample_reset = TRUE;
2134 need_eval_reset = TRUE;
2135 goto done;
2136 }
2137 elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2138
2139 if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2140 goto done;
2141 }
2142 need_eval_reset = TRUE;
2143
2144 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2145
2146 min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2147
2148 if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2149 (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2150 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2151 sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2152
2153 swapout_target_age = 0;
2154
2155 compressor_need_sample_reset = TRUE;
2156 need_eval_reset = TRUE;
2157 goto done;
2158 }
2159 last_eval_compression_count = sample_period_compression_count;
2160 last_eval_decompression_count = sample_period_decompression_count;
2161
2162 if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2163 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2164 goto done;
2165 }
2166 if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2167 uint64_t running_total;
2168 uint64_t working_target;
2169 uint64_t aging_target;
2170 uint32_t oldest_age_of_csegs_sampled = 0;
2171 uint64_t working_set_approximation = 0;
2172
2173 swapout_target_age = 0;
2174
2175 working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */
2176 aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */
2177 running_total = 0;
2178
2179 for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2180 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2181
2182 working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2183
2184 if (running_total >= working_target) {
2185 break;
2186 }
2187 }
2188 if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2189 working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2190
2191 if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2192 running_total = overage_decompressions_during_sample_period;
2193
2194 for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2195 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2196
2197 if (running_total >= aging_target) {
2198 break;
2199 }
2200 }
2201 swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2202
2203 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2204 } else {
2205 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2206 }
2207 } else {
2208 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2209 }
2210
2211 compressor_need_sample_reset = TRUE;
2212 need_eval_reset = TRUE;
2213 } else {
2214 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2215 }
2216 done:
2217 if (compressor_need_sample_reset == TRUE) {
2218 bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2219 overage_decompressions_during_sample_period = 0;
2220
2221 start_of_sample_period_sec = cur_ts_sec;
2222 start_of_sample_period_nsec = cur_ts_nsec;
2223 sample_period_decompression_count = 0;
2224 sample_period_compression_count = 0;
2225 last_eval_decompression_count = 0;
2226 last_eval_compression_count = 0;
2227 compressor_need_sample_reset = FALSE;
2228 }
2229 if (need_eval_reset == TRUE) {
2230 start_of_eval_period_sec = cur_ts_sec;
2231 start_of_eval_period_nsec = cur_ts_nsec;
2232 }
2233 }
2234
2235
2236 int compaction_swapper_init_now = 0;
2237 int compaction_swapper_running = 0;
2238 int compaction_swapper_awakened = 0;
2239 int compaction_swapper_abort = 0;
2240
2241
2242 #if CONFIG_JETSAM
2243 boolean_t memorystatus_kill_on_VM_compressor_thrashing(boolean_t);
2244 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
2245 boolean_t memorystatus_kill_on_FC_thrashing(boolean_t);
2246 int compressor_thrashing_induced_jetsam = 0;
2247 int filecache_thrashing_induced_jetsam = 0;
2248 static boolean_t vm_compressor_thrashing_detected = FALSE;
2249 #endif /* CONFIG_JETSAM */
2250
2251 static bool
2252 compressor_swapout_conditions_met(void)
2253 {
2254 bool should_swap = false;
2255
2256 if (COMPRESSOR_NEEDS_TO_SWAP()) {
2257 should_swap = true;
2258 vmcs_stats.compressor_swap_threshold_exceeded++;
2259 }
2260 if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2261 should_swap = true;
2262 vmcs_stats.external_q_throttled++;
2263 }
2264 if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2265 should_swap = true;
2266 vmcs_stats.free_count_below_reserve++;
2267 }
2268 return should_swap;
2269 }
2270
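/*
 * Decide whether the compact-and-swap path should start swapping: ripe
 * overage segments, the thresholds checked by
 * compressor_swapout_conditions_met(), or thrashing / phantom-cache
 * pressure can all trigger it.  Under CONFIG_JETSAM, thrashing or a
 * compressor space shortage kicks off a jetsam instead and the swap is
 * suppressed so the kill can take effect first.  If none of that applies,
 * fall back to vm_compressor_needs_to_major_compact() so fragmentation
 * alone can still drive a (non-swapping) major compaction pass.
 */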
2271 static boolean_t
2272 compressor_needs_to_swap(void)
2273 {
2274 boolean_t should_swap = FALSE;
2275
2276 if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2277 c_segment_t c_seg;
2278 clock_sec_t now;
2279 clock_sec_t age;
2280 clock_nsec_t nsec;
2281
2282 clock_get_system_nanotime(&now, &nsec);
2283 age = 0;
2284
2285 lck_mtx_lock_spin_always(c_list_lock);
2286
2287 if (!queue_empty(&c_age_list_head)) {
2288 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2289
2290 age = now - c_seg->c_creation_ts;
2291 }
2292 lck_mtx_unlock_always(c_list_lock);
2293
2294 if (age >= vm_ripe_target_age) {
2295 should_swap = TRUE;
2296 goto check_if_low_space;
2297 }
2298 }
2299 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2300 should_swap = compressor_swapout_conditions_met();
2301 if (should_swap) {
2302 goto check_if_low_space;
2303 }
2304 }
2305
2306 #if (XNU_TARGET_OS_OSX && __arm64__)
2307 /*
2308 * Thrashing detection disabled.
2309 */
2310 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2311
2312 compute_swapout_target_age();
2313
2314 if (swapout_target_age) {
2315 c_segment_t c_seg;
2316
2317 lck_mtx_lock_spin_always(c_list_lock);
2318
2319 if (!queue_empty(&c_age_list_head)) {
2320 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2321
2322 if (c_seg->c_creation_ts > swapout_target_age) {
2323 swapout_target_age = 0;
2324 }
2325 }
2326 lck_mtx_unlock_always(c_list_lock);
2327 }
2328 #if CONFIG_PHANTOM_CACHE
2329 if (vm_phantom_cache_check_pressure()) {
2330 should_swap = TRUE;
2331 }
2332 #endif
2333 if (swapout_target_age) {
2334 should_swap = TRUE;
2335 vmcs_stats.thrashing_detected++;
2336 }
2337 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2338
2339 check_if_low_space:
2340
2341 #if CONFIG_JETSAM
2342 if (should_swap || vm_compressor_low_on_space() == TRUE) {
2343 if (vm_compressor_thrashing_detected == FALSE) {
2344 vm_compressor_thrashing_detected = TRUE;
2345
2346 if (swapout_target_age) {
2347 /* The compressor is thrashing. */
2348 memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */);
2349 compressor_thrashing_induced_jetsam++;
2350 } else if (vm_compressor_low_on_space() == TRUE) {
2351 /* The compressor is running low on space. */
2352 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
2353 compressor_thrashing_induced_jetsam++;
2354 } else {
2355 memorystatus_kill_on_FC_thrashing(TRUE /* async */);
2356 filecache_thrashing_induced_jetsam++;
2357 }
2358 }
2359 /*
2360 * let the jetsam take precedence over
2361 * any major compactions we might have
2362 * been able to do... otherwise we run
2363 * the risk of doing major compactions
2364 * on segments we're about to free up
2365 * due to the jetsam activity.
2366 */
2367 should_swap = FALSE;
2368 }
2369
2370 #else /* CONFIG_JETSAM */
2371 if (should_swap && vm_swap_low_on_space()) {
2372 vm_compressor_take_paging_space_action();
2373 }
2374 #endif /* CONFIG_JETSAM */
2375
2376 if (should_swap == FALSE) {
2377 /*
2378 * vm_compressor_needs_to_major_compact returns true only if we're
2379 * about to run out of available compressor segments... in this
2380 * case, we absolutely need to run a major compaction even if
2381 * we've just kicked off a jetsam or we don't otherwise need to
2382 * swap... terminating objects releases
2383 * pages back to the uncompressed cache, but does not guarantee
2384 * that we will free up even a single compression segment
2385 */
2386 should_swap = vm_compressor_needs_to_major_compact();
2387 if (should_swap) {
2388 vmcs_stats.fragmentation_detected++;
2389 }
2390 }
2391
2392 /*
2393 * returning TRUE when swap_supported == FALSE
2394 * will cause the major compaction engine to
2395 * run, but will not trigger any swapping...
2396 * segments that have been major compacted
2397 * will be moved to the majorcompact queue
2398 */
2399 return should_swap;
2400 }
2401
2402 #if CONFIG_JETSAM
2403 /*
2404 * This function is called from the jetsam thread after killing something to
2405 * mitigate thrashing.
2406 *
2407 * We need to restart our thrashing detection heuristics since memory pressure
2408 * has potentially changed significantly, and we don't want to detect on old
2409 * data from before the jetsam.
2410 */
2411 void
2412 vm_thrashing_jetsam_done(void)
2413 {
2414 vm_compressor_thrashing_detected = FALSE;
2415
2416 /* Were we compressor-thrashing or filecache-thrashing? */
2417 if (swapout_target_age) {
2418 swapout_target_age = 0;
2419 compressor_need_sample_reset = TRUE;
2420 }
2421 #if CONFIG_PHANTOM_CACHE
2422 else {
2423 vm_phantom_cache_restart_sample();
2424 }
2425 #endif
2426 }
2427 #endif /* CONFIG_JETSAM */
2428
2429 uint32_t vm_wake_compactor_swapper_calls = 0;
2430 uint32_t vm_run_compactor_already_running = 0;
2431 uint32_t vm_run_compactor_empty_minor_q = 0;
2432 uint32_t vm_run_compactor_did_compact = 0;
2433 uint32_t vm_run_compactor_waited = 0;
2434
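/*
 * Opportunistically drive the delayed minor compactions from the calling
 * thread.  Bails out if the minor queue is empty; if the compactor/swapper
 * is already running it either returns immediately or, on configurations
 * restricted to a single processor, waits for it to finish instead.
 */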
2435 void
2436 vm_run_compactor(void)
2437 {
2438 if (c_segment_count == 0) {
2439 return;
2440 }
2441
2442 lck_mtx_lock_spin_always(c_list_lock);
2443
2444 if (c_minor_count == 0) {
2445 vm_run_compactor_empty_minor_q++;
2446
2447 lck_mtx_unlock_always(c_list_lock);
2448 return;
2449 }
2450 if (compaction_swapper_running) {
2451 if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2452 vm_run_compactor_already_running++;
2453
2454 lck_mtx_unlock_always(c_list_lock);
2455 return;
2456 }
2457 vm_run_compactor_waited++;
2458
2459 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2460
2461 lck_mtx_unlock_always(c_list_lock);
2462
2463 thread_block(THREAD_CONTINUE_NULL);
2464
2465 return;
2466 }
2467 vm_run_compactor_did_compact++;
2468
2469 fastwake_warmup = FALSE;
2470 compaction_swapper_running = 1;
2471
2472 vm_compressor_do_delayed_compactions(FALSE);
2473
2474 compaction_swapper_running = 0;
2475
2476 lck_mtx_unlock_always(c_list_lock);
2477
2478 thread_wakeup((event_t)&compaction_swapper_running);
2479 }
2480
2481
2482 void
2483 vm_wake_compactor_swapper(void)
2484 {
2485 if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2486 return;
2487 }
2488
2489 if (c_minor_count || vm_compressor_needs_to_major_compact()) {
2490 lck_mtx_lock_spin_always(c_list_lock);
2491
2492 fastwake_warmup = FALSE;
2493
2494 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2495 vm_wake_compactor_swapper_calls++;
2496
2497 compaction_swapper_awakened = 1;
2498 thread_wakeup((event_t)&c_compressor_swap_trigger);
2499 }
2500 lck_mtx_unlock_always(c_list_lock);
2501 }
2502 }
2503
2504
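/*
 * Force a compact-and-swap pass with swap-behind of ripe segments enabled.
 * Any running compaction is aborted and waited out first, and segments on
 * the major-compact queue that have aged past vm_ripe_target_age are moved
 * back to the age queue so the pass will consider them.  Requires that
 * swap is configured (VM_CONFIG_SWAP_IS_PRESENT).
 */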
2505 void
2506 vm_consider_swapping()
2507 {
2508 c_segment_t c_seg, c_seg_next;
2509 clock_sec_t now;
2510 clock_nsec_t nsec;
2511
2512 assert(VM_CONFIG_SWAP_IS_PRESENT);
2513
2514 lck_mtx_lock_spin_always(c_list_lock);
2515
2516 compaction_swapper_abort = 1;
2517
2518 while (compaction_swapper_running) {
2519 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2520
2521 lck_mtx_unlock_always(c_list_lock);
2522
2523 thread_block(THREAD_CONTINUE_NULL);
2524
2525 lck_mtx_lock_spin_always(c_list_lock);
2526 }
2527 compaction_swapper_abort = 0;
2528 compaction_swapper_running = 1;
2529
2530 vm_swapout_ripe_segments = TRUE;
2531
2532 if (!queue_empty(&c_major_list_head)) {
2533 clock_get_system_nanotime(&now, &nsec);
2534
2535 c_seg = (c_segment_t)queue_first(&c_major_list_head);
2536
2537 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
2538 if (c_overage_swapped_count >= c_overage_swapped_limit) {
2539 break;
2540 }
2541
2542 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2543
2544 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
2545 lck_mtx_lock_spin_always(&c_seg->c_lock);
2546
2547 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2548
2549 lck_mtx_unlock_always(&c_seg->c_lock);
2550 }
2551 c_seg = c_seg_next;
2552 }
2553 }
2554 vm_compressor_compact_and_swap(FALSE);
2555
2556 compaction_swapper_running = 0;
2557
2558 vm_swapout_ripe_segments = FALSE;
2559
2560 lck_mtx_unlock_always(c_list_lock);
2561
2562 thread_wakeup((event_t)&compaction_swapper_running);
2563 }
2564
2565
2566 void
2567 vm_consider_waking_compactor_swapper(void)
2568 {
2569 boolean_t need_wakeup = FALSE;
2570
2571 if (c_segment_count == 0) {
2572 return;
2573 }
2574
2575 if (compaction_swapper_running || compaction_swapper_awakened) {
2576 return;
2577 }
2578
2579 if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2580 compaction_swapper_init_now = 1;
2581 need_wakeup = TRUE;
2582 }
2583
2584 if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {
2585 need_wakeup = TRUE;
2586 } else if (compressor_needs_to_swap()) {
2587 need_wakeup = TRUE;
2588 } else if (c_minor_count) {
2589 uint64_t total_bytes;
2590
2591 total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;
2592
2593 if ((total_bytes - compressor_bytes_used) > total_bytes / 10) {
2594 need_wakeup = TRUE;
2595 }
2596 }
2597 if (need_wakeup == TRUE) {
2598 lck_mtx_lock_spin_always(c_list_lock);
2599
2600 fastwake_warmup = FALSE;
2601
2602 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2603 memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2604
2605 compaction_swapper_awakened = 1;
2606 thread_wakeup((event_t)&c_compressor_swap_trigger);
2607 }
2608 lck_mtx_unlock_always(c_list_lock);
2609 }
2610 }
2611
2612
2613 #define C_SWAPOUT_LIMIT 4
2614 #define DELAYED_COMPACTIONS_PER_PASS 30
2615
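/*
 * Drain the minor-compaction queue.  After every DELAYED_COMPACTIONS_PER_PASS
 * segments (when swap is active), check whether the swapout path needs to
 * run and stop early if so.  Called with, and returns holding, c_list_lock.
 */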
2616 void
2617 vm_compressor_do_delayed_compactions(boolean_t flush_all)
2618 {
2619 c_segment_t c_seg;
2620 int number_compacted = 0;
2621 boolean_t needs_to_swap = FALSE;
2622
2623
2624 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
2625
2626 #if XNU_TARGET_OS_OSX
2627 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
2628 #endif /* XNU_TARGET_OS_OSX */
2629
2630 while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
2631 c_seg = (c_segment_t)queue_first(&c_minor_list_head);
2632
2633 lck_mtx_lock_spin_always(&c_seg->c_lock);
2634
2635 if (c_seg->c_busy) {
2636 lck_mtx_unlock_always(c_list_lock);
2637 c_seg_wait_on_busy(c_seg);
2638 lck_mtx_lock_spin_always(c_list_lock);
2639
2640 continue;
2641 }
2642 C_SEG_BUSY(c_seg);
2643
2644 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
2645
2646 if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
2647 if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) {
2648 needs_to_swap = TRUE;
2649 }
2650
2651 number_compacted = 0;
2652 }
2653 lck_mtx_lock_spin_always(c_list_lock);
2654 }
2655
2656 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
2657 }
2658
2659
2660 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT 10
2661
2662 static void
2663 vm_compressor_age_swapped_in_segments(boolean_t flush_all)
2664 {
2665 c_segment_t c_seg;
2666 clock_sec_t now;
2667 clock_nsec_t nsec;
2668
2669 clock_get_system_nanotime(&now, &nsec);
2670
2671 while (!queue_empty(&c_swappedin_list_head)) {
2672 c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);
2673
2674 if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
2675 break;
2676 }
2677
2678 lck_mtx_lock_spin_always(&c_seg->c_lock);
2679
2680 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2681 c_seg->c_agedin_ts = (uint32_t) now;
2682
2683 lck_mtx_unlock_always(&c_seg->c_lock);
2684 }
2685 }
2686
2687
2688 extern int vm_num_swap_files;
2689 extern int vm_num_pinned_swap_files;
2690 extern int vm_swappin_enabled;
2691
2692 extern unsigned int vm_swapfile_total_segs_used;
2693 extern unsigned int vm_swapfile_total_segs_alloced;
2694
2695
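/*
 * Hibernation support: push compressed segments out to swap before the
 * image is written.  Aborts any running compaction, moves segments from the
 * major-compact queue back onto the age queue, runs a full compact-and-swap
 * pass, then waits (with a timeout) for the swapout queue to drain.
 * Progress and failures are reported via HIBLOG.
 */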
2696 void
2697 vm_compressor_flush(void)
2698 {
2699 uint64_t vm_swap_put_failures_at_start;
2700 wait_result_t wait_result = 0;
2701 AbsoluteTime startTime, endTime;
2702 clock_sec_t now_sec;
2703 clock_nsec_t now_nsec;
2704 uint64_t nsec;
2705 c_segment_t c_seg, c_seg_next;
2706
2707 HIBLOG("vm_compressor_flush - starting\n");
2708
2709 clock_get_uptime(&startTime);
2710
2711 lck_mtx_lock_spin_always(c_list_lock);
2712
2713 fastwake_warmup = FALSE;
2714 compaction_swapper_abort = 1;
2715
2716 while (compaction_swapper_running) {
2717 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2718
2719 lck_mtx_unlock_always(c_list_lock);
2720
2721 thread_block(THREAD_CONTINUE_NULL);
2722
2723 lck_mtx_lock_spin_always(c_list_lock);
2724 }
2725 compaction_swapper_abort = 0;
2726 compaction_swapper_running = 1;
2727
2728 hibernate_flushing = TRUE;
2729 hibernate_no_swapspace = FALSE;
2730 hibernate_flush_timed_out = FALSE;
2731 c_generation_id_flush_barrier = c_generation_id + 1000;
2732
2733 clock_get_system_nanotime(&now_sec, &now_nsec);
2734 hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
2735
2736 vm_swap_put_failures_at_start = vm_swap_put_failures;
2737
2738 /*
2739 * We are about to hibernate and so we want all segments flushed to disk.
2740 * Segments that are on the major compaction queue won't be considered in
2741 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
2742 * the ageQ for consideration.
2743 */
2744 if (!queue_empty(&c_major_list_head)) {
2745 c_seg = (c_segment_t)queue_first(&c_major_list_head);
2746
2747 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
2748 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2749 lck_mtx_lock_spin_always(&c_seg->c_lock);
2750 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2751 lck_mtx_unlock_always(&c_seg->c_lock);
2752 c_seg = c_seg_next;
2753 }
2754 }
2755 vm_compressor_compact_and_swap(TRUE);
2756
2757 while (!queue_empty(&c_swapout_list_head)) {
2758 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
2759
2760 lck_mtx_unlock_always(c_list_lock);
2761
2762 wait_result = thread_block(THREAD_CONTINUE_NULL);
2763
2764 lck_mtx_lock_spin_always(c_list_lock);
2765
2766 if (wait_result == THREAD_TIMED_OUT) {
2767 break;
2768 }
2769 }
2770 hibernate_flushing = FALSE;
2771 compaction_swapper_running = 0;
2772
2773 if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
2774 HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
2775 vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
2776 }
2777
2778 lck_mtx_unlock_always(c_list_lock);
2779
2780 thread_wakeup((event_t)&compaction_swapper_running);
2781
2782 clock_get_uptime(&endTime);
2783 SUB_ABSOLUTETIME(&endTime, &startTime);
2784 absolutetime_to_nanoseconds(endTime, &nsec);
2785
2786 HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
2787 nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
2788 }
2789
2790
2791 int compaction_swap_trigger_thread_awakened = 0;
2792
2793 static void
2794 vm_compressor_swap_trigger_thread(void)
2795 {
2796 current_thread()->options |= TH_OPT_VMPRIV;
2797
2798 /*
2799 * compaction_swapper_init_now is set when the first call to
2800 * vm_consider_waking_compactor_swapper is made from
2801 * vm_pageout_scan... since this function is called upon
2802 * thread creation, we want to make sure to delay adjusting
2803 * the tuneables until we are awakened via vm_pageout_scan
2804 * so that we are at a point where the vm_swapfile_open will
2805 * be operating on the correct directory (in case the default
2806 * of using the VM volume is overridden by the dynamic_pager)
2807 */
2808 if (compaction_swapper_init_now) {
2809 vm_compaction_swapper_do_init();
2810
2811 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
2812 thread_vm_bind_group_add();
2813 }
2814 #if CONFIG_THREAD_GROUPS
2815 thread_group_vm_add();
2816 #endif
2817 thread_set_thread_name(current_thread(), "VM_cswap_trigger");
2818 compaction_swapper_init_now = 0;
2819 }
2820 lck_mtx_lock_spin_always(c_list_lock);
2821
2822 compaction_swap_trigger_thread_awakened++;
2823 compaction_swapper_awakened = 0;
2824
2825 if (compaction_swapper_running == 0) {
2826 compaction_swapper_running = 1;
2827
2828 vm_compressor_compact_and_swap(FALSE);
2829
2830 compaction_swapper_running = 0;
2831 }
2832 assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
2833
2834 if (compaction_swapper_running == 0) {
2835 thread_wakeup((event_t)&compaction_swapper_running);
2836 }
2837
2838 lck_mtx_unlock_always(c_list_lock);
2839
2840 thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
2841
2842 /* NOTREACHED */
2843 }
2844
2845
2846 void
2847 vm_compressor_record_warmup_start(void)
2848 {
2849 c_segment_t c_seg;
2850
2851 lck_mtx_lock_spin_always(c_list_lock);
2852
2853 if (first_c_segment_to_warm_generation_id == 0) {
2854 if (!queue_empty(&c_age_list_head)) {
2855 c_seg = (c_segment_t)queue_last(&c_age_list_head);
2856
2857 first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2858 } else {
2859 first_c_segment_to_warm_generation_id = 0;
2860 }
2861
2862 fastwake_recording_in_progress = TRUE;
2863 }
2864 lck_mtx_unlock_always(c_list_lock);
2865 }
2866
2867
2868 void
2869 vm_compressor_record_warmup_end(void)
2870 {
2871 c_segment_t c_seg;
2872
2873 lck_mtx_lock_spin_always(c_list_lock);
2874
2875 if (fastwake_recording_in_progress == TRUE) {
2876 if (!queue_empty(&c_age_list_head)) {
2877 c_seg = (c_segment_t)queue_last(&c_age_list_head);
2878
2879 last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2880 } else {
2881 last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
2882 }
2883
2884 fastwake_recording_in_progress = FALSE;
2885
2886 HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2887 }
2888 lck_mtx_unlock_always(c_list_lock);
2889 }
2890
2891
2892 #define DELAY_TRIM_ON_WAKE_SECS 25
2893
2894 void
2895 vm_compressor_delay_trim(void)
2896 {
2897 clock_sec_t sec;
2898 clock_nsec_t nsec;
2899
2900 clock_get_system_nanotime(&sec, &nsec);
2901 dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
2902 }
2903
2904
2905 void
2906 vm_compressor_do_warmup(void)
2907 {
2908 lck_mtx_lock_spin_always(c_list_lock);
2909
2910 if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
2911 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
2912
2913 lck_mtx_unlock_always(c_list_lock);
2914 return;
2915 }
2916
2917 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2918 fastwake_warmup = TRUE;
2919
2920 compaction_swapper_awakened = 1;
2921 thread_wakeup((event_t)&c_compressor_swap_trigger);
2922 }
2923 lck_mtx_unlock_always(c_list_lock);
2924 }
2925
2926 void
2927 do_fastwake_warmup_all(void)
2928 {
2929 lck_mtx_lock_spin_always(c_list_lock);
2930
2931 if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
2932 lck_mtx_unlock_always(c_list_lock);
2933 return;
2934 }
2935
2936 fastwake_warmup = TRUE;
2937
2938 do_fastwake_warmup(&c_swappedout_list_head, TRUE);
2939
2940 do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
2941
2942 fastwake_warmup = FALSE;
2943
2944 lck_mtx_unlock_always(c_list_lock);
2945 }
2946
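/*
 * Walk the given swapped-out queue and swap segments back in ahead of
 * demand.  When consider_all_cseg is FALSE only the generation-id window
 * recorded by vm_compressor_record_warmup_start/end is warmed, and the walk
 * stops if free memory runs low.  Entered and exited with c_list_lock held.
 */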
2947 void
2948 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
2949 {
2950 c_segment_t c_seg = NULL;
2951 AbsoluteTime startTime, endTime;
2952 uint64_t nsec;
2953
2954
2955 HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2956
2957 clock_get_uptime(&startTime);
2958
2959 lck_mtx_unlock_always(c_list_lock);
2960
2961 proc_set_thread_policy(current_thread(),
2962 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
2963
2964 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2965
2966 lck_mtx_lock_spin_always(c_list_lock);
2967
2968 while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
2969 c_seg = (c_segment_t) queue_first(c_queue);
2970
2971 if (consider_all_cseg == FALSE) {
2972 if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
2973 c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
2974 break;
2975 }
2976
2977 if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
2978 break;
2979 }
2980 }
2981
2982 lck_mtx_lock_spin_always(&c_seg->c_lock);
2983 lck_mtx_unlock_always(c_list_lock);
2984
2985 if (c_seg->c_busy) {
2986 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2987 c_seg_wait_on_busy(c_seg);
2988 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2989 } else {
2990 if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
2991 lck_mtx_unlock_always(&c_seg->c_lock);
2992 }
2993 c_segment_warmup_count++;
2994
2995 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2996 vm_pageout_io_throttle();
2997 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2998 }
2999 lck_mtx_lock_spin_always(c_list_lock);
3000 }
3001 lck_mtx_unlock_always(c_list_lock);
3002
3003 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3004
3005 proc_set_thread_policy(current_thread(),
3006 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3007
3008 clock_get_uptime(&endTime);
3009 SUB_ABSOLUTETIME(&endTime, &startTime);
3010 absolutetime_to_nanoseconds(endTime, &nsec);
3011
3012 HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3013
3014 lck_mtx_lock_spin_always(c_list_lock);
3015
3016 if (consider_all_cseg == FALSE) {
3017 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3018 }
3019 }
3020
3021 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3022 extern bool vm_swapout_thread_running;
3023 extern boolean_t compressor_store_stop_compaction;
3024
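/*
 * Main loop of the compactor/swapper.  Handles fastwake warmup, delayed
 * minor compactions and aging of swapped-in segments, then walks the age
 * queue: each segment is minor compacted, major compacted with its
 * neighbors while that remains profitable, and finally either queued for
 * swapout or parked on the major-compact queue.  Yields periodically and
 * records per-pass statistics in c_seg_major_compact_stats.
 */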
3025 void
3026 vm_compressor_compact_and_swap(boolean_t flush_all)
3027 {
3028 c_segment_t c_seg, c_seg_next;
3029 boolean_t keep_compacting, switch_state;
3030 clock_sec_t now;
3031 clock_nsec_t nsec;
3032 mach_timespec_t start_ts, end_ts;
3033 unsigned int number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3034 uint64_t bytes_to_free, bytes_freed, delta_usec;
3035
3036 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3037
3038 if (fastwake_warmup == TRUE) {
3039 uint64_t starting_warmup_count;
3040
3041 starting_warmup_count = c_segment_warmup_count;
3042
3043 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3044 first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3045 do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3046 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3047
3048 fastwake_warmup = FALSE;
3049 }
3050
3051 vm_compressor_process_major_segments();
3052
3053 /*
3054 * it's possible for the c_age_list_head to be empty if we
3055 * hit our limits for growing the compressor pool and we subsequently
3056 * hibernated... on the next hibernation we could see the queue as
3057 * empty and not proceed even though we have a bunch of segments on
3058 * the swapped in queue that need to be dealt with.
3059 */
3060 vm_compressor_do_delayed_compactions(flush_all);
3061
3062 vm_compressor_age_swapped_in_segments(flush_all);
3063
3064 /*
3065 * we only need to grab the timestamp once per
3066 * invocation of this function since the
3067 * timescale we're interested in is measured
3068 * in days
3069 */
3070 clock_get_system_nanotime(&now, &nsec);
3071
3072 start_ts.tv_sec = (int) now;
3073 start_ts.tv_nsec = nsec;
3074 delta_usec = 0;
3075 number_considered = 0;
3076 wanted_cseg_found = 0;
3077 number_yields = 0;
3078 bytes_to_free = 0;
3079 bytes_freed = 0;
3080 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3081
3082 while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3083 if (hibernate_flushing == TRUE) {
3084 clock_sec_t sec;
3085
3086 if (hibernate_should_abort()) {
3087 HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3088 break;
3089 }
3090 if (hibernate_no_swapspace == TRUE) {
3091 HIBLOG("vm_compressor_flush - out of swap space\n");
3092 break;
3093 }
3094 if (vm_swap_files_pinned() == FALSE) {
3095 HIBLOG("vm_compressor_flush - unpinned swap files\n");
3096 break;
3097 }
3098 if (hibernate_in_progress_with_pinned_swap == TRUE &&
3099 (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3100 HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3101 break;
3102 }
3103 clock_get_system_nanotime(&sec, &nsec);
3104
3105 if (sec > hibernate_flushing_deadline) {
3106 hibernate_flush_timed_out = TRUE;
3107 HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3108 break;
3109 }
3110 }
3111 if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3112 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3113
3114 if (!vm_swapout_thread_running) {
3115 thread_wakeup((event_t)&c_swapout_list_head);
3116 }
3117
3118 lck_mtx_unlock_always(c_list_lock);
3119
3120 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3121
3122 thread_block(THREAD_CONTINUE_NULL);
3123
3124 lck_mtx_lock_spin_always(c_list_lock);
3125 }
3126 /*
3127 * Minor compactions
3128 */
3129 vm_compressor_do_delayed_compactions(flush_all);
3130
3131 vm_compressor_age_swapped_in_segments(flush_all);
3132
3133 if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3134 /*
3135 * we timed out on the above thread_block
3136 * let's loop around and try again
3137 * the timeout allows us to continue
3138 * to do minor compactions to make
3139 * more memory available
3140 */
3141 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3142
3143 continue;
3144 }
3145
3146 /*
3147 * Swap out segments?
3148 */
3149 if (flush_all == FALSE) {
3150 boolean_t needs_to_swap;
3151
3152 lck_mtx_unlock_always(c_list_lock);
3153
3154 needs_to_swap = compressor_needs_to_swap();
3155
3156 lck_mtx_lock_spin_always(c_list_lock);
3157
3158 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3159
3160 if (needs_to_swap == FALSE) {
3161 break;
3162 }
3163 }
3164 if (queue_empty(&c_age_list_head)) {
3165 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3166 break;
3167 }
3168 c_seg = (c_segment_t) queue_first(&c_age_list_head);
3169
3170 assert(c_seg->c_state == C_ON_AGE_Q);
3171
3172 if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3173 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3174 break;
3175 }
3176
3177 lck_mtx_lock_spin_always(&c_seg->c_lock);
3178
3179 if (c_seg->c_busy) {
3180 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3181
3182 lck_mtx_unlock_always(c_list_lock);
3183 c_seg_wait_on_busy(c_seg);
3184 lck_mtx_lock_spin_always(c_list_lock);
3185
3186 continue;
3187 }
3188 C_SEG_BUSY(c_seg);
3189
3190 if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3191 /*
3192 * found an empty c_segment and freed it
3193 * so go grab the next guy in the queue
3194 */
3195 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3196 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3197 continue;
3198 }
3199 /*
3200 * Major compaction
3201 */
3202 keep_compacting = TRUE;
3203 switch_state = TRUE;
3204
3205 while (keep_compacting == TRUE) {
3206 assert(c_seg->c_busy);
3207
3208 /* look for another segment to consolidate */
3209
3210 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3211
3212 if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) {
3213 break;
3214 }
3215
3216 assert(c_seg_next->c_state == C_ON_AGE_Q);
3217
3218 number_considered++;
3219
3220 if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3221 break;
3222 }
3223
3224 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3225
3226 if (c_seg_next->c_busy) {
3227 /*
3228 * We are going to block for our neighbor.
3229 * If our c_seg is wanted, we should unbusy
3230 * it because we don't know how long we might
3231 * have to block here.
3232 */
3233 if (c_seg->c_wanted) {
3234 lck_mtx_unlock_always(&c_seg_next->c_lock);
3235 switch_state = FALSE;
3236 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3237 wanted_cseg_found++;
3238 break;
3239 }
3240
3241 lck_mtx_unlock_always(c_list_lock);
3242
3243 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3244
3245 c_seg_wait_on_busy(c_seg_next);
3246 lck_mtx_lock_spin_always(c_list_lock);
3247
3248 continue;
3249 }
3250 /* grab that segment */
3251 C_SEG_BUSY(c_seg_next);
3252
3253 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3254 if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3255 /*
3256 * found an empty c_segment and freed it
3257 * so we can't continue to use c_seg_next
3258 */
3259 bytes_freed += bytes_to_free;
3260 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3261 continue;
3262 }
3263
3264 /* unlock the list ... */
3265 lck_mtx_unlock_always(c_list_lock);
3266
3267 /* do the major compaction */
3268
3269 keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3270
3271 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3272
3273 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3274
3275 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3276 /*
3277 * run a minor compaction on the donor segment
3278 * since we pulled at least some of its
3279 * data into our target... if we've emptied
3280 * it, now is a good time to free it which
3281 * c_seg_minor_compaction_and_unlock also takes care of
3282 *
3283 * by passing TRUE, we ask for c_busy to be cleared
3284 * and c_wanted to be taken care of
3285 */
3286 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3287 if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3288 bytes_freed += bytes_to_free;
3289 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3290 } else {
3291 bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3292 bytes_freed += bytes_to_free;
3293 }
3294
3295 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3296
3297 /* relock the list */
3298 lck_mtx_lock_spin_always(c_list_lock);
3299
3300 if (c_seg->c_wanted) {
3301 /*
3302 * Our c_seg is in demand. Let's
3303 * unbusy it and wakeup the waiters
3304 * instead of continuing the compaction
3305 * because we could be in this loop
3306 * for a while.
3307 */
3308 switch_state = FALSE;
3309 wanted_cseg_found++;
3310 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3311 break;
3312 }
3313 } /* major compaction */
3314
3315 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, wanted_cseg_found, 0);
3316
3317 lck_mtx_lock_spin_always(&c_seg->c_lock);
3318
3319 assert(c_seg->c_busy);
3320 assert(!c_seg->c_on_minorcompact_q);
3321
3322 if (switch_state) {
3323 if (VM_CONFIG_SWAP_IS_ACTIVE) {
3324 int new_state = C_ON_SWAPOUT_Q;
3325
3326 #if (XNU_TARGET_OS_OSX && __arm64__)
3327 if (flush_all == false && compressor_swapout_conditions_met() == false) {
3328 new_state = C_ON_MAJORCOMPACT_Q;
3329 }
3330 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3331
3332 if (new_state == C_ON_SWAPOUT_Q) {
3333 /*
3334 * This mode of putting a generic c_seg on the swapout list is
3335 * only supported when we have general swapping enabled
3336 */
3337 clock_sec_t lnow;
3338 clock_nsec_t lnsec;
3339 clock_get_system_nanotime(&lnow, &lnsec);
3340 if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
3341 vmcs_stats.unripe_under_30s++;
3342 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
3343 vmcs_stats.unripe_under_60s++;
3344 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
3345 vmcs_stats.unripe_under_300s++;
3346 }
3347 }
3348
3349 c_seg_switch_state(c_seg, new_state, FALSE);
3350 } else {
3351 if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
3352 assert(VM_CONFIG_SWAP_IS_PRESENT);
3353 /*
3354 * we are running compressor sweeps with swap-behind
3355 * make sure the c_seg has aged enough before swapping it
3356 * out...
3357 */
3358 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
3359 c_seg->c_overage_swap = TRUE;
3360 c_overage_swapped_count++;
3361 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3362 }
3363 }
3364 }
3365 if (c_seg->c_state == C_ON_AGE_Q) {
3366 /*
3367 * this c_seg didn't get moved to the swapout queue
3368 * so we need to move it out of the way...
3369 * we just did a major compaction on it so put it
3370 * on that queue
3371 */
3372 c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
3373 } else {
3374 c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
3375 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
3376 }
3377 }
3378
3379 C_SEG_WAKEUP_DONE(c_seg);
3380
3381 lck_mtx_unlock_always(&c_seg->c_lock);
3382
3383 if (c_swapout_count) {
3384 /*
3385 * We don't pause/yield here because we will either
3386 * yield below or at the top of the loop with the
3387 * assert_wait_timeout.
3388 */
3389 if (!vm_swapout_thread_running) {
3390 thread_wakeup((event_t)&c_swapout_list_head);
3391 }
3392 }
3393
3394 if (number_considered >= yield_after_considered_per_pass) {
3395 if (wanted_cseg_found) {
3396 /*
3397 * We stopped major compactions on a c_seg
3398 * that is wanted. We don't know the priority
3399 * of the waiter unfortunately but we are at
3400 * a very high priority and so, just in case
3401 * the waiter is a critical system daemon or
3402 * UI thread, let's give up the CPU in case
3403 * the system is running a few CPU intensive
3404 * tasks.
3405 */
3406 lck_mtx_unlock_always(c_list_lock);
3407
3408 mutex_pause(2); /* 100us yield */
3409
3410 number_yields++;
3411
3412 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
3413
3414 lck_mtx_lock_spin_always(c_list_lock);
3415 }
3416
3417 number_considered = 0;
3418 wanted_cseg_found = 0;
3419 }
3420 }
3421 clock_get_system_nanotime(&now, &nsec);
3422 end_ts.tv_sec = (int) now;
3423 end_ts.tv_nsec = nsec;
3424
3425 SUB_MACH_TIMESPEC(&end_ts, &start_ts);
3426
3427 delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
3428
3429 delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
3430
3431 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
3432
3433 if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
3434 c_seg_major_compact_stats_now = 0;
3435 } else {
3436 c_seg_major_compact_stats_now++;
3437 }
3438
3439 assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
3440
3441 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3442 }
3443
3444
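/*
 * Return the c_segment currently being filled for this compression head,
 * creating one if necessary: pick a free segment number (populating another
 * page of the c_segments array when the free list is empty), allocate and
 * initialize the c_segment in C_IS_FILLING state, and make sure enough of
 * its buffer is populated to accept the next compressed block.  Returns
 * NULL when the compressor pool or compressed-page limits have been hit.
 */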
3445 static c_segment_t
3446 c_seg_allocate(c_segment_t *current_chead)
3447 {
3448 c_segment_t c_seg;
3449 int min_needed;
3450 int size_to_populate;
3451
3452 #if XNU_TARGET_OS_OSX
3453 if (vm_compressor_low_on_space()) {
3454 vm_compressor_take_paging_space_action();
3455 }
3456 #endif /* XNU_TARGET_OS_OSX */
3457
3458 if ((c_seg = *current_chead) == NULL) {
3459 uint32_t c_segno;
3460
3461 lck_mtx_lock_spin_always(c_list_lock);
3462
3463 while (c_segments_busy == TRUE) {
3464 assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
3465
3466 lck_mtx_unlock_always(c_list_lock);
3467
3468 thread_block(THREAD_CONTINUE_NULL);
3469
3470 lck_mtx_lock_spin_always(c_list_lock);
3471 }
3472 if (c_free_segno_head == (uint32_t)-1) {
3473 uint32_t c_segments_available_new;
3474 uint32_t compressed_pages;
3475
3476 #if CONFIG_FREEZE
3477 if (freezer_incore_cseg_acct) {
3478 compressed_pages = c_segment_pages_compressed_incore;
3479 } else {
3480 compressed_pages = c_segment_pages_compressed;
3481 }
3482 #else
3483 compressed_pages = c_segment_pages_compressed;
3484 #endif /* CONFIG_FREEZE */
3485
3486 if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
3487 lck_mtx_unlock_always(c_list_lock);
3488
3489 return NULL;
3490 }
3491 c_segments_busy = TRUE;
3492 lck_mtx_unlock_always(c_list_lock);
3493
3494 kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
3495 PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
3496 c_segments_next_page += PAGE_SIZE;
3497
3498 c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
3499
3500 if (c_segments_available_new > c_segments_limit) {
3501 c_segments_available_new = c_segments_limit;
3502 }
3503
3504 for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
3505 c_segments[c_segno - 1].c_segno = c_segno;
3506 }
3507
3508 lck_mtx_lock_spin_always(c_list_lock);
3509
3510 c_segments[c_segno - 1].c_segno = c_free_segno_head;
3511 c_free_segno_head = c_segments_available;
3512 c_segments_available = c_segments_available_new;
3513
3514 c_segments_busy = FALSE;
3515 thread_wakeup((event_t) (&c_segments_busy));
3516 }
3517 c_segno = c_free_segno_head;
3518 assert(c_segno >= 0 && c_segno < c_segments_limit);
3519
3520 c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
3521
3522 /*
3523 * do the rest of the bookkeeping now while we're still behind
3524 * the list lock; the segment's generation id is assigned later,
3525 * in c_current_seg_filled(), once the segment has been filled
3526 */
3527 c_segment_count++;
3528 if (c_segment_count > c_segment_count_max) {
3529 c_segment_count_max = c_segment_count;
3530 }
3531
3532 lck_mtx_unlock_always(c_list_lock);
3533
3534 c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
3535
3536 c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
3537
3538 lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
3539
3540 c_seg->c_state = C_IS_EMPTY;
3541 c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
3542 c_seg->c_mysegno = c_segno;
3543
3544 lck_mtx_lock_spin_always(c_list_lock);
3545 c_empty_count++;
3546 c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
3547 c_segments[c_segno].c_seg = c_seg;
3548 assert(c_segments[c_segno].c_segno > c_segments_available);
3549 lck_mtx_unlock_always(c_list_lock);
3550
3551 *current_chead = c_seg;
3552
3553 #if DEVELOPMENT || DEBUG
3554 C_SEG_MAKE_WRITEABLE(c_seg);
3555 #endif
3556 }
3557 c_seg_alloc_nextslot(c_seg);
3558
3559 size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
3560
3561 if (size_to_populate) {
3562 min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
3563
3564 if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
3565 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
3566 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
3567 }
3568
3569 OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
3570
3571 kernel_memory_populate(compressor_map,
3572 (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
3573 size_to_populate,
3574 KMA_COMPRESSOR,
3575 VM_KERN_MEMORY_COMPRESSOR);
3576 } else {
3577 size_to_populate = 0;
3578 }
3579 }
3580 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3581
3582 lck_mtx_lock_spin_always(&c_seg->c_lock);
3583
3584 if (size_to_populate) {
3585 c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
3586 }
3587
3588 return c_seg;
3589 }
3590
3591 #if DEVELOPMENT || DEBUG
3592 #if CONFIG_FREEZE
3593 extern boolean_t memorystatus_freeze_to_memory;
3594 #endif /* CONFIG_FREEZE */
3595 #endif /* DEVELOPMENT || DEBUG */
3596
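/*
 * Called with the c_seg lock held when the current fill segment is full (or
 * is being closed out).  Depopulates any whole unused pages at the tail,
 * assigns the segment's generation id, moves it from C_IS_FILLING to
 * C_ON_AGE_Q (or to C_ON_SWAPOUT_Q for the freezer and darkwake cases) and
 * clears *current_chead so a new segment is allocated on the next fill.
 */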
3597 static void
3598 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
3599 {
3600 uint32_t unused_bytes;
3601 uint32_t offset_to_depopulate;
3602 int new_state = C_ON_AGE_Q;
3603 clock_sec_t sec;
3604 clock_nsec_t nsec;
3605 boolean_t head_insert = FALSE;
3606
3607 unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
3608
3609 if (unused_bytes) {
3610 offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
3611
3612 /*
3613 * release the extra physical page(s) at the end of the segment
3614 */
3615 lck_mtx_unlock_always(&c_seg->c_lock);
3616
3617 kernel_memory_depopulate(
3618 compressor_map,
3619 (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
3620 unused_bytes,
3621 KMA_COMPRESSOR,
3622 VM_KERN_MEMORY_COMPRESSOR);
3623
3624 lck_mtx_lock_spin_always(&c_seg->c_lock);
3625
3626 c_seg->c_populated_offset = offset_to_depopulate;
3627 }
3628 assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
3629
3630 #if DEVELOPMENT || DEBUG
3631 {
3632 boolean_t c_seg_was_busy = FALSE;
3633
3634 if (!c_seg->c_busy) {
3635 C_SEG_BUSY(c_seg);
3636 } else {
3637 c_seg_was_busy = TRUE;
3638 }
3639
3640 lck_mtx_unlock_always(&c_seg->c_lock);
3641
3642 C_SEG_WRITE_PROTECT(c_seg);
3643
3644 lck_mtx_lock_spin_always(&c_seg->c_lock);
3645
3646 if (c_seg_was_busy == FALSE) {
3647 C_SEG_WAKEUP_DONE(c_seg);
3648 }
3649 }
3650 #endif
3651
3652 #if CONFIG_FREEZE
3653 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
3654 VM_CONFIG_SWAP_IS_PRESENT &&
3655 VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
3656 #if DEVELOPMENT || DEBUG
3657 && !memorystatus_freeze_to_memory
3658 #endif /* DEVELOPMENT || DEBUG */
3659 ) {
3660 new_state = C_ON_SWAPOUT_Q;
3661 }
3662 #endif /* CONFIG_FREEZE */
3663
3664 if (vm_darkwake_mode == TRUE) {
3665 new_state = C_ON_SWAPOUT_Q;
3666 head_insert = TRUE;
3667 }
3668
3669 clock_get_system_nanotime(&sec, &nsec);
3670 c_seg->c_creation_ts = (uint32_t)sec;
3671
3672 lck_mtx_lock_spin_always(c_list_lock);
3673
3674 c_seg->c_generation_id = c_generation_id++;
3675 c_seg_switch_state(c_seg, new_state, head_insert);
3676
3677 #if CONFIG_FREEZE
3678 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3679 /*
3680 * darkwake and freezer can't coexist;
3681 * we'll need to fix this accounting as a start.
3682 */
3683 assert(vm_darkwake_mode == FALSE);
3684 c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
3685 freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
3686 }
3687 #endif /* CONFIG_FREEZE */
3688
3689 if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3690 #if CONFIG_FREEZE
3691 assert(c_seg->c_task_owner == NULL);
3692 #endif /* CONFIG_FREEZE */
3693 c_seg_need_delayed_compaction(c_seg, TRUE);
3694 }
3695
3696 lck_mtx_unlock_always(c_list_lock);
3697
3698 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3699 /*
3700 * Darkwake and Freeze configs always
3701 * wake up the swapout thread because
3702 * the compactor thread that normally handles
3703 * it may not be running as much in these
3704 * configs.
3705 */
3706 thread_wakeup((event_t)&c_swapout_list_head);
3707 }
3708
3709 *current_chead = NULL;
3710 }
3711
3712
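/*
 * Move every segment currently on the major compaction queue back to the
 * age queue.
 */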
3713 static void
3714 vm_compressor_process_major_segments(void)
3715 {
3716 c_segment_t c_seg = NULL, c_seg_next = NULL;
3717 if (!queue_empty(&c_major_list_head)) {
3718 c_seg = (c_segment_t)queue_first(&c_major_list_head);
3719
3720 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3721 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3722 lck_mtx_lock_spin_always(&c_seg->c_lock);
3723 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3724 lck_mtx_unlock_always(&c_seg->c_lock);
3725 c_seg = c_seg_next;
3726 }
3727 }
3728 }
3729
3730 /*
3731 * returns with c_seg locked
3732 */
3733 void
3734 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
3735 {
3736 clock_sec_t sec;
3737 clock_nsec_t nsec;
3738
3739 clock_get_system_nanotime(&sec, &nsec);
3740
3741 lck_mtx_lock_spin_always(c_list_lock);
3742 lck_mtx_lock_spin_always(&c_seg->c_lock);
3743
3744 assert(c_seg->c_busy_swapping);
3745 assert(c_seg->c_busy);
3746
3747 c_seg->c_busy_swapping = 0;
3748
3749 if (c_seg->c_overage_swap == TRUE) {
3750 c_overage_swapped_count--;
3751 c_seg->c_overage_swap = FALSE;
3752 }
3753 if (has_data == TRUE) {
3754 if (age_on_swapin_q == TRUE) {
3755 c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
3756 } else {
3757 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3758 }
3759
3760 if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3761 c_seg_need_delayed_compaction(c_seg, TRUE);
3762 }
3763 } else {
3764 c_seg->c_store.c_buffer = (int32_t*) NULL;
3765 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
3766
3767 c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
3768 }
3769 c_seg->c_swappedin_ts = (uint32_t)sec;
3770 c_seg->c_swappedin = true;
3771
3772 lck_mtx_unlock_always(c_list_lock);
3773 }
3774
3775
3776
3777 /*
3778 * c_seg has to be locked and is returned locked if the c_seg isn't freed
3779 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
3780 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
3781 */
3782
3783 int
3784 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
3785 {
3786 vm_offset_t addr = 0;
3787 uint32_t io_size = 0;
3788 uint64_t f_offset;
3789 thread_pri_floor_t token;
3790
3791 assert(C_SEG_IS_ONDISK(c_seg));
3792
3793 #if !CHECKSUM_THE_SWAP
3794 c_seg_trim_tail(c_seg);
3795 #endif
3796 io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
3797 f_offset = c_seg->c_store.c_swap_handle;
3798
3799 C_SEG_BUSY(c_seg);
3800 c_seg->c_busy_swapping = 1;
3801
3802 /*
3803 * This thread is likely going to block for I/O.
3804 * Make sure it is ready to run when the I/O completes because
3805 * it needs to clear the busy bit on the c_seg so that other
3806 * waiting threads can make progress too.
3807 */
3808 token = thread_priority_floor_start();
3809 lck_mtx_unlock_always(&c_seg->c_lock);
3810
3811 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3812
3813 addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
3814 c_seg->c_store.c_buffer = (int32_t*) addr;
3815
3816 kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3817
3818 if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
3819 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3820
3821 kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3822
3823 c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
3824 } else {
3825 #if ENCRYPTED_SWAP
3826 vm_swap_decrypt(c_seg);
3827 #endif /* ENCRYPTED_SWAP */
3828
3829 #if CHECKSUM_THE_SWAP
3830 if (c_seg->cseg_swap_size != io_size) {
3831 panic("swapin size doesn't match swapout size");
3832 }
3833
3834 if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
3835 panic("c_seg_swapin - Swap hash mismatch");
3836 }
3837 #endif /* CHECKSUM_THE_SWAP */
3838
3839 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3840
3841 c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
3842
3843 #if CONFIG_FREEZE
3844 /*
3845 * c_seg_swapin_requeue() returns with the c_seg lock held.
3846 */
3847 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
3848 assert(c_seg->c_busy);
3849
3850 lck_mtx_unlock_always(&c_seg->c_lock);
3851 lck_mtx_lock_spin_always(c_list_lock);
3852 lck_mtx_lock_spin_always(&c_seg->c_lock);
3853 }
3854
3855 if (c_seg->c_task_owner) {
3856 c_seg_update_task_owner(c_seg, NULL);
3857 }
3858
3859 lck_mtx_unlock_always(c_list_lock);
3860
3861 OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
3862 #endif /* CONFIG_FREEZE */
3863
3864 OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
3865
3866 if (force_minor_compaction == TRUE) {
3867 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
3868 /*
3869 * c_seg was completely empty so it was freed,
3870 * so be careful not to reference it again
3871 *
3872 * Drop the boost so that the thread priority
3873 * is returned back to where it is supposed to be.
3874 */
3875 thread_priority_floor_end(&token);
3876 return 1;
3877 }
3878
3879 lck_mtx_lock_spin_always(&c_seg->c_lock);
3880 }
3881 }
3882 C_SEG_WAKEUP_DONE(c_seg);
3883
3884 /*
3885 * Drop the boost so that the thread priority
3886 * is returned back to where it is supposed to be.
3887 */
3888 thread_priority_floor_end(&token);
3889
3890 return 0;
3891 }
3892
3893
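/*
 * Drop one reference on a single-value hash entry.  The 64-bit he_record is
 * updated with a compare-and-swap so the reference count and data value
 * change atomically; when the count reaches zero the entry is accounted as
 * free again.
 */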
3894 static void
3895 c_segment_sv_hash_drop_ref(int hash_indx)
3896 {
3897 struct c_sv_hash_entry o_sv_he, n_sv_he;
3898
3899 while (1) {
3900 o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
3901
3902 n_sv_he.he_ref = o_sv_he.he_ref - 1;
3903 n_sv_he.he_data = o_sv_he.he_data;
3904
3905 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
3906 if (n_sv_he.he_ref == 0) {
3907 OSAddAtomic(-1, &c_segment_svp_in_hash);
3908 }
3909 break;
3910 }
3911 }
3912 }
3913
3914
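/*
 * Take a reference on the single-value hash entry matching 'data', inserting
 * it if an empty slot is found.  Probes linearly for up to C_SV_HASH_MAX_MISS
 * slots and returns the slot index, or -1 if no reference could be taken.
 */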
3915 static int
3916 c_segment_sv_hash_insert(uint32_t data)
3917 {
3918 int hash_sindx;
3919 int misses;
3920 struct c_sv_hash_entry o_sv_he, n_sv_he;
3921 boolean_t got_ref = FALSE;
3922
3923 if (data == 0) {
3924 OSAddAtomic(1, &c_segment_svp_zero_compressions);
3925 } else {
3926 OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
3927 }
3928
3929 hash_sindx = data & C_SV_HASH_MASK;
3930
3931 for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
3932 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3933
3934 while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
3935 n_sv_he.he_ref = o_sv_he.he_ref + 1;
3936 n_sv_he.he_data = data;
3937
3938 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
3939 if (n_sv_he.he_ref == 1) {
3940 OSAddAtomic(1, &c_segment_svp_in_hash);
3941 }
3942 got_ref = TRUE;
3943 break;
3944 }
3945 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3946 }
3947 if (got_ref == TRUE) {
3948 break;
3949 }
3950 hash_sindx++;
3951
3952 if (hash_sindx == C_SV_HASH_SIZE) {
3953 hash_sindx = 0;
3954 }
3955 }
3956 if (got_ref == FALSE) {
3957 return -1;
3958 }
3959
3960 return hash_sindx;
3961 }
3962
3963
3964 #if RECORD_THE_COMPRESSED_DATA
3965
3966 static void
3967 c_compressed_record_data(char *src, int c_size)
3968 {
3969 if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
3970 panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
3971 }
3972
3973 *(int *)((void *)c_compressed_record_cptr) = c_size;
3974
3975 c_compressed_record_cptr += 4;
3976
3977 memcpy(c_compressed_record_cptr, src, c_size);
3978 c_compressed_record_cptr += c_size;
3979 }
3980 #endif
3981
3982
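/*
 * Compress one page (at 'src') into the segment referenced by 'current_chead'
 * and record its location in 'slot_ptr'.  Incompressible pages are stored
 * uncompressed; pages consisting of a single 32-bit value are stored in the
 * single-value hash (or as a 4-byte slot if no hash entry is available).
 * Returns 0 on success, 1 if a segment could not be allocated.
 */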
3983 static int
3984 c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
3985 {
3986 int c_size;
3987 int c_rounded_size = 0;
3988 int max_csize;
3989 c_slot_t cs;
3990 c_segment_t c_seg;
3991
3992 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
3993 retry:
3994 if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
3995 return 1;
3996 }
3997 /*
3998 * returns with c_seg lock held
3999 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4000 * c_nextslot has been allocated and
4001 * c_store.c_buffer populated
4002 */
4003 assert(c_seg->c_state == C_IS_FILLING);
4004
4005 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4006
4007 C_SLOT_ASSERT_PACKABLE(slot_ptr);
4008 cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4009
4010 cs->c_offset = c_seg->c_nextoffset;
4011
4012 max_csize = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4013
4014 if (max_csize > PAGE_SIZE) {
4015 max_csize = PAGE_SIZE;
4016 }
4017
4018 #if CHECKSUM_THE_DATA
4019 cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4020 #endif
4021 boolean_t incomp_copy = FALSE;
4022 int max_csize_adj = (max_csize - 4);
4023
4024 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4025 #if defined(__arm__) || defined(__arm64__)
4026 uint16_t ccodec = CINVALID;
4027 uint32_t inline_popcount;
4028 if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4029 c_size = metacompressor((const uint8_t *) src,
4030 (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4031 max_csize_adj, &ccodec,
4032 scratch_buf, &incomp_copy, &inline_popcount);
4033 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
4034 cs->c_inline_popcount = inline_popcount;
4035 #else
4036 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4037 #endif
4038
4039 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4040 if (c_size > max_csize_adj) {
4041 c_size = -1;
4042 }
4043 #endif
4044 } else {
4045 c_size = -1;
4046 }
4047 assert(ccodec == CCWK || ccodec == CCLZ4);
4048 cs->c_codec = ccodec;
4049 #endif
4050 } else {
4051 #if defined(__arm__) || defined(__arm64__)
4052 cs->c_codec = CCWK;
4053 #endif
4054 #if defined(__arm64__)
4055 __unreachable_ok_push
4056 if (PAGE_SIZE == 4096) {
4057 c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4058 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4059 } else {
4060 c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4061 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4062 }
4063 __unreachable_ok_pop
4064 #else
4065 c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4066 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4067 #endif
4068 }
4069 assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4070 "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4071
4072 if (c_size == -1) {
4073 if (max_csize < PAGE_SIZE) {
4074 c_current_seg_filled(c_seg, current_chead);
4075 assert(*current_chead == NULL);
4076
4077 lck_mtx_unlock_always(&c_seg->c_lock);
4078 /* TODO: it may be worth requiring codecs to distinguish
4079 * between incompressible inputs and failures due to
4080 * budget exhaustion.
4081 */
4082 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4083 goto retry;
4084 }
4085 c_size = PAGE_SIZE;
4086
4087 if (incomp_copy == FALSE) {
4088 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4089 }
4090
4091 OSAddAtomic(1, &c_segment_noncompressible_pages);
4092 } else if (c_size == 0) {
4093 int hash_index;
4094
4095 /*
4096 * special case - this is a page completely full of a single 32 bit value
4097 */
4098 hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
4099
4100 if (hash_index != -1) {
4101 slot_ptr->s_cindx = hash_index;
4102 slot_ptr->s_cseg = C_SV_CSEG_ID;
4103
4104 OSAddAtomic(1, &c_segment_svp_hash_succeeded);
4105 #if RECORD_THE_COMPRESSED_DATA
4106 c_compressed_record_data(src, 4);
4107 #endif
4108 goto sv_compression;
4109 }
4110 c_size = 4;
4111
4112 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4113
4114 OSAddAtomic(1, &c_segment_svp_hash_failed);
4115 }
4116
4117 #if RECORD_THE_COMPRESSED_DATA
4118 c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4119 #endif
4120 #if CHECKSUM_THE_COMPRESSED_DATA
4121 cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4122 #endif
4123 #if POPCOUNT_THE_COMPRESSED_DATA
4124 cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4125 #endif
4126 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4127
4128 PACK_C_SIZE(cs, c_size);
4129 c_seg->c_bytes_used += c_rounded_size;
4130 c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4131 c_seg->c_slots_used++;
4132
4133 slot_ptr->s_cindx = c_seg->c_nextslot++;
4134 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
4135 slot_ptr->s_cseg = c_seg->c_mysegno + 1;
4136
4137 sv_compression:
4138 if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4139 c_current_seg_filled(c_seg, current_chead);
4140 assert(*current_chead == NULL);
4141 }
4142 lck_mtx_unlock_always(&c_seg->c_lock);
4143
4144 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4145
4146 #if RECORD_THE_COMPRESSED_DATA
4147 if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4148 c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4149 c_compressed_record_cptr = c_compressed_record_sbuf;
4150 }
4151 #endif
4152 if (c_size) {
4153 OSAddAtomic64(c_size, &c_segment_compressed_bytes);
4154 OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
4155 }
4156 OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
4157
4158 OSAddAtomic(1, &c_segment_pages_compressed);
4159 #if CONFIG_FREEZE
4160 OSAddAtomic(1, &c_segment_pages_compressed_incore);
4161 #endif /* CONFIG_FREEZE */
4162 OSAddAtomic(1, &sample_period_compression_count);
4163
4164 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4165
4166 return 0;
4167 }
4168
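/*
 * Fill a page with a repeating 32-bit pattern, using the fastest fill
 * primitive available on the platform.
 */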
4169 static inline void
4170 sv_decompress(int32_t *ddst, int32_t pattern)
4171 {
4172 // assert(__builtin_constant_p(PAGE_SIZE) != 0);
4173 #if defined(__x86_64__)
4174 memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
4175 #elif defined(__arm64__)
4176 assert((PAGE_SIZE % 128) == 0);
4177 if (pattern == 0) {
4178 fill32_dczva((addr64_t)ddst, PAGE_SIZE);
4179 } else {
4180 fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
4181 }
4182 #else
4183 size_t i;
4184
4185 /* Unroll the pattern fill loop 4x to encourage the
4186 * compiler to emit NEON stores, cf.
4187 * <rdar://problem/25839866> Loop autovectorization
4188 * anomalies.
4189 */
4190 /* We use separate loops for each PAGE_SIZE
4191 * to allow the autovectorizer to engage, as PAGE_SIZE
4192 * may not be a constant.
4193 */
4194
4195 __unreachable_ok_push
4196 if (PAGE_SIZE == 4096) {
4197 for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
4198 *ddst++ = pattern;
4199 *ddst++ = pattern;
4200 *ddst++ = pattern;
4201 *ddst++ = pattern;
4202 }
4203 } else {
4204 assert(PAGE_SIZE == 16384);
4205 for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
4206 *ddst++ = pattern;
4207 *ddst++ = pattern;
4208 *ddst++ = pattern;
4209 *ddst++ = pattern;
4210 }
4211 }
4212 __unreachable_ok_pop
4213 #endif
4214 }
4215
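/*
 * Decompress (or free, if 'dst' is NULL) the page described by 'slot_ptr'.
 * Honors C_KEEP (leave the compressed copy in place), C_DONT_BLOCK (return -2
 * rather than wait on a busy or on-disk segment) and C_KDP (called from the
 * debugger, so no blocking or waiting is allowed).  Returns 0 on success, 1 if
 * the segment first had to be swapped in, -1 on a swapin or decompression
 * error and -2 for the C_DONT_BLOCK cases; *zeroslot tells the caller whether
 * to clear the slot mapping.
 */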
4216 static int
4217 c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
4218 {
4219 c_slot_t cs;
4220 c_segment_t c_seg;
4221 uint32_t c_segno;
4222 uint16_t c_indx;
4223 int c_rounded_size;
4224 uint32_t c_size;
4225 int retval = 0;
4226 boolean_t need_unlock = TRUE;
4227 boolean_t consider_defragmenting = FALSE;
4228 boolean_t kdp_mode = FALSE;
4229
4230 if (__improbable(flags & C_KDP)) {
4231 if (not_in_kdp) {
4232 panic("C_KDP passed to decompress page from outside of debugger context");
4233 }
4234
4235 assert((flags & C_KEEP) == C_KEEP);
4236 assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
4237
4238 if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
4239 return -2;
4240 }
4241
4242 kdp_mode = TRUE;
4243 *zeroslot = 0;
4244 }
4245
4246 ReTry:
4247 if (__probable(!kdp_mode)) {
4248 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4249 } else {
4250 if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
4251 return -2;
4252 }
4253 }
4254
4255 #if HIBERNATION
4256 /*
4257 * if hibernation is enabled, it indicates (via a call
4258 * to 'vm_decompressor_lock') that no further
4259 * decompressions are allowed once it reaches
4260 * the point of flushing all of the currently dirty
4261 * anonymous memory through the compressor and out
4262 * to disk... in this state we allow freeing of compressed
4263 * pages and must honor the C_DONT_BLOCK case
4264 */
4265 if (__improbable(dst && decompressions_blocked == TRUE)) {
4266 if (flags & C_DONT_BLOCK) {
4267 if (__probable(!kdp_mode)) {
4268 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4269 }
4270
4271 *zeroslot = 0;
4272 return -2;
4273 }
4274 /*
4275 * it's safe to atomically assert and block behind the
4276 * lock held in shared mode because "decompressions_blocked" is
4277 * only set and cleared, and the thread_wakeup issued, while the lock
4278 * is held exclusively
4279 */
4280 assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
4281
4282 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4283
4284 thread_block(THREAD_CONTINUE_NULL);
4285
4286 goto ReTry;
4287 }
4288 #endif
4289 /* s_cseg is actually "segno+1" */
4290 c_segno = slot_ptr->s_cseg - 1;
4291
4292 if (__improbable(c_segno >= c_segments_available)) {
4293 panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
4294 c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
4295 }
4296
4297 if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
4298 panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
4299 c_segno, slot_ptr, *(int *)((void *)slot_ptr));
4300 }
4301
4302 c_seg = c_segments[c_segno].c_seg;
4303
4304 if (__probable(!kdp_mode)) {
4305 lck_mtx_lock_spin_always(&c_seg->c_lock);
4306 } else {
4307 if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
4308 return -2;
4309 }
4310 }
4311
4312 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4313
4314 if (dst == NULL && c_seg->c_busy_swapping) {
4315 assert(c_seg->c_busy);
4316
4317 goto bypass_busy_check;
4318 }
4319 if (flags & C_DONT_BLOCK) {
4320 if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
4321 *zeroslot = 0;
4322
4323 retval = -2;
4324 goto done;
4325 }
4326 }
4327 if (c_seg->c_busy) {
4328 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4329
4330 c_seg_wait_on_busy(c_seg);
4331
4332 goto ReTry;
4333 }
4334 bypass_busy_check:
4335
4336 c_indx = slot_ptr->s_cindx;
4337
4338 if (__improbable(c_indx >= c_seg->c_nextslot)) {
4339 panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4340 c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4341 }
4342
4343 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4344
4345 c_size = UNPACK_C_SIZE(cs);
4346
4347 if (__improbable(c_size == 0)) {
4348 panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4349 c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4350 }
4351
4352 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4353
4354 if (dst) {
4355 uint32_t age_of_cseg;
4356 clock_sec_t cur_ts_sec;
4357 clock_nsec_t cur_ts_nsec;
4358
4359 if (C_SEG_IS_ONDISK(c_seg)) {
4360 #if CONFIG_FREEZE
4361 if (freezer_incore_cseg_acct) {
4362 if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
4363 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4364 lck_mtx_unlock_always(&c_seg->c_lock);
4365
4366 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4367
4368 goto ReTry;
4369 }
4370
4371 uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
4372 if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
4373 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4374 lck_mtx_unlock_always(&c_seg->c_lock);
4375
4376 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4377
4378 goto ReTry;
4379 }
4380 }
4381 #endif /* CONFIG_FREEZE */
4382 assert(kdp_mode == FALSE);
4383 retval = c_seg_swapin(c_seg, FALSE, TRUE);
4384 assert(retval == 0);
4385
4386 retval = 1;
4387 }
4388 if (c_seg->c_state == C_ON_BAD_Q) {
4389 assert(c_seg->c_store.c_buffer == NULL);
4390 *zeroslot = 0;
4391
4392 retval = -1;
4393 goto done;
4394 }
4395
4396 #if POPCOUNT_THE_COMPRESSED_DATA
4397 unsigned csvpop;
4398 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
4399 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
4400 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
4401 }
4402 #endif
4403
4404 #if CHECKSUM_THE_COMPRESSED_DATA
4405 unsigned csvhash;
4406 if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
4407 panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
4408 }
4409 #endif
4410 if (c_rounded_size == PAGE_SIZE) {
4411 /*
4412 * page wasn't compressible... just copy it out
4413 */
4414 memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
4415 } else if (c_size == 4) {
4416 int32_t data;
4417 int32_t *dptr;
4418
4419 /*
4420 * page was populated with a single value
4421 * that didn't fit into our fast hash
4422 * so we packed it in as a single non-compressed value
4423 * that we need to populate the page with
4424 */
4425 dptr = (int32_t *)(uintptr_t)dst;
4426 data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
4427 sv_decompress(dptr, data);
4428 } else {
4429 uint32_t my_cpu_no;
4430 char *scratch_buf;
4431
4432 if (__probable(!kdp_mode)) {
4433 /*
4434 * we're behind the c_seg lock held in spin mode
4435 * which means pre-emption is disabled... therefore
4436 * the following sequence is atomic and safe
4437 */
4438 my_cpu_no = cpu_number();
4439
4440 assert(my_cpu_no < compressor_cpus);
4441
4442 scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
4443 } else {
4444 scratch_buf = kdp_compressor_scratch_buf;
4445 }
4446
4447 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4448 #if defined(__arm__) || defined(__arm64__)
4449 uint16_t c_codec = cs->c_codec;
4450 uint32_t inline_popcount;
4451 if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4452 (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
4453 retval = -1;
4454 } else {
4455 #if __APPLE_WKDM_POPCNT_EXTENSIONS__
4456 if (inline_popcount != cs->c_inline_popcount) {
4457 /*
4458 * The codec choice in compression and
4459 * decompression must agree, so there
4460 * should never be a disagreement in
4461 * whether an inline population count
4462 * was performed.
4463 */
4464 assert(inline_popcount != C_SLOT_NO_POPCOUNT);
4465 assert(cs->c_inline_popcount != C_SLOT_NO_POPCOUNT);
4466 printf("decompression failure from physical region %llx+%05x: popcount mismatch (%d != %d)\n",
4467 (unsigned long long)kvtophys((uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset]), c_size,
4468 inline_popcount,
4469 cs->c_inline_popcount);
4470 retval = -1;
4471 }
4472 #else
4473 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4474 #endif /* __APPLE_WKDM_POPCNT_EXTENSIONS__ */
4475 }
4476 #endif
4477 } else {
4478 #if defined(__arm64__)
4479 __unreachable_ok_push
4480 if (PAGE_SIZE == 4096) {
4481 WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4482 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4483 } else {
4484 WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4485 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4486 }
4487 __unreachable_ok_pop
4488 #else
4489 WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4490 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4491 #endif
4492 }
4493 }
4494
4495 #if CHECKSUM_THE_DATA
4496 if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
4497 #if defined(__arm__) || defined(__arm64__)
4498 int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
4499 panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
4500 #else
4501 panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
4502 #endif
4503 }
4504 #endif
4505 if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
4506 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
4507
4508 age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
4509 if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
4510 OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
4511 } else {
4512 OSAddAtomic(1, &overage_decompressions_during_sample_period);
4513 }
4514
4515 OSAddAtomic(1, &sample_period_decompression_count);
4516 }
4517 }
4518 #if CONFIG_FREEZE
4519 else {
4520 /*
4521 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
4522 */
4523 if (C_SEG_IS_ONDISK(c_seg)) {
4524 /*
4525 * The compression sweep feature will push out anonymous pages to disk
4526 * without going through the freezer path and so those c_segs, while
4527 * swapped out, won't have an owner.
4528 */
4529 if (c_seg->c_task_owner) {
4530 task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
4531 }
4532
4533 /*
4534 * We are freeing a page in swap without swapping it in. We bump the in-core
4535 * count here to simulate a swapin of a page so that we can accurately
4536 * decrement it below.
4537 */
4538 OSAddAtomic(1, &c_segment_pages_compressed_incore);
4539 }
4540 }
4541 #endif /* CONFIG_FREEZE */
4542
4543 if (flags & C_KEEP) {
4544 *zeroslot = 0;
4545 goto done;
4546 }
4547 assert(kdp_mode == FALSE);
4548
4549 c_seg->c_bytes_unused += c_rounded_size;
4550 c_seg->c_bytes_used -= c_rounded_size;
4551
4552 assert(c_seg->c_slots_used);
4553 c_seg->c_slots_used--;
4554 if (dst && c_seg->c_swappedin) {
4555 task_t task = current_task();
4556 if (task) {
4557 ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
4558 }
4559 }
4560
4561 PACK_C_SIZE(cs, 0);
4562
4563 if (c_indx < c_seg->c_firstemptyslot) {
4564 c_seg->c_firstemptyslot = c_indx;
4565 }
4566
4567 OSAddAtomic(-1, &c_segment_pages_compressed);
4568 #if CONFIG_FREEZE
4569 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4570 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
4571 #endif /* CONFIG_FREEZE */
4572
4573 if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
4574 /*
4575 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
4576 * free of a compressed page (i.e. dst == NULL)
4577 */
4578 OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
4579 }
4580 if (c_seg->c_busy_swapping) {
4581 /*
4582 * bypass case for c_busy_swapping...
4583 * let the swapin/swapout paths deal with putting
4584 * the c_seg on the minor compaction queue if needed
4585 */
4586 assert(c_seg->c_busy);
4587 goto done;
4588 }
4589 assert(!c_seg->c_busy);
4590
4591 if (c_seg->c_state != C_IS_FILLING) {
4592 if (c_seg->c_bytes_used == 0) {
4593 if (!(C_SEG_IS_ONDISK(c_seg))) {
4594 int pages_populated;
4595
4596 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
4597 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4598
4599 if (pages_populated) {
4600 assert(c_seg->c_state != C_ON_BAD_Q);
4601 assert(c_seg->c_store.c_buffer != NULL);
4602
4603 C_SEG_BUSY(c_seg);
4604 lck_mtx_unlock_always(&c_seg->c_lock);
4605
4606 kernel_memory_depopulate(compressor_map,
4607 (vm_offset_t) c_seg->c_store.c_buffer,
4608 pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
4609
4610 lck_mtx_lock_spin_always(&c_seg->c_lock);
4611 C_SEG_WAKEUP_DONE(c_seg);
4612 }
4613 if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) {
4614 c_seg_need_delayed_compaction(c_seg, FALSE);
4615 }
4616 } else {
4617 if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
4618 c_seg_move_to_sparse_list(c_seg);
4619 consider_defragmenting = TRUE;
4620 }
4621 }
4622 } else if (c_seg->c_on_minorcompact_q) {
4623 assert(c_seg->c_state != C_ON_BAD_Q);
4624 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
4625
4626 if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
4627 c_seg_try_minor_compaction_and_unlock(c_seg);
4628 need_unlock = FALSE;
4629 }
4630 } else if (!(C_SEG_IS_ONDISK(c_seg))) {
4631 if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
4632 C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4633 c_seg_need_delayed_compaction(c_seg, FALSE);
4634 }
4635 } else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
4636 c_seg_move_to_sparse_list(c_seg);
4637 consider_defragmenting = TRUE;
4638 }
4639 }
4640 done:
4641 if (__improbable(kdp_mode)) {
4642 return retval;
4643 }
4644
4645 if (need_unlock == TRUE) {
4646 lck_mtx_unlock_always(&c_seg->c_lock);
4647 }
4648
4649 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4650
4651 if (consider_defragmenting == TRUE) {
4652 vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
4653 }
4654
4655 #if !XNU_TARGET_OS_OSX
4656 if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) {
4657 vm_wake_compactor_swapper();
4658 }
4659 #endif /* !XNU_TARGET_OS_OSX */
4660
4661 return retval;
4662 }
4663
4664
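/*
 * Decompress the page backed by '*slot' into the physical page 'pn'.
 * Single-value slots are reconstituted directly from the hash table;
 * everything else goes through c_decompress_page().
 */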
4665 int
4666 vm_compressor_get(ppnum_t pn, int *slot, int flags)
4667 {
4668 c_slot_mapping_t slot_ptr;
4669 char *dst;
4670 int zeroslot = 1;
4671 int retval;
4672
4673 dst = pmap_map_compressor_page(pn);
4674 slot_ptr = (c_slot_mapping_t)slot;
4675
4676 assert(dst != NULL);
4677
4678 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4679 int32_t data;
4680 int32_t *dptr;
4681
4682 /*
4683 * page was populated with a single value
4684 * that found a home in our hash table;
4685 * grab that value from the hash and use it
4686 * to repopulate the page
4687 */
4688 dptr = (int32_t *)(uintptr_t)dst;
4689 data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
4690 sv_decompress(dptr, data);
4691 if (!(flags & C_KEEP)) {
4692 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4693
4694 OSAddAtomic(-1, &c_segment_pages_compressed);
4695 #if CONFIG_FREEZE
4696 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4697 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4698 #endif /* CONFIG_FREEZE */
4699 *slot = 0;
4700 }
4701 if (data) {
4702 OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
4703 } else {
4704 OSAddAtomic(1, &c_segment_svp_zero_decompressions);
4705 }
4706
4707 pmap_unmap_compressor_page(pn, dst);
4708 return 0;
4709 }
4710
4711 retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
4712
4713 /*
4714 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
4715 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
4716 */
4717 if (zeroslot) {
4718 *slot = 0;
4719 }
4720
4721 pmap_unmap_compressor_page(pn, dst);
4722
4723 /*
4724 * returns 0 if we successfully decompressed a page from a segment already in memory
4725 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
4726 * returns -1 if we encountered an error swapping in the segment - decompression failed
4727 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
4728 */
4729 return retval;
4730 }
4731
4732 #if DEVELOPMENT || DEBUG
4733
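/*
 * Flip a single bit in the compressed data backing '*slot' so that the
 * decompression-failure handling paths can be exercised from a test.
 */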
4734 void
4735 vm_compressor_inject_error(int *slot)
4736 {
4737 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
4738
4739 /* No error detection for single-value compression. */
4740 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4741 printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
4742 return;
4743 }
4744
4745 /* s_cseg is actually "segno+1" */
4746 const uint32_t c_segno = slot_ptr->s_cseg - 1;
4747
4748 assert(c_segno < c_segments_available);
4749 assert(c_segments[c_segno].c_segno >= c_segments_available);
4750
4751 const c_segment_t c_seg = c_segments[c_segno].c_seg;
4752
4753 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4754
4755 lck_mtx_lock_spin_always(&c_seg->c_lock);
4756 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4757
4758 const uint16_t c_indx = slot_ptr->s_cindx;
4759 assert(c_indx < c_seg->c_nextslot);
4760
4761 /*
4762 * To safely make this segment temporarily writable, we need to mark
4763 * the segment busy, which allows us to release the segment lock.
4764 */
4765 while (c_seg->c_busy) {
4766 c_seg_wait_on_busy(c_seg);
4767 lck_mtx_lock_spin_always(&c_seg->c_lock);
4768 }
4769 C_SEG_BUSY(c_seg);
4770
4771 bool already_writable = (c_seg->c_state == C_IS_FILLING);
4772 if (!already_writable) {
4773 /*
4774 * Protection update must be performed preemptibly, so temporarily drop
4775 * the lock. Having set c_busy will prevent most other concurrent
4776 * operations.
4777 */
4778 lck_mtx_unlock_always(&c_seg->c_lock);
4779 C_SEG_MAKE_WRITEABLE(c_seg);
4780 lck_mtx_lock_spin_always(&c_seg->c_lock);
4781 }
4782
4783 /*
4784 * Once we've released the lock following our c_state == C_IS_FILLING check,
4785 * c_current_seg_filled() can (re-)write-protect the segment. However, it
4786 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
4787 * can detect this by re-checking after we've reobtained the lock.
4788 */
4789 if (already_writable && c_seg->c_state != C_IS_FILLING) {
4790 lck_mtx_unlock_always(&c_seg->c_lock);
4791 C_SEG_MAKE_WRITEABLE(c_seg);
4792 lck_mtx_lock_spin_always(&c_seg->c_lock);
4793 already_writable = false;
4794 /* Segment can't be freed while c_busy is set. */
4795 assert(c_seg->c_state != C_IS_FILLING);
4796 }
4797
4798 /*
4799 * Skip if the segment is on disk. This check can only be performed after
4800 * the final acquisition of the segment lock before we attempt to write to
4801 * the segment.
4802 */
4803 if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
4804 c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4805 int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
4806 /* assume that the compressed data holds at least one int32_t */
4807 assert(UNPACK_C_SIZE(cs) > sizeof(*data));
4808 /*
4809 * This bit is known to be in the payload of a MISS packet resulting from
4810 * the pattern used in the test pattern from decompression_failure.c.
4811 * Flipping it should result in many corrupted bits in the test page.
4812 */
4813 data[0] ^= 0x00000100;
4814 }
4815
4816 if (!already_writable) {
4817 lck_mtx_unlock_always(&c_seg->c_lock);
4818 C_SEG_WRITE_PROTECT(c_seg);
4819 lck_mtx_lock_spin_always(&c_seg->c_lock);
4820 }
4821
4822 C_SEG_WAKEUP_DONE(c_seg);
4823 lck_mtx_unlock_always(&c_seg->c_lock);
4824
4825 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4826 }
4827
4828 #endif /* DEVELOPMENT || DEBUG */
4829
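/*
 * Release the compressed copy backing '*slot' without decompressing it.
 */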
4830 int
4831 vm_compressor_free(int *slot, int flags)
4832 {
4833 c_slot_mapping_t slot_ptr;
4834 int zeroslot = 1;
4835 int retval;
4836
4837 assert(flags == 0 || flags == C_DONT_BLOCK);
4838
4839 slot_ptr = (c_slot_mapping_t)slot;
4840
4841 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4842 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4843 OSAddAtomic(-1, &c_segment_pages_compressed);
4844 #if CONFIG_FREEZE
4845 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4846 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4847 #endif /* CONFIG_FREEZE */
4848
4849 *slot = 0;
4850 return 0;
4851 }
4852 retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
4853 /*
4854 * returns 0 if we successfully freed the specified compressed page
4855 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
4856 */
4857
4858 if (retval == 0) {
4859 *slot = 0;
4860 } else {
4861 assert(retval == -2);
4862 }
4863
4864 return retval;
4865 }
4866
4867
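/*
 * Compress the physical page 'pn' into the current fill segment and record
 * its new location in '*slot'.
 */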
4868 int
4869 vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
4870 {
4871 char *src;
4872 int retval;
4873
4874 src = pmap_map_compressor_page(pn);
4875 assert(src != NULL);
4876
4877 retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
4878 pmap_unmap_compressor_page(pn, src);
4879
4880 return retval;
4881 }
4882
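/*
 * Move ownership of a compressed page from 'src_slot_p' to 'dst_slot_p' by
 * re-pointing the backing c_slot at the destination mapping; the compressed
 * data itself does not move.
 */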
4883 void
4884 vm_compressor_transfer(
4885 int *dst_slot_p,
4886 int *src_slot_p)
4887 {
4888 c_slot_mapping_t dst_slot, src_slot;
4889 c_segment_t c_seg;
4890 uint16_t c_indx;
4891 c_slot_t cs;
4892
4893 src_slot = (c_slot_mapping_t) src_slot_p;
4894
4895 if (src_slot->s_cseg == C_SV_CSEG_ID) {
4896 *dst_slot_p = *src_slot_p;
4897 *src_slot_p = 0;
4898 return;
4899 }
4900 dst_slot = (c_slot_mapping_t) dst_slot_p;
4901 Retry:
4902 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4903 /* get segment for src_slot */
4904 c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
4905 /* lock segment */
4906 lck_mtx_lock_spin_always(&c_seg->c_lock);
4907 /* wait if it's busy */
4908 if (c_seg->c_busy && !c_seg->c_busy_swapping) {
4909 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4910 c_seg_wait_on_busy(c_seg);
4911 goto Retry;
4912 }
4913 /* find the c_slot */
4914 c_indx = src_slot->s_cindx;
4915 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4916 /* point the c_slot back to dst_slot instead of src_slot */
4917 C_SLOT_ASSERT_PACKABLE(dst_slot);
4918 cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
4919 /* transfer */
4920 *dst_slot_p = *src_slot_p;
4921 *src_slot_p = 0;
4922 lck_mtx_unlock_always(&c_seg->c_lock);
4923 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4924 }
4925
4926 #if defined(__arm64__)
4927 extern clock_sec_t vm_swapfile_last_failed_to_create_ts;
4928 __attribute__((noreturn))
4929 void
4930 vm_panic_hibernate_write_image_failed(int err)
4931 {
4932 panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d\n",
4933 err,
4934 VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
4935 c_age_count, c_major_count, c_minor_count, c_swapout_count, c_swappedout_sparse_count,
4936 vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
4937 (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out);
4938 }
4939 #endif /*(__arm64__)*/
4940
4941 #if CONFIG_FREEZE
4942
4943 int freezer_finished_filling = 0;
4944
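/*
 * Close out the freezer's current fill segment (if any) via
 * c_current_seg_filled().
 */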
4945 void
4946 vm_compressor_finished_filling(
4947 void **current_chead)
4948 {
4949 c_segment_t c_seg;
4950
4951 if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
4952 return;
4953 }
4954
4955 assert(c_seg->c_state == C_IS_FILLING);
4956
4957 lck_mtx_lock_spin_always(&c_seg->c_lock);
4958
4959 c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
4960
4961 lck_mtx_unlock_always(&c_seg->c_lock);
4962
4963 freezer_finished_filling++;
4964 }
4965
4966
4967 /*
4968 * This routine is used to transfer the compressed chunks from
4969 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
4970 * by the current_chead and a new cindx within that c_seg.
4971 *
4972 * Currently, this routine is only used by the "freezer backed by
4973 * compressor with swap" mode to create a series of c_segs that
4974 * only contain compressed data belonging to one task. So, we
4975 * move a task's previously compressed data into a set of new
4976 * c_segs which will also hold the task's yet to be compressed data.
4977 */
4978
4979 kern_return_t
4980 vm_compressor_relocate(
4981 void **current_chead,
4982 int *slot_p)
4983 {
4984 c_slot_mapping_t slot_ptr;
4985 c_slot_mapping_t src_slot;
4986 uint32_t c_rounded_size;
4987 uint32_t c_size;
4988 uint16_t dst_slot;
4989 c_slot_t c_dst;
4990 c_slot_t c_src;
4991 uint16_t c_indx;
4992 c_segment_t c_seg_dst = NULL;
4993 c_segment_t c_seg_src = NULL;
4994 kern_return_t kr = KERN_SUCCESS;
4995
4996
4997 src_slot = (c_slot_mapping_t) slot_p;
4998
4999 if (src_slot->s_cseg == C_SV_CSEG_ID) {
5000 /*
5001 * no need to relocate... this is a page full of a single
5002 * value which is hashed to a single entry not contained
5003 * in a c_segment_t
5004 */
5005 return kr;
5006 }
5007
5008 Relookup_dst:
5009 c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
5010 /*
5011 * returns with c_seg lock held
5012 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5013 * c_nextslot has been allocated and
5014 * c_store.c_buffer populated
5015 */
5016 if (c_seg_dst == NULL) {
5017 /*
5018 * Out of compression segments?
5019 */
5020 kr = KERN_RESOURCE_SHORTAGE;
5021 goto out;
5022 }
5023
5024 assert(c_seg_dst->c_busy == 0);
5025
5026 C_SEG_BUSY(c_seg_dst);
5027
5028 dst_slot = c_seg_dst->c_nextslot;
5029
5030 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5031
5032 Relookup_src:
5033 c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
5034
5035 assert(c_seg_dst != c_seg_src);
5036
5037 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5038
5039 if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5040 c_seg_src->c_state == C_IS_FILLING) {
5041 /*
5042 * Skip this page if:
5043 * a) the src c_seg is already on-disk (or on its way there)
5044 * A "thaw" can mark a process as eligible for
5045 * another freeze cycle without bringing any of
5046 * its swapped out c_segs back from disk (because
5047 * that is done on-demand).
5048 * Or, this page may be mapped elsewhere in the task's map,
5049 * and we may have marked it for swap already.
5050 *
5051 * b) Or, the src c_seg is being filled by the compressor
5052 * thread. We don't want the added latency of waiting for
5053 * this c_seg in the freeze path and so we skip it.
5054 */
5055
5056 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5057
5058 lck_mtx_unlock_always(&c_seg_src->c_lock);
5059
5060 c_seg_src = NULL;
5061
5062 goto out;
5063 }
5064
5065 if (c_seg_src->c_busy) {
5066 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5067 c_seg_wait_on_busy(c_seg_src);
5068
5069 c_seg_src = NULL;
5070
5071 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5072
5073 goto Relookup_src;
5074 }
5075
5076 C_SEG_BUSY(c_seg_src);
5077
5078 lck_mtx_unlock_always(&c_seg_src->c_lock);
5079
5080 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5081
5082 /* find the c_slot */
5083 c_indx = src_slot->s_cindx;
5084
5085 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5086
5087 c_size = UNPACK_C_SIZE(c_src);
5088
5089 assert(c_size);
5090
5091 if (c_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5092 /*
5093 * This segment is full. We need a new one.
5094 */
5095
5096 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5097
5098 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5099 C_SEG_WAKEUP_DONE(c_seg_src);
5100 lck_mtx_unlock_always(&c_seg_src->c_lock);
5101
5102 c_seg_src = NULL;
5103
5104 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5105
5106 assert(c_seg_dst->c_busy);
5107 assert(c_seg_dst->c_state == C_IS_FILLING);
5108 assert(!c_seg_dst->c_on_minorcompact_q);
5109
5110 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5111 assert(*current_chead == NULL);
5112
5113 C_SEG_WAKEUP_DONE(c_seg_dst);
5114
5115 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5116
5117 c_seg_dst = NULL;
5118
5119 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5120
5121 goto Relookup_dst;
5122 }
5123
5124 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5125
5126 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
5127 /*
5128 * Is platform alignment actually necessary since wkdm aligns its output?
5129 */
5130 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5131
5132 cslot_copy(c_dst, c_src);
5133 c_dst->c_offset = c_seg_dst->c_nextoffset;
5134
5135 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5136 c_seg_dst->c_firstemptyslot++;
5137 }
5138
5139 c_seg_dst->c_slots_used++;
5140 c_seg_dst->c_nextslot++;
5141 c_seg_dst->c_bytes_used += c_rounded_size;
5142 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5143
5144
5145 PACK_C_SIZE(c_src, 0);
5146
5147 c_seg_src->c_bytes_used -= c_rounded_size;
5148 c_seg_src->c_bytes_unused += c_rounded_size;
5149
5150 assert(c_seg_src->c_slots_used);
5151 c_seg_src->c_slots_used--;
5152
5153 if (!c_seg_src->c_swappedin) {
5154 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
5155 c_seg_dst->c_swappedin = false;
5156 }
5157
5158 if (c_indx < c_seg_src->c_firstemptyslot) {
5159 c_seg_src->c_firstemptyslot = c_indx;
5160 }
5161
5162 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
5163
5164 PAGE_REPLACEMENT_ALLOWED(TRUE);
5165 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
5166 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
5167 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
5168 slot_ptr->s_cindx = dst_slot;
5169
5170 PAGE_REPLACEMENT_ALLOWED(FALSE);
5171
5172 out:
5173 if (c_seg_src) {
5174 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5175
5176 C_SEG_WAKEUP_DONE(c_seg_src);
5177
5178 if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
5179 if (!c_seg_src->c_on_minorcompact_q) {
5180 c_seg_need_delayed_compaction(c_seg_src, FALSE);
5181 }
5182 }
5183
5184 lck_mtx_unlock_always(&c_seg_src->c_lock);
5185 }
5186
5187 if (c_seg_dst) {
5188 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5189
5190 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5191
5192 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
5193 /*
5194 * Nearing or exceeded maximum slot and offset capacity.
5195 */
5196 assert(c_seg_dst->c_busy);
5197 assert(c_seg_dst->c_state == C_IS_FILLING);
5198 assert(!c_seg_dst->c_on_minorcompact_q);
5199
5200 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5201 assert(*current_chead == NULL);
5202 }
5203
5204 C_SEG_WAKEUP_DONE(c_seg_dst);
5205
5206 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5207
5208 c_seg_dst = NULL;
5209
5210 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5211 }
5212
5213 return kr;
5214 }
5215 #endif /* CONFIG_FREEZE */
5216