1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_compressor_internal.h>
30
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache_internal.h>
33 #endif
34
35 #include <vm/vm_map_xnu.h>
36 #include <vm/vm_pageout_xnu.h>
37 #include <vm/vm_map_internal.h>
38 #include <vm/memory_object.h>
39 #include <vm/vm_compressor_algorithms_internal.h>
40 #include <vm/vm_compressor_backing_store_internal.h>
41 #include <vm/vm_fault.h>
42 #include <vm/vm_protos.h>
43 #include <vm/vm_kern_xnu.h>
44 #include <vm/vm_compressor_pager_internal.h>
45 #include <vm/vm_iokit.h>
46 #include <vm/vm_far.h>
47 #include <mach/mach_host.h> /* for host_info() */
48 #if DEVELOPMENT || DEBUG
49 #include <kern/hvg_hypercall.h>
50 #include <vm/vm_compressor_info.h> /* for c_segment_info */
51 #endif
52 #include <kern/ledger.h>
53 #include <kern/policy_internal.h>
54 #include <kern/thread_group.h>
55 #include <san/kasan.h>
56 #include <sys/kern_memorystatus_xnu.h>
57 #include <os/atomic_private.h>
58 #include <vm/vm_log.h>
59 #include <pexpert/pexpert.h>
60 #include <pexpert/device_tree.h>
61
62 #if defined(__x86_64__)
63 #include <i386/misc_protos.h>
64 #endif
65 #if defined(__arm64__)
66 #include <arm/machine_routines.h>
67 #endif
68 #if HAS_MTE
69 #include <arm64/mte_xnu.h>
70 #include <arm64/vm_mte_compress.h>
71 #endif /* HAS_MTE */
72
73 #include <IOKit/IOHibernatePrivate.h>
74
75 /*
76 * The segment buffer size is a tradeoff.
77 * A larger buffer leads to faster I/O throughput, better compression ratios
78 * (since fewer bytes are wasted at the end of the segment),
79 * and less overhead (both in time and space).
80 * However, a smaller buffer causes less swap I/O when the system is overcommitted,
81 * because a higher percentage of the swapped-in segment is actually accessed
82 * before it goes back out to storage.
83 *
84 * So on systems without swap, a larger segment is a clear win.
85 * On systems with swap, the choice is murkier. Empirically, we've
86 * found that a 64KB segment provides a better tradeoff both in terms of
87 * performance and swap writes than a 256KB segment on systems with fast SSDs
88 * and a HW compression block.
89 */
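/*
 * Illustrative arithmetic for the tradeoff above (assumed page sizes, not a
 * statement of any particular device's configuration): with the 64KB buffer
 * and 16KB pages, a segment's compressed data spans at most
 * c_seg_bufsize / PAGE_SIZE = 65536 / 16384 = 4 pages, while the 256KB default
 * with 4KB pages gives 262144 / 4096 = 64 pages per segment
 * (c_seg_max_pages is computed this way in vm_compressor_set_size()).
 */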
90 #define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
91 #if XNU_TARGET_OS_OSX && defined(__arm64__)
92 #define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
93 #else
94 #define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
95 #endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
96 uint32_t c_seg_bufsize;
97
98 uint32_t c_seg_max_pages; /* maximum number of pages the compressed data of a segment can take */
99 uint32_t c_seg_off_limit; /* if we've reached this offset while filling a segment, don't bother trying to fill it any further
100 * because success is unlikely; in units of uint32_t, same as c_nextoffset */
101 uint32_t c_seg_allocsize, c_seg_slot_var_array_min_len;
102
103 extern boolean_t vm_darkwake_mode;
104 extern zone_t vm_page_zone;
105
106 #if DEVELOPMENT || DEBUG
107 /* sysctl defined in bsd/dev/arm64/sysctl.c */
108 static event_t debug_cseg_wait_event = NULL;
109 #endif /* DEVELOPMENT || DEBUG */
110
111 #if CONFIG_FREEZE
112 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
113 #endif /* CONFIG_FREEZE */
114
115 #if POPCOUNT_THE_COMPRESSED_DATA
116 boolean_t popcount_c_segs = TRUE;
117
118 static inline uint32_t
119 vmc_pop(uintptr_t ins, int sz)
120 {
121 uint32_t rv = 0;
122
123 if (__probable(popcount_c_segs == FALSE)) {
124 return 0xDEAD707C;
125 }
126
127 while (sz >= 16) {
128 uint32_t rv1, rv2;
129 uint64_t *ins64 = (uint64_t *) ins;
130 uint64_t *ins642 = (uint64_t *) (ins + 8);
131 rv1 = __builtin_popcountll(*ins64);
132 rv2 = __builtin_popcountll(*ins642);
133 rv += rv1 + rv2;
134 sz -= 16;
135 ins += 16;
136 }
137
138 while (sz >= 4) {
139 uint32_t *ins32 = (uint32_t *) ins;
140 rv += __builtin_popcount(*ins32);
141 sz -= 4;
142 ins += 4;
143 }
144
145 while (sz > 0) {
146 char *ins8 = (char *)ins;
147 rv += __builtin_popcount(*ins8);
148 sz--;
149 ins++;
150 }
151 return rv;
152 }
153 #endif
154
155 #if VALIDATE_C_SEGMENTS
156 boolean_t validate_c_segs = TRUE;
157 #endif
158 /*
159 * vm_compressor_mode has a hierarchy of control to set its value.
160 * boot-args are checked first, then device-tree, and finally
161 * the default value that is defined below. See vm_fault_init() for
162 * the boot-arg & device-tree code.
163 */
164
165 #if !XNU_TARGET_OS_OSX
166
167 #if CONFIG_FREEZE
168 int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
169 struct freezer_context freezer_context_global;
170 #else /* CONFIG_FREEZE */
171 int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
172 #endif /* CONFIG_FREEZE */
173
174 #else /* !XNU_TARGET_OS_OSX */
175 int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
176
177 #endif /* !XNU_TARGET_OS_OSX */
178
179 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
180 boolean_t vm_compressor_is_active = 0;
181 boolean_t vm_compressor_available = 0;
182
183 extern uint64_t vm_swap_get_max_configured_space(void);
184 extern void vm_pageout_io_throttle(void);
185
186 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
187 extern unsigned int hash_string(char *cp, int len);
188 static unsigned int vmc_hash(char *, int);
189 boolean_t checksum_c_segs = TRUE;
190
191 unsigned int
192 vmc_hash(char *cp, int len)
193 {
194 unsigned int result;
195 if (__probable(checksum_c_segs == FALSE)) {
196 return 0xDEAD7A37;
197 }
198 vm_memtag_disable_checking();
199 result = hash_string(cp, len);
200 vm_memtag_enable_checking();
201 return result;
202 }
203 #endif
204
205 #define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
206 #define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
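/*
 * Worked example of the encoding above: an incompressible page is kept at its
 * full size, so PACK_C_SIZE(cs, PAGE_SIZE) stores c_size = PAGE_SIZE - 1
 * (e.g. 4095 with 4KB pages), keeping the stored value strictly below PAGE_SIZE;
 * UNPACK_C_SIZE() maps that sentinel back to PAGE_SIZE. Any smaller compressed
 * size is stored and returned unchanged.
 */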
207
208
209 struct c_sv_hash_entry {
210 union {
211 struct {
212 uint32_t c_sv_he_ref;
213 uint32_t c_sv_he_data;
214 } c_sv_he;
215 uint64_t c_sv_he_record;
216 } c_sv_he_un;
217 };
218
219 #define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref
220 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
221 #define he_record c_sv_he_un.c_sv_he_record
222
223 #define C_SV_HASH_MAX_MISS 32
224 #define C_SV_HASH_SIZE ((1 << 10))
225 #define C_SV_HASH_MASK ((1 << 10) - 1)
226
227 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
228 #define C_SV_CSEG_ID ((1 << 21) - 1)
229 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
230 #define C_SV_CSEG_ID ((1 << 22) - 1)
231 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
232
233 /* elements of c_segments array */
234 union c_segu {
235 c_segment_t c_seg;
236 uintptr_t c_segno; /* index of the next element in the segments free-list, c_free_segno_head is the head */
237 };
238
239 #define C_SLOT_ASSERT_PACKABLE(ptr) \
240 VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
241
242 #define C_SLOT_PACK_PTR(ptr) \
243 VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
244
245 #define C_SLOT_UNPACK_PTR(cslot) \
246 (c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
247
248 /* for debugging purposes */
249 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
250 VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
251
252 uint32_t c_segment_count = 0; /* count all allocated c_segments in all queues */
253 uint32_t c_segment_count_max = 0; /* maximum c_segment_count has ever been */
254
255 uint64_t c_generation_id = 0;
256 uint64_t c_generation_id_flush_barrier;
257
258 boolean_t hibernate_no_swapspace = FALSE;
259 boolean_t hibernate_flush_timed_out = FALSE;
260 clock_sec_t hibernate_flushing_deadline = 0;
261
262 #if RECORD_THE_COMPRESSED_DATA
263 /* buffer used as an intermediate stage before writing to file */
264 char *c_compressed_record_sbuf; /* start */
265 char *c_compressed_record_ebuf; /* end */
266 char *c_compressed_record_cptr; /* next buffered write */
267 #endif
268
269 /* the different queues a c_segment can be in via c_age_list */
270 queue_head_t c_age_list_head;
271 queue_head_t c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
272 queue_head_t c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
273 queue_head_t c_swapio_list_head;
274 queue_head_t c_swappedout_list_head;
275 queue_head_t c_swappedout_sparse_list_head;
276 queue_head_t c_major_list_head;
277 queue_head_t c_filling_list_head;
278 queue_head_t c_bad_list_head;
279
280 /* count of each of the queues above */
281 uint32_t c_age_count = 0;
282 uint32_t c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
283 uint32_t c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
284 uint32_t c_swapio_count = 0;
285 uint32_t c_swappedout_count = 0;
286 uint32_t c_swappedout_sparse_count = 0;
287 uint32_t c_major_count = 0;
288 uint32_t c_filling_count = 0;
289 uint32_t c_empty_count = 0;
290 uint32_t c_bad_count = 0;
291
292 /* a c_segment can be in the minor-compact queue as well as one of the above ones, via c_list */
293 queue_head_t c_minor_list_head;
294 uint32_t c_minor_count = 0;
295
296 int c_overage_swapped_count = 0;
297 int c_overage_swapped_limit = 0;
298
299 int c_seg_fixed_array_len; /* number of slots in the c_segment inline slots array */
300 union c_segu *c_segments; /* array of all c_segments, not all of it may be populated */
301 vm_offset_t c_buffers; /* starting address of all compressed data pointed to by c_segment.c_store.c_buffer */
302 vm_size_t c_buffers_size; /* total size allocated in c_buffers */
303 caddr_t c_segments_next_page; /* next page to populate for extending c_segments */
304 boolean_t c_segments_busy;
305 uint32_t c_segments_available; /* how many segments are in populated memory (used or free), populated size of c_segments array */
306 uint32_t c_segments_limit; /* max size of c_segments array */
307 uint32_t c_segments_nearing_limit;
308
309 uint32_t c_segment_svp_in_hash;
310 uint32_t c_segment_svp_hash_succeeded;
311 uint32_t c_segment_svp_hash_failed;
312 uint32_t c_segment_svp_zero_compressions;
313 uint32_t c_segment_svp_nonzero_compressions;
314 uint32_t c_segment_svp_zero_decompressions;
315 uint32_t c_segment_svp_nonzero_decompressions;
316
317 uint32_t c_segment_noncompressible_pages;
318
319 uint32_t c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor, including SV (single value) pages */
320 #if CONFIG_FREEZE
321 int32_t c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
322 int32_t c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
323 uint32_t c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
324 #endif /* CONFIG_FREEZE */
325
326 uint32_t c_segment_pages_compressed_limit;
327 uint32_t c_segment_pages_compressed_nearing_limit;
328 uint32_t c_free_segno_head = (uint32_t)-1; /* head of free list of c_segment pointers in c_segments */
329
330 uint32_t vm_compressor_minorcompact_threshold_divisor = 10;
331 uint32_t vm_compressor_majorcompact_threshold_divisor = 10;
332 uint32_t vm_compressor_unthrottle_threshold_divisor = 10;
333 uint32_t vm_compressor_catchup_threshold_divisor = 10;
334
335 uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0;
336 uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0;
337 uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0;
338 uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0;
339
340 #define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu))
341
342 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
343 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
344 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
345
346 boolean_t decompressions_blocked = FALSE;
347
348 zone_t compressor_segment_zone;
349 int c_compressor_swap_trigger = 0;
350
351 uint32_t compressor_cpus;
352 char *compressor_scratch_bufs;
353
354 struct vm_compressor_kdp_state vm_compressor_kdp_state;
355
356 clock_sec_t start_of_sample_period_sec = 0;
357 clock_nsec_t start_of_sample_period_nsec = 0;
358 clock_sec_t start_of_eval_period_sec = 0;
359 clock_nsec_t start_of_eval_period_nsec = 0;
360 uint32_t sample_period_decompression_count = 0;
361 uint32_t sample_period_compression_count = 0;
362 uint32_t last_eval_decompression_count = 0;
363 uint32_t last_eval_compression_count = 0;
364
365 #define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30)
366
367 boolean_t vm_swapout_ripe_segments = FALSE;
368 uint32_t vm_ripe_target_age = (60 * 60 * 48);
369
370 uint32_t swapout_target_age = 0;
371 uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
372 uint32_t overage_decompressions_during_sample_period = 0;
373
374
375 void do_fastwake_warmup(queue_head_t *, boolean_t);
376 boolean_t fastwake_warmup = FALSE;
377 boolean_t fastwake_recording_in_progress = FALSE;
378 uint64_t dont_trim_until_ts = 0;
379
380 uint64_t c_segment_warmup_count;
381 uint64_t first_c_segment_to_warm_generation_id = 0;
382 uint64_t last_c_segment_to_warm_generation_id = 0;
383 boolean_t hibernate_flushing = FALSE;
384
385 _Atomic uint64_t c_segment_input_bytes = 0;
386 _Atomic uint64_t c_segment_compressed_bytes = 0;
387 _Atomic uint64_t compressor_bytes_used = 0;
388
389 /* Keeps track of the most recent timestamp for when major compaction finished. */
390 mach_timespec_t major_compact_ts;
391
392 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8)));
393
394 static void vm_compressor_swap_trigger_thread(void);
395 static void vm_compressor_do_delayed_compactions(boolean_t);
396 static void vm_compressor_compact_and_swap(boolean_t);
397 static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
398 static void vm_compressor_process_special_swapped_in_segments_locked(void);
399
400 struct vm_compressor_swapper_stats vmcs_stats;
401
402 static void vm_compressor_process_major_segments(bool);
403
404 void compute_swapout_target_age(void);
405
406 boolean_t c_seg_coalesce(c_segment_t, c_segment_t);
407 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
408
409 int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
410 int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
411 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
412
413 void c_seg_move_to_sparse_list(c_segment_t);
414 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
415
416 uint64_t vm_available_memory(void);
417
418 /*
419 * Get the address of a given entry in the c_segments array
420 */
421 static inline union c_segu *
422 c_segments_get(uint32_t segno)
423 {
424 return VM_FAR_ADD_PTR_UNBOUNDED(c_segments, segno);
425 }
426
427 /*
428 * indicate the need to do a major compaction if
429 * the overall set of in-use compression segments
430 * becomes sparse... on systems that support pressure
431 * driven swapping, this will also cause swapouts to
432 * be initiated.
433 */
434 static bool
435 vm_compressor_needs_to_major_compact(void)
436 {
437 uint32_t incore_seg_count;
438
439 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
440
441 /* first condition:
442 * compare the number of segments in use against the number of segments that can ever be allocated;
443 * if we don't have much data in the compressor, we don't need to care about wasted space in holes
444 * second condition:
445 * first term:
446 * - (incore_seg_count * c_seg_max_pages) is the maximum number of pages that all resident segments can hold in their buffers
447 * - VM_PAGE_COMPRESSOR_COUNT is the number of pages those buffers actually hold
448 * -- subtracting the two gives the number of pages wasted as holes due to segments not being full
449 * second term:
450 * - 1/8 of the maximum number of pages that can be held by this many segments
451 * meaning of the comparison: is more than 1/8 of the resident capacity wasted in holes?
452 */
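/*
 * Hypothetical numbers, purely for illustration: with 1000 resident segments
 * and c_seg_max_pages = 64, capacity is 64000 pages; if VM_PAGE_COMPRESSOR_COUNT
 * is 54000, then 10000 pages sit in holes, which exceeds (1000 / 8) * 64 = 8000,
 * so a major compaction would be indicated (provided the first condition also holds).
 */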
453
454 if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
455 ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
456 ((incore_seg_count / 8) * c_seg_max_pages)) {
457 return true;
458 }
459 return false;
460 }
461
462 uint32_t
463 vm_compressor_get_swapped_segment_count(void)
464 {
465 return c_swappedout_count + c_swappedout_sparse_count;
466 }
467
468 uint32_t
469 vm_compressor_incore_fragmentation_wasted_pages(void)
470 {
471 /* return one of the components of the calculation in vm_compressor_needs_to_major_compact() */
472 uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
473 return (incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT;
474 }
475
476 TUNABLE_WRITEABLE(uint64_t, vm_compressor_minor_fragmentation_threshold_pct, "vm_compressor_minor_frag_threshold_pct", 10);
477
478 static bool
479 vm_compressor_needs_to_minor_compact(void)
480 {
481 uint32_t compactible_seg_count = os_atomic_load(&c_minor_count, relaxed);
482 if (compactible_seg_count == 0) {
483 return false;
484 }
485
486 bool is_pressured = AVAILABLE_NON_COMPRESSED_MEMORY <
487 VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD;
488 if (!is_pressured) {
489 return false;
490 }
491
492 uint64_t bytes_used = os_atomic_load(&compressor_bytes_used, relaxed);
493 uint64_t bytes_total = VM_PAGE_COMPRESSOR_COUNT * PAGE_SIZE_64;
494 uint64_t bytes_frag = bytes_total - bytes_used;
495 bool is_fragmented = bytes_frag >
496 bytes_total * vm_compressor_minor_fragmentation_threshold_pct / 100;
497
498 return is_fragmented;
499 }
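/*
 * Rough illustration of the thresholds above, with assumed numbers: given the
 * default vm_compressor_minor_fragmentation_threshold_pct of 10 and a pool of
 * 100000 16KB pages (bytes_total ~= 1.6GB), minor compaction is requested once
 * more than ~160MB of that pool is holes -- and only while the system is under
 * pressure per the AVAILABLE_NON_COMPRESSED_MEMORY check.
 */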
500
501 uint64_t
502 vm_available_memory(void)
503 {
504 return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
505 }
506
507 uint32_t
508 vm_compressor_pool_size(void)
509 {
510 return VM_PAGE_COMPRESSOR_COUNT;
511 }
512
513 uint32_t
514 vm_compressor_fragmentation_level(void)
515 {
516 const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
517 if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
518 return 0;
519 }
520 return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
521 }
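/*
 * Worked example with illustrative values: 1000 resident segments,
 * c_seg_max_pages = 64 and a pool of 54000 pages give
 * 100 - (54000 * 100) / 64000 = 100 - 84 = 16, i.e. roughly 16% of the
 * resident segment capacity is currently holes.
 */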
522
523 uint32_t
524 vm_compression_ratio(void)
525 {
526 if (vm_compressor_pool_size() == 0) {
527 return UINT32_MAX;
528 }
529 return c_segment_pages_compressed / vm_compressor_pool_size();
530 }
531
532 uint32_t
533 vm_compressor_pages_compressed(void)
534 {
535 #if CONFIG_FREEZE
536 if (freezer_incore_cseg_acct) {
537 return os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
538 }
539 #endif /* CONFIG_FREEZE */
540 return os_atomic_load(&c_segment_pages_compressed, relaxed);
541 }
542
543 bool
544 vm_compressor_compressed_pages_nearing_limit(void)
545 {
546 return vm_compressor_pages_compressed() > c_segment_pages_compressed_nearing_limit;
547 }
548
549 static bool
550 vm_compressor_segments_nearing_limit(void)
551 {
552 uint64_t segments;
553
554 #if CONFIG_FREEZE
555 if (freezer_incore_cseg_acct) {
556 if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
557 segments = 0;
558 }
559 if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
560 segments = 0;
561 }
562 } else {
563 segments = os_atomic_load(&c_segment_count, relaxed);
564 }
565 #else /* CONFIG_FREEZE */
566 segments = c_segment_count;
567 #endif /* CONFIG_FREEZE */
568
569 return segments > c_segments_nearing_limit;
570 }
571
572 bool
573 vm_compressor_low_on_space(void)
574 {
575 return vm_compressor_compressed_pages_nearing_limit() ||
576 vm_compressor_segments_nearing_limit();
577 }
578
579
580 bool
581 vm_compressor_out_of_space(void)
582 {
583 #if CONFIG_FREEZE
584 uint64_t incore_seg_count;
585 uint32_t incore_compressed_pages;
586 if (freezer_incore_cseg_acct) {
587 if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
588 incore_seg_count = 0;
589 }
590 if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
591 incore_seg_count = 0;
592 }
593 incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
594 } else {
595 incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
596 incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
597 }
598
599 if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
600 (incore_seg_count > c_segments_incore_limit)) {
601 return true;
602 }
603 #else /* CONFIG_FREEZE */
604 if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
605 (c_segment_count >= c_segments_limit)) {
606 return true;
607 }
608 #endif /* CONFIG_FREEZE */
609 return false;
610 }
611
612 bool
613 vm_compressor_is_thrashing()
614 {
615 compute_swapout_target_age();
616
617 if (swapout_target_age) {
618 c_segment_t c_seg;
619
620 lck_mtx_lock_spin_always(c_list_lock);
621
622 if (!queue_empty(&c_age_list_head)) {
623 c_seg = (c_segment_t) queue_first(&c_age_list_head);
624
625 if (c_seg->c_creation_ts > swapout_target_age) {
626 swapout_target_age = 0;
627 }
628 }
629 lck_mtx_unlock_always(c_list_lock);
630 }
631
632 return swapout_target_age != 0;
633 }
634
635
636 int
637 vm_wants_task_throttled(task_t task)
638 {
639 ledger_amount_t compressed;
640 if (task == kernel_task) {
641 return 0;
642 }
643
644 if (VM_CONFIG_SWAP_IS_ACTIVE) {
645 if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
646 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
647 compressed >>= VM_MAP_PAGE_SHIFT(task->map);
648 if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
649 return 1;
650 }
651 }
652 }
653 return 0;
654 }
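/*
 * Example of the policy above (hypothetical ledger values): with swap active and
 * the compressor low on space, a 16KB-page task whose internal_compressed ledger
 * balance is 8GB (524288 pages after the shift) would be throttled once the
 * system-wide c_segment_pages_compressed is below 4 * 524288 = 2097152 pages,
 * i.e. whenever the task accounts for more than a quarter of all compressed pages.
 */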
655
656 #if CONFIG_JETSAM
657 bool memorystatus_disable_swap(void);
658 #if CONFIG_PHANTOM_CACHE
659 extern bool memorystatus_phantom_cache_pressure;
660 #endif /* CONFIG_PHANTOM_CACHE */
661 int compressor_thrashing_induced_jetsam = 0;
662 int filecache_thrashing_induced_jetsam = 0;
663 static boolean_t vm_compressor_thrashing_detected = FALSE;
664 #endif /* CONFIG_JETSAM */
665
666 void
667 vm_decompressor_lock(void)
668 {
669 PAGE_REPLACEMENT_ALLOWED(TRUE);
670
671 decompressions_blocked = TRUE;
672
673 PAGE_REPLACEMENT_ALLOWED(FALSE);
674 }
675
676 void
677 vm_decompressor_unlock(void)
678 {
679 PAGE_REPLACEMENT_ALLOWED(TRUE);
680
681 decompressions_blocked = FALSE;
682
683 PAGE_REPLACEMENT_ALLOWED(FALSE);
684
685 thread_wakeup((event_t)&decompressions_blocked);
686 }
687
688 static inline void
689 cslot_copy(c_slot_t cdst, c_slot_t csrc)
690 {
691 #if CHECKSUM_THE_DATA
692 cdst->c_hash_data = csrc->c_hash_data;
693 #endif
694 #if CHECKSUM_THE_COMPRESSED_DATA
695 cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
696 #endif
697 #if POPCOUNT_THE_COMPRESSED_DATA
698 cdst->c_pop_cdata = csrc->c_pop_cdata;
699 #endif
700 cdst->c_size = csrc->c_size;
701 #if HAS_MTE
702 cdst->c_mte_size = csrc->c_mte_size;
703 #endif
704 cdst->c_packed_ptr = csrc->c_packed_ptr;
705 #if defined(__arm64__)
706 cdst->c_codec = csrc->c_codec;
707 #endif
708 }
709
710 #if XNU_TARGET_OS_OSX
711 #define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
712 #else
713 #define VM_COMPRESSOR_MAX_POOL_SIZE (0)
714 #endif
715
716 static vm_map_size_t compressor_size;
717 static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
718 vm_map_t compressor_map;
719 uint64_t compressor_pool_max_size;
720 uint64_t compressor_pool_size;
721 uint32_t compressor_pool_multiplier;
722
723 #if DEVELOPMENT || DEBUG
724 /*
725 * Compressor segments are write-protected in development/debug
726 * kernels to help debug memory corruption.
727 * In cases where performance is a concern, this can be disabled
728 * via the boot-arg "-disable_cseg_write_protection".
729 */
730 boolean_t write_protect_c_segs = TRUE;
731 int vm_compressor_test_seg_wp;
732 uint32_t vm_ktrace_enabled;
733 #endif /* DEVELOPMENT || DEBUG */
734
735 #if (XNU_TARGET_OS_OSX && __arm64__)
736
737 #include <IOKit/IOPlatformExpert.h>
738 #include <sys/random.h>
739
740 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
741 static thread_call_t csegbufsz_experiment_thread_call;
742
743 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
744 static void
745 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
746 {
747 // Wait for NVRAM to be writable
748 if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
749 printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
750 }
751
752 if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
753 printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
754 }
755 thread_call_free(csegbufsz_experiment_thread_call);
756 }
757
758 static void
759 erase_csegbufsz_experiment_property_async()
760 {
761 csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
762 erase_csegbufsz_experiment_property,
763 NULL,
764 THREAD_CALL_PRIORITY_LOW
765 );
766 if (csegbufsz_experiment_thread_call == NULL) {
767 printf("csegbufsize_experiment_property: Unable to allocate thread call.");
768 } else {
769 thread_call_enter(csegbufsz_experiment_thread_call);
770 }
771 }
772
773 static void
774 cleanup_csegbufsz_experiment(__unused void *arg0)
775 {
776 char nvram = 0;
777 unsigned int len = sizeof(nvram);
778 if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
779 erase_csegbufsz_experiment_property_async();
780 }
781 }
782
783 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
784 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
785
786 #if CONFIG_JETSAM
787 extern unsigned int memorystatus_swap_all_apps;
788 #endif /* CONFIG_JETSAM */
789
790 TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);
791
792 static void
793 vm_compressor_set_size(void)
794 {
795 /*
796 * Note that this function may be called multiple times on systems with app swap
797 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
798 * can change based on the size of the swap volume. On these systems, we'll call
799 * this function once early in boot to reserve the maximum amount of VA required
800 * for the compressor submap and then one more time in vm_compressor_init after
801 * determining the swap volume size. The size computed the second time must not
802 * be larger than the first.
803 */
804 vm_size_t c_segments_arr_size = 0;
805 struct c_slot_mapping tmp_slot_ptr;
806
807 /* The segment size can be overwritten by a boot-arg */
808 if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
809 #if CONFIG_JETSAM
810 if (memorystatus_swap_all_apps) {
811 c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
812 } else {
813 c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
814 }
815 #else
816 c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
817 #endif /* CONFIG_JETSAM */
818 }
819
820 vm_compressor_swap_init_swap_file_limit();
821 if (vm_compression_limit) {
822 compressor_pool_size = ptoa_64(vm_compression_limit);
823 }
824
825 compressor_pool_max_size = C_SEG_MAX_LIMIT;
826 compressor_pool_max_size *= c_seg_bufsize;
827
828 #if XNU_TARGET_OS_OSX
829
830 if (vm_compression_limit == 0) {
831 if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
832 compressor_pool_size = 16ULL * max_mem;
833 } else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
834 compressor_pool_size = 8ULL * max_mem;
835 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
836 compressor_pool_size = 4ULL * max_mem;
837 } else {
838 compressor_pool_size = 2ULL * max_mem;
839 }
840 }
841 /*
842 * Cap the compressor pool size to a max of 192G
843 */
844 if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
845 compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
846 }
847 if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
848 compressor_pool_multiplier = 1;
849 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
850 compressor_pool_multiplier = 2;
851 } else {
852 compressor_pool_multiplier = 4;
853 }
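/*
 * For instance (illustrative only): a Mac with max_mem = 16GB falls into the
 * "<= 32GB" bucket above, so compressor_pool_size = 4 * 16GB = 64GB of reserved
 * VA (well under the 192GB cap) and compressor_pool_multiplier = 2.
 */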
854
855 #else
856
857 if (compressor_pool_max_size > max_mem) {
858 compressor_pool_max_size = max_mem;
859 }
860
861 if (vm_compression_limit == 0) {
862 compressor_pool_size = max_mem;
863 }
864
865 #if XNU_TARGET_OS_WATCH
866 compressor_pool_multiplier = 2;
867 #elif XNU_TARGET_OS_IOS
868 if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
869 compressor_pool_multiplier = 2;
870 } else {
871 compressor_pool_multiplier = 1;
872 }
873 #else
874 compressor_pool_multiplier = 1;
875 #endif
876
877 #endif
878
879 PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
880 if (compressor_pool_multiplier < 1) {
881 compressor_pool_multiplier = 1;
882 }
883
884 if (compressor_pool_size > compressor_pool_max_size) {
885 compressor_pool_size = compressor_pool_max_size;
886 }
887
888 c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
889 c_seg_slot_var_array_min_len = c_seg_max_pages;
890
891 #if !defined(__x86_64__)
892 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
893 c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
894 #else
895 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
896 c_seg_allocsize = c_seg_bufsize;
897 #endif /* !defined(__x86_64__) */
898
899 c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
900 tmp_slot_ptr.s_cseg = c_segments_limit;
901 /* Panic on internal configs*/
902 assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
903
904 if (tmp_slot_ptr.s_cseg != c_segments_limit) {
905 tmp_slot_ptr.s_cseg = -1;
906 c_segments_limit = tmp_slot_ptr.s_cseg - 1; /*limited by segment idx bits in c_slot_mapping*/
907 compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
908 }
909
910 c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
911
912 /* an upper limit on how many input pages the compressor can hold */
913 c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
914
915 if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
916 #if defined(XNU_TARGET_OS_WATCH)
917 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
918 #else
919 if (!vm_compression_limit) {
920 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
921 }
922 #endif
923 }
924
925 c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
926
927 #if CONFIG_FREEZE
928 /*
929 * Our in-core limits are based on the size of the compressor pool.
930 * The c_segments_nearing_limit is also based on the compressor pool
931 * size and calculated above.
932 */
933 c_segments_incore_limit = c_segments_limit;
934
935 if (freezer_incore_cseg_acct) {
936 /*
937 * Add enough segments to track all frozen c_segs that can be stored in swap.
938 */
939 c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
940 tmp_slot_ptr.s_cseg = c_segments_limit;
941 /* Panic on internal configs*/
942 assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
943 }
944 #endif
945 /*
946 * Submap needs space for:
947 * - c_segments
948 * - c_buffers
949 * - swap reclamations -- c_seg_bufsize
950 */
951 c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
952 c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
953
954 compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
955
956 #if RECORD_THE_COMPRESSED_DATA
957 c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
958 compressor_size += c_compressed_record_sbuf_size;
959 #endif /* RECORD_THE_COMPRESSED_DATA */
960 }
961 STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);
962
963 KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
964 return compressor_size;
965 });
966
967 bool
968 osenvironment_is_diagnostics(void)
969 {
970 DTEntry chosen;
971 const char *osenvironment;
972 unsigned int size;
973 if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
974 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
975 return strcmp(osenvironment, "diagnostics") == 0;
976 }
977 }
978 return false;
979 }
980
981 bool
982 osenvironment_is_device_recovery(void)
983 {
984 DTEntry chosen;
985 const char *osenvironment;
986 unsigned int size;
987 if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
988 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
989 return strcmp(osenvironment, "device-recovery") == 0;
990 }
991 }
992 return false;
993 }
994
995 void
996 vm_compressor_init(void)
997 {
998 thread_t thread;
999 #if RECORD_THE_COMPRESSED_DATA
1000 vm_size_t c_compressed_record_sbuf_size = 0;
1001 #endif /* RECORD_THE_COMPRESSED_DATA */
1002
1003 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
1004 char bootarg_name[32];
1005 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
1006 __unused uint64_t early_boot_compressor_size = compressor_size;
1007
1008 #if CONFIG_JETSAM
1009 if (memorystatus_swap_all_apps &&
1010 (osenvironment_is_diagnostics() || osenvironment_is_device_recovery())) {
1011 printf("osenvironment == \"diagnostics or device-recovery\". Disabling app swap.\n");
1012 memorystatus_disable_swap();
1013 }
1014
1015 if (memorystatus_swap_all_apps) {
1016 /*
1017 * App swap is disabled on devices with small NANDs.
1018 * Now that we're no longer in early boot, we can get
1019 * the NAND size and re-run vm_compressor_set_size.
1020 */
1021 int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
1022 #if DEVELOPMENT || DEBUG
1023 if (error != 0) {
1024 panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1025 }
1026 #else
1027 if (error != 0) {
1028 vm_log_error("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1029 }
1030 #endif /* DEVELOPMENT || DEBUG */
1031 if (vm_swap_volume_capacity < swap_vol_min_capacity) {
1032 memorystatus_disable_swap();
1033 }
1034 /*
1035 * Resize the compressor and swap now that we know the capacity
1036 * of the swap volume.
1037 */
1038 vm_compressor_set_size();
1039 /*
1040 * We reserved a chunk of VA early in boot for the compressor submap.
1041 * We can't allocate more than that.
1042 */
1043 assert(compressor_size <= early_boot_compressor_size);
1044 }
1045 #endif /* CONFIG_JETSAM */
1046
1047 #if DEVELOPMENT || DEBUG
1048 if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
1049 write_protect_c_segs = FALSE;
1050 }
1051
1052 int vmcval = 1;
1053 #if defined(XNU_TARGET_OS_WATCH)
1054 vmcval = 0;
1055 #endif /* XNU_TARGET_OS_WATCH */
1056 PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
1057
1058 if (kern_feature_override(KF_COMPRSV_OVRD)) {
1059 vmcval = 0;
1060 }
1061
1062 if (vmcval == 0) {
1063 #if POPCOUNT_THE_COMPRESSED_DATA
1064 popcount_c_segs = FALSE;
1065 #endif
1066 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
1067 checksum_c_segs = FALSE;
1068 #endif
1069 #if VALIDATE_C_SEGMENTS
1070 validate_c_segs = FALSE;
1071 #endif
1072 write_protect_c_segs = FALSE;
1073 }
1074 #endif /* DEVELOPMENT || DEBUG */
1075
1076 #if CONFIG_FREEZE
1077 if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
1078 freezer_incore_cseg_acct = FALSE;
1079 }
1080 #endif /* CONFIG_FREEZE */
1081
1082 assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
1083
1084 #if !XNU_TARGET_OS_OSX
1085 vm_compressor_minorcompact_threshold_divisor = 20;
1086 vm_compressor_majorcompact_threshold_divisor = 30;
1087 vm_compressor_unthrottle_threshold_divisor = 40;
1088 vm_compressor_catchup_threshold_divisor = 60;
1089 #else /* !XNU_TARGET_OS_OSX */
1090 if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
1091 vm_compressor_minorcompact_threshold_divisor = 11;
1092 vm_compressor_majorcompact_threshold_divisor = 13;
1093 vm_compressor_unthrottle_threshold_divisor = 20;
1094 vm_compressor_catchup_threshold_divisor = 35;
1095 } else {
1096 vm_compressor_minorcompact_threshold_divisor = 20;
1097 vm_compressor_majorcompact_threshold_divisor = 25;
1098 vm_compressor_unthrottle_threshold_divisor = 35;
1099 vm_compressor_catchup_threshold_divisor = 50;
1100 }
1101 #endif /* !XNU_TARGET_OS_OSX */
1102
1103 queue_init(&c_bad_list_head);
1104 queue_init(&c_age_list_head);
1105 queue_init(&c_minor_list_head);
1106 queue_init(&c_major_list_head);
1107 queue_init(&c_filling_list_head);
1108 queue_init(&c_early_swapout_list_head);
1109 queue_init(&c_regular_swapout_list_head);
1110 queue_init(&c_late_swapout_list_head);
1111 queue_init(&c_swapio_list_head);
1112 queue_init(&c_early_swappedin_list_head);
1113 queue_init(&c_regular_swappedin_list_head);
1114 queue_init(&c_late_swappedin_list_head);
1115 queue_init(&c_swappedout_list_head);
1116 queue_init(&c_swappedout_sparse_list_head);
1117
1118 c_free_segno_head = -1;
1119 c_segments_available = 0;
1120
1121 compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
1122 compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
1123 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1124 KMS_NOFAIL | KMS_PERMANENT | KMS_NOSOFTLIMIT,
1125 VM_KERN_MEMORY_COMPRESSOR).kmr_submap;
1126
1127 kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
1128 (sizeof(union c_segu) * c_segments_limit),
1129 KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
1130 VM_KERN_MEMORY_COMPRESSOR);
1131 kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
1132 KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT | KMA_NOSOFTLIMIT,
1133 VM_KERN_MEMORY_COMPRESSOR);
1134
1135 #if DEVELOPMENT || DEBUG
1136 if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
1137 hvg_hcall_set_coredump_data();
1138 }
1139 #endif
1140
1141 /*
1142 * Pick a segment structure size that minimizes fragmentation in zalloc
1143 * by minimizing the waste left over in a 16k allocation run.
1144 *
1145 * c_seg_slot_var_array_min_len is larger on 4k systems than on 16k ones,
1146 * which would make the fragmentation within a single 4k page terrible.
1147 * Sizing against a 16k run on all systems matches zalloc() and minimizes waste.
1148 */
1149 uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
1150 uint32_t cnt = (16 << 10) / c_segment_size;
1151 uint32_t frag = (16 << 10) % c_segment_size;
1152
1153 c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
1154
1155 while (cnt * sizeof(struct c_slot) < frag) {
1156 c_segment_size += sizeof(struct c_slot);
1157 c_seg_fixed_array_len++;
1158 frag -= cnt * sizeof(struct c_slot);
1159 }
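/*
 * Sketch of the loop above with made-up sizes (the real struct sizes differ):
 * if c_segment_size started at 1000 bytes and sizeof(struct c_slot) were 8,
 * then cnt = 16384 / 1000 = 16 and frag = 384; since 16 * 8 = 128 < 384 the
 * fixed array grows twice (frag 384 -> 256 -> 128) before 128 < 128 fails,
 * leaving 16 segments per 16k run with only 128 bytes of waste.
 */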
1160
1161 compressor_segment_zone = zone_create("compressor_segment",
1162 c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
1163
1164 c_segments_busy = FALSE;
1165
1166 c_segments_next_page = (caddr_t)c_segments;
1167 vm_compressor_algorithm_init();
1168
1169 {
1170 host_basic_info_data_t hinfo;
1171 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1172 size_t bufsize;
1173 char *buf;
1174
1175 #define BSD_HOST 1
1176 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
1177
1178 compressor_cpus = hinfo.max_cpus;
1179
1180 /* allocate various scratch buffers at the same place */
1181 bufsize = PAGE_SIZE;
1182 bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
1183 /* For the panic path */
1184 bufsize += vm_compressor_get_decode_scratch_size();
1185 #if CONFIG_FREEZE
1186 bufsize += vm_compressor_get_encode_scratch_size();
1187 #endif
1188 #if RECORD_THE_COMPRESSED_DATA
1189 bufsize += c_compressed_record_sbuf_size;
1190 #endif
1191
1192 kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1193 KMA_DATA_SHARED | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
1194 VM_KERN_MEMORY_COMPRESSOR);
1195
1196 /*
1197 * vm_compressor_kdp_state.kc_panic_decompressed_page must be page aligned because we access
1198 * it through the physical aperture by page number.
1199 */
1200 vm_compressor_kdp_state.kc_panic_decompressed_page = buf;
1201 vm_compressor_kdp_state.kc_panic_decompressed_page_paddr = kvtophys((vm_offset_t)vm_compressor_kdp_state.kc_panic_decompressed_page);
1202 vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum = (ppnum_t) atop(vm_compressor_kdp_state.kc_panic_decompressed_page_paddr);
1203 buf += PAGE_SIZE;
1204 bufsize -= PAGE_SIZE;
1205
1206 compressor_scratch_bufs = buf;
1207 buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
1208 bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
1209
1210 vm_compressor_kdp_state.kc_panic_scratch_buf = buf;
1211 buf += vm_compressor_get_decode_scratch_size();
1212 bufsize -= vm_compressor_get_decode_scratch_size();
1213
1214 /* This is set up before each stackshot in vm_compressor_kdp_init */
1215 vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1216
1217 #if CONFIG_FREEZE
1218 freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
1219 buf += vm_compressor_get_encode_scratch_size();
1220 bufsize -= vm_compressor_get_encode_scratch_size();
1221 #endif
1222
1223 #if RECORD_THE_COMPRESSED_DATA
1224 c_compressed_record_sbuf = buf;
1225 c_compressed_record_cptr = buf;
1226 c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
1227 buf += c_compressed_record_sbuf_size;
1228 bufsize -= c_compressed_record_sbuf_size;
1229 #endif
1230 assert(bufsize == 0);
1231 }
1232
1233 if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
1234 BASEPRI_VM, &thread) != KERN_SUCCESS) {
1235 panic("vm_compressor_swap_trigger_thread: create failed");
1236 }
1237 thread_deallocate(thread);
1238
1239 if (vm_pageout_internal_start() != KERN_SUCCESS) {
1240 panic("vm_compressor_init: Failed to start the internal pageout thread.");
1241 }
1242 if (VM_CONFIG_SWAP_IS_PRESENT) {
1243 vm_compressor_swap_init();
1244 }
1245
1246 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1247 vm_compressor_is_active = 1;
1248 }
1249
1250 vm_compressor_available = 1;
1251
1252 vm_page_reactivate_all_throttled();
1253
1254 bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1255 }
1256
1257 #define COMPRESSOR_KDP_BUFSIZE (\
1258 (vm_compressor_get_decode_scratch_size() * compressor_cpus) + \
1259 (PAGE_SIZE * compressor_cpus) + \
1260 (sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus) + \
1261 (sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus))
1262
1263
1264 /**
1265 * Initializes the VM compressor in preparation for a stackshot.
1266 * Stackshot mutex must be held.
1267 */
1268 kern_return_t
1269 vm_compressor_kdp_init(void)
1270 {
1271 char *buf;
1272 kern_return_t err;
1273 size_t bufsize;
1274 size_t total_decode_size;
1275
1276 #if DEVELOPMENT || DEBUG
1277 extern lck_mtx_t stackshot_subsys_mutex;
1278 lck_mtx_assert(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1279 #endif /* DEVELOPMENT || DEBUG */
1280
1281 if (!vm_compressor_available) {
1282 return KERN_SUCCESS;
1283 }
1284
1285 bufsize = COMPRESSOR_KDP_BUFSIZE;
1286
1287 /* Allocate the per-cpu decompression pages. */
1288 err = kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1289 KMA_DATA_SHARED | KMA_NOFAIL | KMA_KOBJECT,
1290 VM_KERN_MEMORY_COMPRESSOR);
1291
1292 if (err != KERN_SUCCESS) {
1293 return err;
1294 }
1295
1296 assert(vm_compressor_kdp_state.kc_scratch_bufs == NULL);
1297 vm_compressor_kdp_state.kc_scratch_bufs = buf;
1298 total_decode_size = vm_compressor_get_decode_scratch_size() * compressor_cpus;
1299 buf += total_decode_size;
1300 bufsize -= total_decode_size;
1301
1302 /*
1303 * vm_compressor_kdp_state.kc_decompressed_pages must be page aligned because we access
1304 * them through the physical aperture by page number.
1305 */
1306 assert(vm_compressor_kdp_state.kc_decompressed_pages == NULL);
1307 vm_compressor_kdp_state.kc_decompressed_pages = buf;
1308 buf += PAGE_SIZE * compressor_cpus;
1309 bufsize -= PAGE_SIZE * compressor_cpus;
1310
1311 /* Scary! This will be aligned, I promise :) */
1312 assert(((vm_address_t) buf) % _Alignof(addr64_t) == 0);
1313 assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr == NULL);
1314 vm_compressor_kdp_state.kc_decompressed_pages_paddr = (addr64_t*) (void*) buf;
1315 buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1316 bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1317
1318 assert(((vm_address_t) buf) % _Alignof(ppnum_t) == 0);
1319 assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum == NULL);
1320 vm_compressor_kdp_state.kc_decompressed_pages_ppnum = (ppnum_t*) (void*) buf;
1321 buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1322 bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1323
1324 assert(bufsize == 0);
1325
1326 for (size_t i = 0; i < compressor_cpus; i++) {
1327 vm_offset_t offset = (vm_offset_t) &vm_compressor_kdp_state.kc_decompressed_pages[i * PAGE_SIZE];
1328 vm_compressor_kdp_state.kc_decompressed_pages_paddr[i] = kvtophys(offset);
1329 vm_compressor_kdp_state.kc_decompressed_pages_ppnum[i] = (ppnum_t) atop(vm_compressor_kdp_state.kc_decompressed_pages_paddr[i]);
1330 }
1331
1332 return KERN_SUCCESS;
1333 }
1334
1335 /*
1336 * Frees up compressor buffers used by stackshot.
1337 * Stackshot mutex must be held.
1338 */
1339 void
1340 vm_compressor_kdp_teardown(void)
1341 {
1342 extern lck_mtx_t stackshot_subsys_mutex;
1343 LCK_MTX_ASSERT(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1344
1345 if (vm_compressor_kdp_state.kc_scratch_bufs == NULL) {
1346 return;
1347 }
1348
1349 /* Deallocate the per-cpu decompression pages. */
1350 kmem_free(kernel_map, (vm_offset_t) vm_compressor_kdp_state.kc_scratch_bufs, COMPRESSOR_KDP_BUFSIZE);
1351
1352 vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1353 vm_compressor_kdp_state.kc_decompressed_pages = NULL;
1354 vm_compressor_kdp_state.kc_decompressed_pages_paddr = 0;
1355 vm_compressor_kdp_state.kc_decompressed_pages_ppnum = 0;
1356 }
1357
1358 static uint32_t
1359 c_slot_extra_size(c_slot_t cs)
1360 {
1361 #if HAS_MTE
1362 return vm_mte_compressed_tags_actual_size(cs->c_mte_size);
1363 #else /* HAS_MTE */
1364 #pragma unused(cs)
1365 return 0;
1366 #endif /* HAS_MTE */
1367 }
1368
1369 #if VALIDATE_C_SEGMENTS
1370
1371 static void
1372 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1373 {
1374 uint16_t c_indx;
1375 int32_t bytes_used;
1376 uint32_t c_rounded_size;
1377 uint32_t c_size;
1378 c_slot_t cs;
1379
1380 if (__probable(validate_c_segs == FALSE)) {
1381 return;
1382 }
1383 if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1384 c_indx = c_seg->c_firstemptyslot;
1385 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1386
1387 if (cs == NULL) {
1388 panic("c_seg_validate: no slot backing c_firstemptyslot");
1389 }
1390
1391 if (cs->c_size) {
1392 panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)", cs->c_size);
1393 }
1394 }
1395 bytes_used = 0;
1396
1397 for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1398 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1399
1400 c_size = UNPACK_C_SIZE(cs);
1401
1402 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
1403
1404 bytes_used += c_rounded_size;
1405
1406 #if CHECKSUM_THE_COMPRESSED_DATA
1407 unsigned csvhash;
1408 if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1409 addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1410 panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1411 }
1412 #endif
1413 #if POPCOUNT_THE_COMPRESSED_DATA
1414 unsigned csvpop;
1415 if (c_size) {
1416 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1417 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1418 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1419 }
1420 }
1421 #endif
1422 }
1423
1424 if (bytes_used != c_seg->c_bytes_used) {
1425 panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1426 }
1427
1428 if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1429 panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1430 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1431 }
1432
1433 if (must_be_compact) {
1434 if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1435 panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1436 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1437 }
1438 }
1439 }
1440
1441 #endif
1442
1443
1444 void
1445 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1446 {
1447 boolean_t clear_busy = FALSE;
1448
1449 if (c_list_lock_held == FALSE) {
1450 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1451 C_SEG_BUSY(c_seg);
1452
1453 lck_mtx_unlock_always(&c_seg->c_lock);
1454 lck_mtx_lock_spin_always(c_list_lock);
1455 lck_mtx_lock_spin_always(&c_seg->c_lock);
1456
1457 clear_busy = TRUE;
1458 }
1459 }
1460 assert(c_seg->c_state != C_IS_FILLING);
1461
1462 if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
1463 queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1464 c_seg->c_on_minorcompact_q = 1;
1465 os_atomic_inc(&c_minor_count, relaxed);
1466 }
1467 if (c_list_lock_held == FALSE) {
1468 lck_mtx_unlock_always(c_list_lock);
1469 }
1470
1471 if (clear_busy == TRUE) {
1472 C_SEG_WAKEUP_DONE(c_seg);
1473 }
1474 }
1475
1476
1477 unsigned int c_seg_moved_to_sparse_list = 0;
1478
1479 void
1480 c_seg_move_to_sparse_list(c_segment_t c_seg)
1481 {
1482 boolean_t clear_busy = FALSE;
1483
1484 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1485 C_SEG_BUSY(c_seg);
1486
1487 lck_mtx_unlock_always(&c_seg->c_lock);
1488 lck_mtx_lock_spin_always(c_list_lock);
1489 lck_mtx_lock_spin_always(&c_seg->c_lock);
1490
1491 clear_busy = TRUE;
1492 }
1493 c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1494
1495 c_seg_moved_to_sparse_list++;
1496
1497 lck_mtx_unlock_always(c_list_lock);
1498
1499 if (clear_busy == TRUE) {
1500 C_SEG_WAKEUP_DONE(c_seg);
1501 }
1502 }
1503
1504
1505
1506
1507 int try_minor_compaction_failed = 0;
1508 int try_minor_compaction_succeeded = 0;
1509
1510 void
1511 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1512 {
1513 assert(c_seg->c_on_minorcompact_q);
1514 /*
1515 * c_seg is currently on the delayed minor compaction
1516 * queue and we have c_seg locked... if we can get the
1517 * c_list_lock w/o blocking (if we blocked we could deadlock
1518 * because the lock order is c_list_lock then c_seg's lock)
1519 * we'll pull it from the delayed list and free it directly
1520 */
1521 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1522 /*
1523 * c_list_lock is held, we need to bail
1524 */
1525 try_minor_compaction_failed++;
1526
1527 lck_mtx_unlock_always(&c_seg->c_lock);
1528 } else {
1529 try_minor_compaction_succeeded++;
1530
1531 C_SEG_BUSY(c_seg);
1532 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1533 }
1534 }
1535
1536
1537 int
1538 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1539 {
1540 int c_seg_freed;
1541
1542 assert(c_seg->c_busy);
1543 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1544
1545 /*
1546 * check for the case that can occur when we are not swapping
1547 * and this segment has been major compacted in the past
1548 * and moved to the majorcompact q to remove it from further
1549 * consideration... if the occupancy falls too low we need
1550 * to put it back on the age_q so that it will be considered
1551 * in the next major compaction sweep... if we don't do this
1552 * we will eventually run into the c_segments_limit
1553 */
1554 if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1555 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1556 }
1557 if (!c_seg->c_on_minorcompact_q) {
1558 if (clear_busy == TRUE) {
1559 C_SEG_WAKEUP_DONE(c_seg);
1560 }
1561
1562 lck_mtx_unlock_always(&c_seg->c_lock);
1563
1564 return 0;
1565 }
1566 queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1567 c_seg->c_on_minorcompact_q = 0;
1568 os_atomic_dec(&c_minor_count, relaxed);
1569
1570 lck_mtx_unlock_always(c_list_lock);
1571
1572 if (disallow_page_replacement == TRUE) {
1573 lck_mtx_unlock_always(&c_seg->c_lock);
1574
1575 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1576
1577 lck_mtx_lock_spin_always(&c_seg->c_lock);
1578 }
1579 c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1580
1581 if (disallow_page_replacement == TRUE) {
1582 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1583 }
1584
1585 if (need_list_lock == TRUE) {
1586 lck_mtx_lock_spin_always(c_list_lock);
1587 }
1588
1589 return c_seg_freed;
1590 }
1591
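/*
 * kdp/stackshot support: given the wait event for a busy c_segment,
 * report the thread that holds the busy bit and the segment itself.
 */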
1592 void
1593 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1594 {
1595 c_segment_t c_seg = (c_segment_t) wait_event;
1596
1597 waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1598 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1599 }
1600
1601 #if DEVELOPMENT || DEBUG
1602 int
1603 do_cseg_wedge_thread(void)
1604 {
1605 struct c_segment c_seg;
1606 c_seg.c_busy_for_thread = current_thread();
1607
1608 debug_cseg_wait_event = (event_t) &c_seg;
1609
1610 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1611 assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1612
1613 thread_block(THREAD_CONTINUE_NULL);
1614
1615 return 0;
1616 }
1617
1618 int
1619 do_cseg_unwedge_thread(void)
1620 {
1621 thread_wakeup(debug_cseg_wait_event);
1622 debug_cseg_wait_event = NULL;
1623
1624 return 0;
1625 }
1626 #endif /* DEVELOPMENT || DEBUG */
1627
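/*
 * Wait (uninterruptibly) for a busy segment to be released.  Called
 * with the c_seg lock held; the lock is dropped before blocking and is
 * not re-acquired on return.
 */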
1628 void
1629 c_seg_wait_on_busy(c_segment_t c_seg)
1630 {
1631 c_seg->c_wanted = 1;
1632
1633 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1634 assert_wait((event_t) (c_seg), THREAD_UNINT);
1635
1636 lck_mtx_unlock_always(&c_seg->c_lock);
1637 thread_block(THREAD_CONTINUE_NULL);
1638 }
1639
1640 #if CONFIG_FREEZE
1641 /*
1642 * We don't have the task lock held while updating the task's
1643 * c_seg queues. We can do that because of the following restrictions:
1644 *
1645 * - SINGLE FREEZER CONTEXT:
1646 * We 'insert' c_segs into the task list on the task_freeze path.
1647 * There can only be one such freeze in progress and the task
1648 * isn't disappearing because we have the VM map lock held throughout
1649 * and we have a reference on the proc too.
1650 *
1651 * - SINGLE TASK DISOWN CONTEXT:
1652 * We 'disown' c_segs of a task ONLY from the task_terminate context. So
1653 * we don't need the task lock but we need the c_list_lock and the
1654 * compressor master lock (shared). We also hold the individual
1655 * c_seg locks (exclusive).
1656 *
1657 * If we either:
1658 * - can't get the c_seg lock on a try, then we start again because maybe
1659 * the c_seg is part of a compaction and might get freed. So we can't trust
1660 * that linkage and need to restart our queue traversal.
1661 * - OR, we run into a busy c_seg (say being swapped in or free-ing) we
1662 * drop all locks again and wait and restart our queue traversal.
1663 *
1664 * - The new_owner_task below is currently only the kernel or NULL.
1665 *
1666 */
1667 void
1668 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1669 {
1670 task_t owner_task = c_seg->c_task_owner;
1671 uint64_t uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1672
1673 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1674 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1675
1676 if (owner_task) {
1677 task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1678 queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1679 c_segment_t, c_task_list_next_cseg);
1680 }
1681
1682 if (new_owner_task) {
1683 queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1684 c_segment_t, c_task_list_next_cseg);
1685 task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1686 }
1687
1688 c_seg->c_task_owner = new_owner_task;
1689 }
1690
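/*
 * Hand every frozen c_segment still owned by owner_task over to the
 * kernel_task, restarting the queue walk whenever a segment is busy or
 * its lock can't be taken without blocking (see the locking rules above).
 */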
1691 void
1692 task_disown_frozen_csegs(task_t owner_task)
1693 {
1694 c_segment_t c_seg = NULL, next_cseg = NULL;
1695
1696 again:
1697 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1698 lck_mtx_lock_spin_always(c_list_lock);
1699
1700 for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1701 !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1702 c_seg = next_cseg) {
1703 next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1704
1705 if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1706 lck_mtx_unlock(c_list_lock);
1707 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1708 goto again;
1709 }
1710
1711 if (c_seg->c_busy) {
1712 lck_mtx_unlock(c_list_lock);
1713 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1714
1715 c_seg_wait_on_busy(c_seg);
1716
1717 goto again;
1718 }
1719 assert(c_seg->c_task_owner == owner_task);
1720 c_seg_update_task_owner(c_seg, kernel_task);
1721 lck_mtx_unlock_always(&c_seg->c_lock);
1722 }
1723
1724 lck_mtx_unlock(c_list_lock);
1725 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1726 }
1727 #endif /* CONFIG_FREEZE */
1728
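/*
 * Move a segment from its current state queue to the queue for
 * new_state, updating the per-queue counts and asserting that the
 * transition is legal.  c_list_lock must be held, along with the c_seg
 * lock except when transitioning into C_IS_FILLING.
 */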
1729 void
1730 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1731 {
1732 int old_state = c_seg->c_state;
1733 queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
1734 uint32_t *donate_swapout_count, *donate_swappedin_count;
1735
1736 /*
1737 * On macOS the donate queue is swapped first, i.e. the c_early_swapout queue.
1738 * On other swap-capable platforms, we want to swap those out last. So we
1739 * use the c_late_swapout queue.
1740 */
1741 #if XNU_TARGET_OS_OSX /* tag:DONATE */
1742 #if (DEVELOPMENT || DEBUG)
1743 if (new_state != C_IS_FILLING) {
1744 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1745 }
1746 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1747 #endif /* DEVELOPMENT || DEBUG */
1748
1749 donate_swapout_list_head = &c_early_swapout_list_head;
1750 donate_swapout_count = &c_early_swapout_count;
1751 donate_swappedin_list_head = &c_early_swappedin_list_head;
1752 donate_swappedin_count = &c_early_swappedin_count;
1753 #else /* XNU_TARGET_OS_OSX */
1754 donate_swapout_list_head = &c_late_swapout_list_head;
1755 donate_swapout_count = &c_late_swapout_count;
1756 donate_swappedin_list_head = &c_late_swappedin_list_head;
1757 donate_swappedin_count = &c_late_swappedin_count;
1758 #endif /* XNU_TARGET_OS_OSX */
1759
1760 switch (old_state) {
1761 case C_IS_EMPTY:
1762 assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1763
1764 c_empty_count--;
1765 break;
1766
1767 case C_IS_FILLING:
1768 assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1769
1770 queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1771 c_filling_count--;
1772 break;
1773
1774 case C_ON_AGE_Q:
1775 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1776 new_state == C_IS_FREE);
1777
1778 queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1779 c_age_count--;
1780 break;
1781
1782 case C_ON_SWAPPEDIN_Q:
1783 if (c_seg->c_has_donated_pages) {
1784 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
1785 queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1786 *donate_swappedin_count -= 1;
1787 } else {
1788 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1789 #if CONFIG_FREEZE
1790 assert(c_seg->c_has_freezer_pages);
1791 queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1792 c_early_swappedin_count--;
1793 #else /* CONFIG_FREEZE */
1794 queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1795 c_regular_swappedin_count--;
1796 #endif /* CONFIG_FREEZE */
1797 }
1798 break;
1799
1800 case C_ON_SWAPOUT_Q:
1801 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1802
1803 #if CONFIG_FREEZE
1804 if (c_seg->c_has_freezer_pages) {
1805 if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1806 c_seg_update_task_owner(c_seg, NULL);
1807 }
1808 queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
1809 c_early_swapout_count--;
1810 } else
1811 #endif /* CONFIG_FREEZE */
1812 {
1813 if (c_seg->c_has_donated_pages) {
1814 queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
1815 *donate_swapout_count -= 1;
1816 } else {
1817 queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
1818 c_regular_swapout_count--;
1819 }
1820 }
1821
1822 if (new_state == C_ON_AGE_Q) {
1823 c_seg->c_has_donated_pages = 0;
1824 }
1825 thread_wakeup((event_t)&compaction_swapper_running);
1826 break;
1827
1828 case C_ON_SWAPIO_Q:
1829 #if CONFIG_FREEZE
1830 if (c_seg->c_has_freezer_pages) {
1831 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1832 } else
1833 #endif /* CONFIG_FREEZE */
1834 {
1835 if (c_seg->c_has_donated_pages) {
1836 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
1837 } else {
1838 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1839 }
1840 }
1841
1842 queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1843 c_swapio_count--;
1844 break;
1845
1846 case C_ON_SWAPPEDOUT_Q:
1847 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1848 new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1849 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1850
1851 queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1852 c_swappedout_count--;
1853 break;
1854
1855 case C_ON_SWAPPEDOUTSPARSE_Q:
1856 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1857 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1858
1859 queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1860 c_swappedout_sparse_count--;
1861 break;
1862
1863 case C_ON_MAJORCOMPACT_Q:
1864 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1865
1866 queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1867 c_major_count--;
1868 break;
1869
1870 case C_ON_BAD_Q:
1871 assert(new_state == C_IS_FREE);
1872
1873 queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1874 c_bad_count--;
1875 break;
1876
1877 default:
1878 panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1879 }
1880
1881 switch (new_state) {
1882 case C_IS_FREE:
1883 assert(old_state != C_IS_FILLING);
1884
1885 break;
1886
1887 case C_IS_EMPTY:
1888 assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1889
1890 c_empty_count++;
1891 break;
1892
1893 case C_IS_FILLING:
1894 assert(old_state == C_IS_EMPTY);
1895
1896 queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1897 c_filling_count++;
1898 break;
1899
1900 case C_ON_AGE_Q:
1901 assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1902 old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1903 old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1904
1905 assert(!c_seg->c_has_donated_pages);
1906 if (old_state == C_IS_FILLING) {
1907 queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1908 } else {
1909 if (!queue_empty(&c_age_list_head)) {
1910 c_segment_t c_first;
1911
1912 c_first = (c_segment_t)queue_first(&c_age_list_head);
1913 c_seg->c_creation_ts = c_first->c_creation_ts;
1914 }
1915 queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1916 }
1917 c_age_count++;
1918 break;
1919
1920 case C_ON_SWAPPEDIN_Q:
1921 {
1922 queue_head_t *list_head;
1923
1924 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
1925 if (c_seg->c_has_donated_pages) {
1926 /* An error during swapout can occur while the c_seg is still on the swapio queue */
1927 list_head = donate_swappedin_list_head;
1928 *donate_swappedin_count += 1;
1929 } else {
1930 #if CONFIG_FREEZE
1931 assert(c_seg->c_has_freezer_pages);
1932 list_head = &c_early_swappedin_list_head;
1933 c_early_swappedin_count++;
1934 #else /* CONFIG_FREEZE */
1935 list_head = &c_regular_swappedin_list_head;
1936 c_regular_swappedin_count++;
1937 #endif /* CONFIG_FREEZE */
1938 }
1939
1940 if (insert_head == TRUE) {
1941 queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1942 } else {
1943 queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1944 }
1945 break;
1946 }
1947
1948 case C_ON_SWAPOUT_Q:
1949 {
1950 queue_head_t *list_head;
1951
1952 #if CONFIG_FREEZE
1953 /*
1954 * A segment with both the frozen and donated identities
1955 * will be put on the early swapout Q, i.e. the frozen identity wins.
1956 * This is because when both identities are set, the donation bit
1957 * is added later, in the c_current_seg_filled path, for accounting
1958 * purposes.
1959 */
1960 if (c_seg->c_has_freezer_pages) {
1961 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1962 list_head = &c_early_swapout_list_head;
1963 c_early_swapout_count++;
1964 } else
1965 #endif
1966 {
1967 if (c_seg->c_has_donated_pages) {
1968 assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
1969 list_head = donate_swapout_list_head;
1970 *donate_swapout_count += 1;
1971 } else {
1972 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1973 list_head = &c_regular_swapout_list_head;
1974 c_regular_swapout_count++;
1975 }
1976 }
1977
1978 if (insert_head == TRUE) {
1979 queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1980 } else {
1981 queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1982 }
1983 break;
1984 }
1985
1986 case C_ON_SWAPIO_Q:
1987 assert(old_state == C_ON_SWAPOUT_Q);
1988
1989 if (insert_head == TRUE) {
1990 queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1991 } else {
1992 queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1993 }
1994 c_swapio_count++;
1995 break;
1996
1997 case C_ON_SWAPPEDOUT_Q:
1998 assert(old_state == C_ON_SWAPIO_Q);
1999
2000 if (insert_head == TRUE) {
2001 queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2002 } else {
2003 queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2004 }
2005 c_swappedout_count++;
2006 break;
2007
2008 case C_ON_SWAPPEDOUTSPARSE_Q:
2009 assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
2010
2011 if (insert_head == TRUE) {
2012 queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2013 } else {
2014 queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2015 }
2016
2017 c_swappedout_sparse_count++;
2018 break;
2019
2020 case C_ON_MAJORCOMPACT_Q:
2021 assert(old_state == C_ON_AGE_Q);
2022 assert(!c_seg->c_has_donated_pages);
2023
2024 if (insert_head == TRUE) {
2025 queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2026 } else {
2027 queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2028 }
2029 c_major_count++;
2030 break;
2031
2032 case C_ON_BAD_Q:
2033 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
2034
2035 if (insert_head == TRUE) {
2036 queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2037 } else {
2038 queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2039 }
2040 c_bad_count++;
2041 break;
2042
2043 default:
2044 panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
2045 }
2046 c_seg->c_state = new_state;
2047 }
2048
2049
2050
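/*
 * Free a busy segment.  The c_seg lock is held on entry; it is dropped
 * so that c_list_lock can be taken first (lock ordering), then both
 * locks are re-acquired and the work is done by c_seg_free_locked().
 */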
2051 void
2052 c_seg_free(c_segment_t c_seg)
2053 {
2054 assert(c_seg->c_busy);
2055
2056 lck_mtx_unlock_always(&c_seg->c_lock);
2057 lck_mtx_lock_spin_always(c_list_lock);
2058 lck_mtx_lock_spin_always(&c_seg->c_lock);
2059
2060 c_seg_free_locked(c_seg);
2061 }
2062
2063
2064 void
2065 c_seg_free_locked(c_segment_t c_seg)
2066 {
2067 int segno;
2068 int pages_populated = 0;
2069 int32_t *c_buffer = NULL;
2070 uint64_t c_swap_handle = 0;
2071
2072 assert(c_seg->c_busy);
2073 assert(c_seg->c_slots_used == 0);
2074 assert(!c_seg->c_on_minorcompact_q);
2075 assert(!c_seg->c_busy_swapping);
2076
2077 if (c_seg->c_overage_swap == TRUE) {
2078 c_overage_swapped_count--;
2079 c_seg->c_overage_swap = FALSE;
2080 }
2081 if (!(C_SEG_IS_ONDISK(c_seg))) {
2082 c_buffer = c_seg->c_store.c_buffer;
2083 } else {
2084 c_swap_handle = c_seg->c_store.c_swap_handle;
2085 }
2086
2087 c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
2088
2089 if (c_buffer) {
2090 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
2091 c_seg->c_store.c_buffer = NULL;
2092 } else {
2093 #if CONFIG_FREEZE
2094 c_seg_update_task_owner(c_seg, NULL);
2095 #endif /* CONFIG_FREEZE */
2096
2097 c_seg->c_store.c_swap_handle = (uint64_t)-1;
2098 }
2099
2100 lck_mtx_unlock_always(&c_seg->c_lock);
2101
2102 lck_mtx_unlock_always(c_list_lock);
2103
2104 if (c_buffer) {
2105 if (pages_populated) {
2106 kernel_memory_depopulate((vm_offset_t)c_buffer,
2107 ptoa(pages_populated), KMA_COMPRESSOR,
2108 VM_KERN_MEMORY_COMPRESSOR);
2109 }
2110 } else if (c_swap_handle) {
2111 /*
2112 * Free swap space on disk.
2113 */
2114 vm_swap_free(c_swap_handle);
2115 }
2116 lck_mtx_lock_spin_always(&c_seg->c_lock);
2117 /*
2118 * c_seg must remain busy until
2119 * after the call to vm_swap_free
2120 */
2121 C_SEG_WAKEUP_DONE(c_seg);
2122 lck_mtx_unlock_always(&c_seg->c_lock);
2123
2124 segno = c_seg->c_mysegno;
2125
2126 lck_mtx_lock_spin_always(c_list_lock);
2127 /*
2128 * because the c_buffer is now associated with the segno,
2129 * we can't put the segno back on the free list until
2130 * after we have depopulated the c_buffer range, or
2131 * we run the risk of depopulating a range that is
2132 * now being used in one of the compressor heads
2133 */
2134 c_segments_get(segno)->c_segno = c_free_segno_head;
2135 c_free_segno_head = segno;
2136 c_segment_count--;
2137
2138 lck_mtx_unlock_always(c_list_lock);
2139
2140 lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
2141
2142 if (c_seg->c_slot_var_array_len) {
2143 kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
2144 c_seg->c_slot_var_array);
2145 }
2146
2147 zfree(compressor_segment_zone, c_seg);
2148 }
2149
2150 #if DEVELOPMENT || DEBUG
2151 int c_seg_trim_page_count = 0;
2152 #endif
2153
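/*
 * Trim empty slots off the tail of a segment: walk c_nextslot backwards
 * past zero-sized slots, then pull c_nextoffset and c_populated_offset
 * back so they just cover the last slot that still holds data.
 */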
2154 void
2155 c_seg_trim_tail(c_segment_t c_seg)
2156 {
2157 c_slot_t cs;
2158 uint32_t c_size;
2159 uint32_t c_offset;
2160 uint32_t c_rounded_size;
2161 uint16_t current_nextslot;
2162 uint32_t current_populated_offset;
2163
2164 if (c_seg->c_bytes_used == 0) {
2165 return;
2166 }
2167 current_nextslot = c_seg->c_nextslot;
2168 current_populated_offset = c_seg->c_populated_offset;
2169
2170 while (c_seg->c_nextslot) {
2171 cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
2172
2173 c_size = UNPACK_C_SIZE(cs);
2174
2175 if (c_size) {
2176 if (current_nextslot != c_seg->c_nextslot) {
2177 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
2178 c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2179
2180 c_seg->c_nextoffset = c_offset;
2181 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
2182 ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2183
2184 if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
2185 c_seg->c_firstemptyslot = c_seg->c_nextslot;
2186 }
2187 #if DEVELOPMENT || DEBUG
2188 c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
2189 round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
2190 PAGE_SIZE);
2191 #endif
2192 }
2193 break;
2194 }
2195 c_seg->c_nextslot--;
2196 }
2197 assert(c_seg->c_nextslot);
2198 }
2199
2200
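/*
 * Minor compaction: slide the live slots of a segment down over the
 * holes left by freed slots, fix up each moved slot's back-pointer, and
 * depopulate any pages no longer needed at the end of the buffer.
 * Returns 1 if the segment had no bytes in use and was freed, 0
 * otherwise.  The c_seg lock is dropped; when clear_busy is TRUE the
 * busy bit is cleared and waiters are woken before returning.
 */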
2201 int
2202 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
2203 {
2204 c_slot_mapping_t slot_ptr;
2205 uint32_t c_offset = 0;
2206 uint32_t old_populated_offset;
2207 uint32_t c_rounded_size;
2208 uint32_t c_size;
2209 uint16_t c_indx = 0;
2210 int i;
2211 c_slot_t c_dst;
2212 c_slot_t c_src;
2213
2214 assert(c_seg->c_busy);
2215
2216 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_START,
2217 VM_KERNEL_ADDRHIDE(c_seg), c_seg->c_state,
2218 c_seg->c_bytes_unused, c_seg->c_slots_used);
2219
2220
2221 #if VALIDATE_C_SEGMENTS
2222 c_seg_validate(c_seg, FALSE);
2223 #endif
2224 if (c_seg->c_bytes_used == 0) {
2225 c_seg_free(c_seg);
2226 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_END,
2227 true, 0, 0);
2228 return 1;
2229 }
2230 lck_mtx_unlock_always(&c_seg->c_lock);
2231
2232 if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
2233 goto done;
2234 }
2235
2236 /* TODO: assert first emptyslot's c_size is actually 0 */
2237
2238 #if DEVELOPMENT || DEBUG
2239 C_SEG_MAKE_WRITEABLE(c_seg);
2240 #endif
2241
2242 #if VALIDATE_C_SEGMENTS
2243 c_seg->c_was_minor_compacted++;
2244 #endif
2245 c_indx = c_seg->c_firstemptyslot;
2246 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2247
2248 old_populated_offset = c_seg->c_populated_offset;
2249 c_offset = c_dst->c_offset;
2250
2251 for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
2252 c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
2253
2254 c_size = UNPACK_C_SIZE(c_src);
2255
2256 if (c_size == 0) {
2257 continue;
2258 }
2259
2260 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(c_src));
2261
2262 /* N.B.: This memcpy may be an overlapping copy */
2263 memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
2264
2265 cslot_copy(c_dst, c_src);
2266 c_dst->c_offset = c_offset;
2267
2268 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2269 slot_ptr->s_cindx = c_indx;
2270
2271 c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2272 PACK_C_SIZE(c_src, 0);
2273 #if HAS_MTE
2274 c_src->c_mte_size = 0;
2275 #endif /* HAS_MTE */
2276 c_indx++;
2277
2278 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2279 }
2280 c_seg->c_firstemptyslot = c_indx;
2281 c_seg->c_nextslot = c_indx;
2282 c_seg->c_nextoffset = c_offset;
2283 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2284 c_seg->c_bytes_unused = 0;
2285
2286 #if VALIDATE_C_SEGMENTS
2287 c_seg_validate(c_seg, TRUE);
2288 #endif
2289 if (old_populated_offset > c_seg->c_populated_offset) {
2290 uint32_t gc_size;
2291 int32_t *gc_ptr;
2292
2293 gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
2294 gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
2295
2296 kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
2297 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
2298 }
2299
2300 #if DEVELOPMENT || DEBUG
2301 C_SEG_WRITE_PROTECT(c_seg);
2302 #endif
2303
2304 done:
2305 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MINOR) | DBG_FUNC_END,
2306 false, c_seg->c_bytes_unused, c_seg->c_bytes_used);
2307
2308 if (clear_busy == TRUE) {
2309 lck_mtx_lock_spin_always(&c_seg->c_lock);
2310 C_SEG_WAKEUP_DONE(c_seg);
2311 lck_mtx_unlock_always(&c_seg->c_lock);
2312 }
2313 return 0;
2314 }
2315
2316
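/*
 * Ensure the segment has room for slot index c_nextslot.  Slots beyond
 * the fixed inline array live in a variable-length array that is grown
 * by doubling; the old contents are copied under the c_seg lock and the
 * old array is then freed.
 */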
2317 static void
2318 c_seg_alloc_nextslot(c_segment_t c_seg)
2319 {
2320 struct c_slot *old_slot_array = NULL;
2321 struct c_slot *new_slot_array = NULL;
2322 int newlen;
2323 int oldlen;
2324
2325 if (c_seg->c_nextslot < c_seg_fixed_array_len) {
2326 return;
2327 }
2328
2329 if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
2330 oldlen = c_seg->c_slot_var_array_len;
2331 old_slot_array = c_seg->c_slot_var_array;
2332
2333 if (oldlen == 0) {
2334 newlen = c_seg_slot_var_array_min_len;
2335 } else {
2336 newlen = oldlen * 2;
2337 }
2338
2339 new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);
2340
2341 lck_mtx_lock_spin_always(&c_seg->c_lock);
2342
2343 if (old_slot_array) {
2344 memcpy(new_slot_array, old_slot_array,
2345 sizeof(struct c_slot) * oldlen);
2346 }
2347
2348 c_seg->c_slot_var_array_len = newlen;
2349 c_seg->c_slot_var_array = new_slot_array;
2350
2351 lck_mtx_unlock_always(&c_seg->c_lock);
2352
2353 kfree_type(struct c_slot, oldlen, old_slot_array);
2354 }
2355 }
2356
2357
2358 #define C_SEG_MAJOR_COMPACT_STATS_MAX (30)
2359
2360 struct vm_major_compact_stats_s {
2361 uint64_t asked_permission;
2362 uint64_t compactions;
2363 uint64_t moved_slots;
2364 uint64_t moved_bytes;
2365 uint64_t wasted_space_in_swapouts;
2366 uint64_t count_of_swapouts;
2367 uint64_t count_of_freed_segs;
2368 uint64_t bailed_compactions;
2369 uint64_t bytes_freed;
2370 uint64_t runtime_us;
2371 };
2372
2373 struct vm_major_compact_stats_s c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
2374
2375 int c_seg_major_compact_stats_now = 0;
2376
2377 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((c_seg_bufsize * 90) / 100)
2378
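/*
 * Decide whether coalescing c_seg_src into c_seg_dst is worthwhile:
 * refuse if both segments are already close to full (90% of the
 * segment buffer) or if the destination has no room left for more
 * offsets or slots.
 */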
2379 boolean_t
2380 c_seg_major_compact_ok(
2381 c_segment_t c_seg_dst,
2382 c_segment_t c_seg_src)
2383 {
2384 c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
2385 vm_pageout_vminfo.vm_compactor_major_compactions_considered++;
2386
2387 if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
2388 c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
2389 return FALSE;
2390 }
2391
2392 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2393 /*
2394 * destination segment is full... can't compact
2395 */
2396 return FALSE;
2397 }
2398
2399 return TRUE;
2400 }
2401
2402 /*
2403 * Move slots from src to dst
2404 * returns TRUE if we can continue compacting further to the same dst segment
2405 */
2406 boolean_t
2407 c_seg_coalesce(
2408 c_segment_t c_seg_dst,
2409 c_segment_t c_seg_src)
2410 {
2411 c_slot_mapping_t slot_ptr;
2412 uint32_t c_rounded_size;
2413 uint32_t c_size;
2414 uint16_t dst_slot;
2415 int i;
2416 c_slot_t c_dst;
2417 c_slot_t c_src;
2418 boolean_t keep_compacting = TRUE;
2419
2420 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_COALESCE) | DBG_FUNC_START,
2421 VM_KERNEL_ADDRHIDE(c_seg_dst), c_seg_dst->c_populated_offset,
2422 VM_KERNEL_ADDRHIDE(c_seg_src), c_seg_src->c_populated_offset);
2423
2424 /*
2425 * segments are not locked but they are both marked c_busy
2426 * which keeps c_decompress from working on them...
2427 * we can safely allocate new pages, move compressed data
2428 * from c_seg_src to c_seg_dst and update both c_segment's
2429 * state w/o holding the master lock
2430 */
2431 #if DEVELOPMENT || DEBUG
2432 C_SEG_MAKE_WRITEABLE(c_seg_dst);
2433 #endif
2434
2435 #if VALIDATE_C_SEGMENTS
2436 c_seg_dst->c_was_major_compacted++;
2437 c_seg_src->c_was_major_donor++;
2438 #endif
2439 assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
2440 c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
2441 vm_pageout_vminfo.vm_compactor_major_compactions_completed++;
2442
2443 dst_slot = c_seg_dst->c_nextslot;
2444
2445 for (i = 0; i < c_seg_src->c_nextslot; i++) {
2446 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
2447
2448 c_size = UNPACK_C_SIZE(c_src);
2449
2450 if (c_size == 0) {
2451 /* BATCH: move what we have so far; */
2452 continue;
2453 }
2454
2455 int combined_size = c_size + c_slot_extra_size(c_src);
2456
2457 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);
2458
2459 int size_left = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_nextoffset);
2460 /* we're going to increment c_nextoffset by c_rounded_size so it should not overflow the segment bufsize */
2461 if (size_left < c_rounded_size) {
2462 keep_compacting = FALSE;
2463 break;
2464 }
2465
2466 /* Do we have enough populated space left in dst? */
2467 assertf(c_seg_dst->c_populated_offset >= c_seg_dst->c_nextoffset, "Unexpected segment offsets: %u,%u", c_seg_dst->c_populated_offset, c_seg_dst->c_nextoffset);
2468 if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) combined_size) {
2469 int size_to_populate;
2470
2471 /* eagerly populate the entire segment, expecting to fill it */
2472 assert(c_seg_bufsize >= C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset));
2473 size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2474
2475 if (size_to_populate == 0) {
2476 /* can't populate any more pages in this segment */
2477 keep_compacting = FALSE;
2478 break;
2479 }
2480 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2481 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2482 }
2483
2484 kernel_memory_populate(
2485 (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2486 size_to_populate,
2487 KMA_NOFAIL | KMA_COMPRESSOR,
2488 VM_KERN_MEMORY_COMPRESSOR);
2489
2490 c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2491 assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2492 }
2493 c_seg_alloc_nextslot(c_seg_dst);
2494
2495 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2496
2497 /*
2498 * We don't want pages to get stolen by the contiguous memory allocator
2499 * when copying data from one segment to another.
2500 */
2501 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2502 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
2503 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2504
2505 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2506 vm_pageout_vminfo.vm_compactor_major_compaction_slots_moved++;
2507 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += combined_size;
2508 vm_pageout_vminfo.vm_compactor_major_compaction_bytes_moved += combined_size;
2509
2510 cslot_copy(c_dst, c_src);
2511 c_dst->c_offset = c_seg_dst->c_nextoffset;
2512
2513 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2514 c_seg_dst->c_firstemptyslot++;
2515 }
2516 c_seg_dst->c_slots_used++;
2517 c_seg_dst->c_nextslot++;
2518 c_seg_dst->c_bytes_used += c_rounded_size;
2519 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2520
2521 PACK_C_SIZE(c_src, 0);
2522 #if HAS_MTE
2523 c_src->c_mte_size = 0;
2524 #endif
2525
2526 c_seg_src->c_bytes_used -= c_rounded_size;
2527 c_seg_src->c_bytes_unused += c_rounded_size;
2528 c_seg_src->c_firstemptyslot = 0;
2529
2530 assert(c_seg_src->c_slots_used);
2531 c_seg_src->c_slots_used--;
2532
2533 if (!c_seg_src->c_swappedin) {
2534 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
2535 c_seg_dst->c_swappedin = false;
2536 }
2537
2538 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2539 /* dest segment is now full */
2540 keep_compacting = FALSE;
2541 break;
2542 }
2543 }
2544 #if DEVELOPMENT || DEBUG
2545 C_SEG_WRITE_PROTECT(c_seg_dst);
2546 #endif
2547 if (dst_slot < c_seg_dst->c_nextslot) {
2548 PAGE_REPLACEMENT_ALLOWED(TRUE);
2549 /*
2550 * we've now locked out c_decompress from
2551 * converting the slot passed into it into
2552 * a c_segment_t which allows us to use
2553 * the backptr to change which c_segment and
2554 * index the slot points to
2555 */
2556 while (dst_slot < c_seg_dst->c_nextslot) {
2557 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2558
2559 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2560 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2561 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2562 slot_ptr->s_cindx = dst_slot++;
2563 }
2564 PAGE_REPLACEMENT_ALLOWED(FALSE);
2565 }
2566 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_COALESCE) | DBG_FUNC_END,
2567 keep_compacting, c_seg_dst->c_nextoffset,
2568 c_seg_dst->c_populated_offset, c_seg_dst->c_bytes_used);
2569 return keep_compacting;
2570 }
2571
2572
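/* Return the elapsed time between two (sec, nsec) timestamps in milliseconds. */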
2573 uint64_t
2574 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2575 {
2576 uint64_t end_msecs;
2577 uint64_t start_msecs;
2578
2579 end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2580 start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2581
2582 return end_msecs - start_msecs;
2583 }
2584
2585
2586
2587 uint32_t compressor_eval_period_in_msecs = 250;
2588 uint32_t compressor_sample_min_in_msecs = 500;
2589 uint32_t compressor_sample_max_in_msecs = 10000;
2590 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2591 uint32_t compressor_thrashing_min_per_10msecs = 20;
2592
2593 /* When true, reset sample data next chance we get. */
2594 static boolean_t compressor_need_sample_reset = FALSE;
2595
2596
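/*
 * Thrashing detection for the swapper: bucket recent decompressions by
 * the age of the segment they came from and, once the decompression
 * rate in the sample window is high enough, derive swapout_target_age,
 * a creation-time cutoff such that segments older than it account for
 * only a small tail of the observed decompressions.  Those cold
 * segments become the preferred swapout candidates.
 */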
2597 void
2598 compute_swapout_target_age(void)
2599 {
2600 clock_sec_t cur_ts_sec;
2601 clock_nsec_t cur_ts_nsec;
2602 uint32_t min_operations_needed_in_this_sample;
2603 uint64_t elapsed_msecs_in_eval;
2604 uint64_t elapsed_msecs_in_sample;
2605 boolean_t need_eval_reset = FALSE;
2606
2607 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2608
2609 elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2610
2611 if (compressor_need_sample_reset ||
2612 elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2613 compressor_need_sample_reset = TRUE;
2614 need_eval_reset = TRUE;
2615 goto done;
2616 }
2617 elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2618
2619 if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2620 goto done;
2621 }
2622 need_eval_reset = TRUE;
2623
2624 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2625
2626 min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2627
2628 if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2629 (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2630 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2631 sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2632
2633 swapout_target_age = 0;
2634
2635 compressor_need_sample_reset = TRUE;
2636 need_eval_reset = TRUE;
2637 goto done;
2638 }
2639 last_eval_compression_count = sample_period_compression_count;
2640 last_eval_decompression_count = sample_period_decompression_count;
2641
2642 if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2643 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2644 goto done;
2645 }
2646 if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2647 uint64_t running_total;
2648 uint64_t working_target;
2649 uint64_t aging_target;
2650 uint32_t oldest_age_of_csegs_sampled = 0;
2651 uint64_t working_set_approximation = 0;
2652
2653 swapout_target_age = 0;
2654
2655 working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */
2656 aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */
2657 running_total = 0;
2658
2659 for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2660 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2661
2662 working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2663
2664 if (running_total >= working_target) {
2665 break;
2666 }
2667 }
2668 if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2669 working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2670
2671 if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2672 running_total = overage_decompressions_during_sample_period;
2673
2674 for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2675 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2676
2677 if (running_total >= aging_target) {
2678 break;
2679 }
2680 }
2681 swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2682
2683 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2684 } else {
2685 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2686 }
2687 } else {
2688 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2689 }
2690
2691 compressor_need_sample_reset = TRUE;
2692 need_eval_reset = TRUE;
2693 } else {
2694 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2695 }
2696 done:
2697 if (compressor_need_sample_reset == TRUE) {
2698 bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2699 overage_decompressions_during_sample_period = 0;
2700
2701 start_of_sample_period_sec = cur_ts_sec;
2702 start_of_sample_period_nsec = cur_ts_nsec;
2703 sample_period_decompression_count = 0;
2704 sample_period_compression_count = 0;
2705 last_eval_decompression_count = 0;
2706 last_eval_compression_count = 0;
2707 compressor_need_sample_reset = FALSE;
2708 }
2709 if (need_eval_reset == TRUE) {
2710 start_of_eval_period_sec = cur_ts_sec;
2711 start_of_eval_period_nsec = cur_ts_nsec;
2712 }
2713 }
2714
2715
2716 int compaction_swapper_init_now = 0;
2717 int compaction_swapper_running = 0;
2718 int compaction_swapper_awakened = 0;
2719 int compaction_swapper_abort = 0;
2720
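/*
 * "Ripe" swapout check: when swapping of overage segments is enabled
 * and we are below c_overage_swapped_limit, report whether the oldest
 * segment on the age queue has been there longer than vm_ripe_target_age.
 */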
2721 bool
2722 vm_compressor_swapout_is_ripe()
2723 {
2724 bool is_ripe = false;
2725 if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2726 c_segment_t c_seg;
2727 clock_sec_t now;
2728 clock_sec_t age;
2729 clock_nsec_t nsec;
2730
2731 clock_get_system_nanotime(&now, &nsec);
2732 age = 0;
2733
2734 lck_mtx_lock_spin_always(c_list_lock);
2735
2736 if (!queue_empty(&c_age_list_head)) {
2737 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2738
2739 age = now - c_seg->c_creation_ts;
2740 }
2741 lck_mtx_unlock_always(c_list_lock);
2742
2743 if (age >= vm_ripe_target_age) {
2744 is_ripe = true;
2745 }
2746 }
2747 return is_ripe;
2748 }
2749
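/*
 * Immediate reasons to swap while swap is active: the compressor pool
 * has crossed its swap threshold, the external pageout queue is
 * throttled while anonymous pages are scarce, or the free page count
 * has fallen below the compressor's reserved floor.
 */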
2750 static bool
2751 compressor_swapout_conditions_met(void)
2752 {
2753 bool should_swap = false;
2754 if (COMPRESSOR_NEEDS_TO_SWAP()) {
2755 should_swap = true;
2756 vmcs_stats.compressor_swap_threshold_exceeded++;
2757 }
2758 if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2759 should_swap = true;
2760 vmcs_stats.external_q_throttled++;
2761 }
2762 if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2763 should_swap = true;
2764 vmcs_stats.free_count_below_reserve++;
2765 }
2766 return should_swap;
2767 }
2768
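/*
 * Top-level policy check for the compaction/swap thread: returns true
 * if we should swap (ripe overage segments, swapout thresholds,
 * thrashing or phantom-cache pressure) or, failing that, if we are low
 * enough on compressor segments that a major compaction is required.
 * Under CONFIG_JETSAM, detected thrashing wakes the memorystatus thread
 * and swapping is deferred so that jetsam can act first.
 */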
2769 static bool
2770 compressor_needs_to_swap()
2771 {
2772 bool should_swap = false;
2773 if (vm_compressor_swapout_is_ripe()) {
2774 should_swap = true;
2775 goto check_if_low_space;
2776 }
2777
2778 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2779 should_swap = compressor_swapout_conditions_met();
2780 if (should_swap) {
2781 goto check_if_low_space;
2782 }
2783 }
2784
2785 #if (XNU_TARGET_OS_OSX && __arm64__)
2786 /*
2787 * Thrashing detection disabled.
2788 */
2789 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2790
2791 if (vm_compressor_is_thrashing()) {
2792 should_swap = true;
2793 vmcs_stats.thrashing_detected++;
2794 }
2795
2796 #if CONFIG_PHANTOM_CACHE
2797 if (vm_phantom_cache_check_pressure()) {
2798 os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
2799 should_swap = true;
2800 }
2801 #endif
2802 if (swapout_target_age) {
2803 should_swap = true;
2804 }
2805 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2806
2807 check_if_low_space:
2808
2809 #if CONFIG_JETSAM
2810 if (should_swap || vm_compressor_low_on_space()) {
2811 if (vm_compressor_thrashing_detected == FALSE) {
2812 vm_compressor_thrashing_detected = TRUE;
2813
2814 if (swapout_target_age) {
2815 compressor_thrashing_induced_jetsam++;
2816 } else if (vm_compressor_low_on_space()) {
2817 compressor_thrashing_induced_jetsam++;
2818 } else {
2819 filecache_thrashing_induced_jetsam++;
2820 }
2821 /*
2822 * Wake up the memorystatus thread so that it can return
2823 * the system to a healthy state (by killing processes).
2824 */
2825 memorystatus_thread_wake();
2826 }
2827 /*
2828 * let the jetsam take precedence over
2829 * any major compactions we might have
2830 * been able to do... otherwise we run
2831 * the risk of doing major compactions
2832 * on segments we're about to free up
2833 * due to the jetsam activity.
2834 */
2835 should_swap = false;
2836 if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
2837 memorystatus_respond_to_swap_exhaustion();
2838 }
2839 }
2840 #else /* CONFIG_JETSAM */
2841 if (should_swap && vm_swap_low_on_space()) {
2842 memorystatus_respond_to_swap_exhaustion();
2843 }
2844 #endif /* CONFIG_JETSAM */
2845
2846 if (should_swap == false) {
2847 /*
2848 * vm_compressor_needs_to_major_compact returns true only if we're
2849 * about to run out of available compressor segments... in this
2850 * case, we absolutely need to run a major compaction even if
2851 * we've just kicked off a jetsam or we don't otherwise need to
2852 * swap... terminating objects releases
2853 * pages back to the uncompressed cache, but does not guarantee
2854 * that we will free up even a single compression segment
2855 */
2856 should_swap = vm_compressor_needs_to_major_compact();
2857 if (should_swap) {
2858 vmcs_stats.fragmentation_detected++;
2859 }
2860 }
2861
2862 /*
2863 * returning TRUE when swap_supported == FALSE
2864 * will cause the major compaction engine to
2865 * run, but will not trigger any swapping...
2866 * segments that have been major compacted
2867 * will be moved to the majorcompact queue
2868 */
2869 return should_swap;
2870 }
2871
2872 #if CONFIG_JETSAM
2873 /*
2874 * This function is called from the jetsam thread after killing something to
2875 * mitigate thrashing.
2876 *
2877 * We need to restart our thrashing detection heuristics since memory pressure
2878 * has potentially changed significantly, and we don't want to detect on old
2879 * data from before the jetsam.
2880 */
2881 void
2882 vm_thrashing_jetsam_done(void)
2883 {
2884 vm_compressor_thrashing_detected = FALSE;
2885
2886 /* Were we compressor-thrashing or filecache-thrashing? */
2887 if (swapout_target_age) {
2888 swapout_target_age = 0;
2889 compressor_need_sample_reset = TRUE;
2890 }
2891 #if CONFIG_PHANTOM_CACHE
2892 else {
2893 vm_phantom_cache_restart_sample();
2894 }
2895 #endif
2896 }
2897 #endif /* CONFIG_JETSAM */
2898
2899 uint32_t vm_wake_compactor_swapper_calls = 0;
2900 uint32_t vm_run_compactor_already_running = 0;
2901 uint32_t vm_run_compactor_empty_minor_q = 0;
2902 uint32_t vm_run_compactor_did_compact = 0;
2903 uint32_t vm_run_compactor_waited = 0;
2904
2905 /* run minor compaction right now, if the compaction-swapper thread is not already running */
2906 void
2907 vm_run_compactor(void)
2908 {
2909 if (c_segment_count == 0) {
2910 return;
2911 }
2912
2913 if (os_atomic_load(&c_minor_count, relaxed) == 0) {
2914 vm_run_compactor_empty_minor_q++;
2915 return;
2916 }
2917
2918 lck_mtx_lock_spin_always(c_list_lock);
2919
2920 if (compaction_swapper_running) {
2921 if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2922 vm_run_compactor_already_running++;
2923
2924 lck_mtx_unlock_always(c_list_lock);
2925 return;
2926 }
2927 vm_run_compactor_waited++;
2928
2929 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2930
2931 lck_mtx_unlock_always(c_list_lock);
2932
2933 thread_block(THREAD_CONTINUE_NULL);
2934
2935 return;
2936 }
2937 vm_run_compactor_did_compact++;
2938
2939 fastwake_warmup = FALSE;
2940 compaction_swapper_running = 1;
2941
2942 vm_compressor_do_delayed_compactions(FALSE);
2943
2944 compaction_swapper_running = 0;
2945
2946 lck_mtx_unlock_always(c_list_lock);
2947
2948 thread_wakeup((event_t)&compaction_swapper_running);
2949 }
2950
2951
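/*
 * Wake the compaction/swap thread if it is idle and there is minor or
 * major compaction work pending.
 */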
2952 void
2953 vm_wake_compactor_swapper(void)
2954 {
2955 if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2956 return;
2957 }
2958
2959 if (os_atomic_load(&c_minor_count, relaxed) ||
2960 vm_compressor_needs_to_major_compact()) {
2961 lck_mtx_lock_spin_always(c_list_lock);
2962
2963 fastwake_warmup = FALSE;
2964
2965 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2966 vm_wake_compactor_swapper_calls++;
2967
2968 compaction_swapper_awakened = 1;
2969 thread_wakeup((event_t)&c_compressor_swap_trigger);
2970 }
2971 lck_mtx_unlock_always(c_list_lock);
2972 }
2973 }
2974
2975
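/*
 * Synchronously run a compact-and-swap pass: abort and wait out any
 * in-flight compaction, temporarily treat ripe segments as swappable,
 * requeue ripe major-compacted segments onto the age queue, and then
 * compact and swap.
 */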
2976 void
2977 vm_consider_swapping()
2978 {
2979 assert(VM_CONFIG_SWAP_IS_PRESENT);
2980
2981 lck_mtx_lock_spin_always(c_list_lock);
2982
2983 compaction_swapper_abort = 1;
2984
2985 while (compaction_swapper_running) {
2986 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2987
2988 lck_mtx_unlock_always(c_list_lock);
2989
2990 thread_block(THREAD_CONTINUE_NULL);
2991
2992 lck_mtx_lock_spin_always(c_list_lock);
2993 }
2994 compaction_swapper_abort = 0;
2995 compaction_swapper_running = 1;
2996
2997 vm_swapout_ripe_segments = TRUE;
2998
2999 vm_compressor_process_major_segments(vm_swapout_ripe_segments);
3000
3001 vm_compressor_compact_and_swap(FALSE);
3002
3003 compaction_swapper_running = 0;
3004
3005 vm_swapout_ripe_segments = FALSE;
3006
3007 lck_mtx_unlock_always(c_list_lock);
3008
3009 thread_wakeup((event_t)&compaction_swapper_running);
3010 }
3011
3012
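/*
 * Lightweight check: wake the compaction/swap thread if it still needs
 * to be initialized, or if minor compaction or swapping work is
 * pending; otherwise do nothing.
 */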
3013 void
3014 vm_consider_waking_compactor_swapper(void)
3015 {
3016 bool need_wakeup = false;
3017
3018 if (c_segment_count == 0) {
3019 return;
3020 }
3021
3022 if (compaction_swapper_running || compaction_swapper_awakened) {
3023 return;
3024 }
3025
3026 if (!compaction_swapper_inited && !compaction_swapper_init_now) {
3027 compaction_swapper_init_now = 1;
3028 need_wakeup = true;
3029 } else if (vm_compressor_needs_to_minor_compact() ||
3030 compressor_needs_to_swap()) {
3031 need_wakeup = true;
3032 }
3033
3034 if (need_wakeup) {
3035 lck_mtx_lock_spin_always(c_list_lock);
3036
3037 fastwake_warmup = FALSE;
3038
3039 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3040 memoryshot(DBG_VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
3041
3042 compaction_swapper_awakened = 1;
3043 thread_wakeup((event_t)&c_compressor_swap_trigger);
3044 }
3045 lck_mtx_unlock_always(c_list_lock);
3046 }
3047 }
3048
3049
3050 #define C_SWAPOUT_LIMIT 4
3051 #define DELAYED_COMPACTIONS_PER_PASS 30
3052
3053 /* process segments that are in the minor compaction queue */
3054 void
3055 vm_compressor_do_delayed_compactions(boolean_t flush_all)
3056 {
3057 c_segment_t c_seg;
3058 int number_compacted = 0;
3059 bool needs_to_swap = false;
3060 uint32_t c_swapout_count = 0;
3061
3062
3063 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
3064 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_DEFERRED) | DBG_FUNC_START,
3065 c_minor_count, flush_all);
3066
3067 #if XNU_TARGET_OS_OSX
3068 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
3069 #endif /* XNU_TARGET_OS_OSX */
3070
3071 while (!queue_empty(&c_minor_list_head) && !needs_to_swap) {
3072 c_seg = (c_segment_t)queue_first(&c_minor_list_head);
3073
3074 lck_mtx_lock_spin_always(&c_seg->c_lock);
3075
3076 if (c_seg->c_busy) {
3077 lck_mtx_unlock_always(c_list_lock);
3078 c_seg_wait_on_busy(c_seg);
3079 lck_mtx_lock_spin_always(c_list_lock);
3080
3081 continue;
3082 }
3083 C_SEG_BUSY(c_seg);
3084
3085 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
3086
3087 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3088 number_compacted++;
3089 if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted % DELAYED_COMPACTIONS_PER_PASS) == 0) {
3090 if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
3091 needs_to_swap = true;
3092 }
3093 }
3094 lck_mtx_lock_spin_always(c_list_lock);
3095 }
3096
3097 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
3098 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_DEFERRED) | DBG_FUNC_END,
3099 c_minor_count, number_compacted, needs_to_swap);
3100 }
3101
3102 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3103
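/*
 * Coalesce the (busy, locked) segment c_seg with its neighbors on the
 * same queue, pulling their compressed data into c_seg until it fills
 * up or no suitable neighbor remains; drained donors are minor
 * compacted (and freed if empty).  Returns true if c_seg was compacted
 * as far as possible, false if we bailed early because another thread
 * wants this segment.
 */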
3104 static bool
3105 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
3106 {
3107 /*
3108 * Major compaction
3109 */
3110 bool keep_compacting = true, fully_compacted = true;
3111 queue_head_t *list_head = NULL;
3112 c_segment_t c_seg_next;
3113 uint64_t bytes_to_free = 0, bytes_freed = 0;
3114 uint32_t number_considered = 0;
3115
3116 if (c_seg->c_state == C_ON_AGE_Q) {
3117 assert(!c_seg->c_has_donated_pages);
3118 list_head = &c_age_list_head;
3119 } else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
3120 assert(c_seg->c_has_donated_pages);
3121 list_head = &c_late_swappedin_list_head;
3122 }
3123
3124 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MAJOR) | DBG_FUNC_START,
3125 VM_KERNEL_ADDRHIDE(c_seg), c_seg->c_state,
3126 c_seg->c_bytes_used);
3127
3128 while (keep_compacting == TRUE) {
3129 assert(c_seg->c_busy);
3130
3131 /* look for another segment to consolidate */
3132
3133 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3134
3135 if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
3136 break;
3137 }
3138
3139 assert(c_seg_next->c_state == c_seg->c_state);
3140
3141 number_considered++;
3142
3143 if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3144 break;
3145 }
3146
3147 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3148
3149 if (c_seg_next->c_busy) {
3150 /*
3151 * We are going to block for our neighbor.
3152 * If our c_seg is wanted, we should unbusy
3153 * it because we don't know how long we might
3154 * have to block here.
3155 */
3156 if (c_seg->c_wanted) {
3157 lck_mtx_unlock_always(&c_seg_next->c_lock);
3158 fully_compacted = false;
3159 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3160 vm_pageout_vminfo.vm_compactor_major_compactions_bailed++;
3161 *bail_wanted_cseg = true;
3162 break;
3163 }
3164
3165 lck_mtx_unlock_always(c_list_lock);
3166
3167 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3168
3169 c_seg_wait_on_busy(c_seg_next);
3170 lck_mtx_lock_spin_always(c_list_lock);
3171
3172 continue;
3173 }
3174 /* grab that segment */
3175 C_SEG_BUSY(c_seg_next);
3176
3177 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3178 if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3179 /*
3180 * found an empty c_segment and freed it
3181 * so we can't continue to use c_seg_next
3182 */
3183 bytes_freed += bytes_to_free;
3184 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3185 vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
3186 continue;
3187 }
3188
3189 /* unlock the list ... */
3190 lck_mtx_unlock_always(c_list_lock);
3191
3192 /* do the major compaction */
3193
3194 keep_compacting = c_seg_coalesce(c_seg, c_seg_next);
3195
3196 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3197
3198 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3199
3200 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3201 /*
3202 * run a minor compaction on the donor segment
3203 * since we pulled at least some of its
3204 * data into our target... if we've emptied
3205 * it, now is a good time to free it which
3206 * c_seg_minor_compaction_and_unlock also takes care of
3207 *
3208 * by passing TRUE, we ask for c_busy to be cleared
3209 * and c_wanted to be taken care of
3210 */
3211 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3212 if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3213 bytes_freed += bytes_to_free;
3214 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3215 vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
3216 } else {
3217 bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3218 bytes_freed += bytes_to_free;
3219 }
3220
3221 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3222
3223 /* relock the list */
3224 lck_mtx_lock_spin_always(c_list_lock);
3225
3226 if (c_seg->c_wanted) {
3227 /*
3228 * Our c_seg is in demand. Let's
3229 * unbusy it and wakeup the waiters
3230 * instead of continuing the compaction
3231 * because we could be in this loop
3232 * for a while.
3233 */
3234 fully_compacted = false;
3235 *bail_wanted_cseg = true;
3236 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3237 vm_pageout_vminfo.vm_compactor_major_compactions_bailed++;
3238 break;
3239 }
3240 } /* major compaction */
3241
3242 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3243
3244 *c_seg_considered += number_considered;
3245 *total_bytes_freed += bytes_freed;
3246
3247 lck_mtx_lock_spin_always(&c_seg->c_lock);
3248 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_MAJOR) | DBG_FUNC_END,
3249 fully_compacted, *bail_wanted_cseg,
3250 bytes_freed, c_seg->c_bytes_used);
3251 return fully_compacted;
3252 }
3253
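/*
 * TIME_SUB: subtract (secs, frac) from (rsecs, rfrac) in place, where
 * the fractional parts count in units of 'unit' per second, borrowing
 * from rsecs when the fractional part underflows.
 */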
3254 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
3255 MACRO_BEGIN \
3256 if ((int)((rfrac) -= (frac)) < 0) { \
3257 (rfrac) += (unit); \
3258 (rsecs) -= 1; \
3259 } \
3260 (rsecs) -= (secs); \
3261 MACRO_END
3262
3263 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3264 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3265 uint64_t c_process_major_reports = 0;
3266 clock_sec_t c_process_major_max_sec = 0;
3267 clock_nsec_t c_process_major_max_nsec = 0;
3268 uint32_t c_process_major_peak_segcount = 0;
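/*
 * Drain the major-compaction queue back onto the age queue so its
 * segments can again be considered for swapout.  With ripe_age_only,
 * only segments older than vm_ripe_target_age are moved, and only while
 * we are below the overage-swap limit.  The queue is processed in
 * batches of c_process_major_yield_after segments, dropping c_list_lock
 * between batches, and a message is printed when a batch takes longer
 * than c_process_major_report_over_ms.
 */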
3269 static void
3270 vm_compressor_process_major_segments(bool ripe_age_only)
3271 {
3272 c_segment_t c_seg = NULL;
3273 int count = 0, total = 0, breaks = 0;
3274 clock_sec_t start_sec, end_sec;
3275 clock_nsec_t start_nsec, end_nsec;
3276 clock_nsec_t report_over_ns;
3277
3278 if (queue_empty(&c_major_list_head)) {
3279 return;
3280 }
3281
3282 // printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3283 if (c_process_major_report_over_ms != 0) {
3284 report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3285 } else {
3286 report_over_ns = (clock_nsec_t)-1;
3287 }
3288
3289 if (ripe_age_only) {
3290 if (c_overage_swapped_count >= c_overage_swapped_limit) {
3291 /*
3292 * Return while we wait for the overage segments
3293 * in our queue to get pushed out first.
3294 */
3295 return;
3296 }
3297 }
3298
3299 clock_get_system_nanotime(&start_sec, &start_nsec);
3300 while (!queue_empty(&c_major_list_head)) {
3301 if (!ripe_age_only) {
3302 /*
3303 * Start from the end to preserve aging order. The newer
3304 * segments are at the tail and so need to be inserted in
3305 * the aging queue in this way so we have the older segments
3306 * at the end of the AGE_Q.
3307 */
3308 c_seg = (c_segment_t)queue_last(&c_major_list_head);
3309 } else {
3310 c_seg = (c_segment_t)queue_first(&c_major_list_head);
3311 if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3312 /*
3313 * We have found the first segment in our queue that is not ripe. Segments after it
3314 * will be the same. So let's bail here. Return with c_list_lock held.
3315 */
3316 break;
3317 }
3318 }
3319
3320 lck_mtx_lock_spin_always(&c_seg->c_lock);
3321 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3322 lck_mtx_unlock_always(&c_seg->c_lock);
3323
3324 count++;
3325 if (count == c_process_major_yield_after ||
3326 queue_empty(&c_major_list_head)) {
3327 /* done or time to take a break */
3328 } else {
3329 /* keep going */
3330 continue;
3331 }
3332
3333 total += count;
3334 clock_get_system_nanotime(&end_sec, &end_nsec);
3335 TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3336 if (end_sec > c_process_major_max_sec) {
3337 c_process_major_max_sec = end_sec;
3338 c_process_major_max_nsec = end_nsec;
3339 } else if (end_sec == c_process_major_max_sec &&
3340 end_nsec > c_process_major_max_nsec) {
3341 c_process_major_max_nsec = end_nsec;
3342 }
3343 if (total > c_process_major_peak_segcount) {
3344 c_process_major_peak_segcount = total;
3345 }
3346 if (end_sec > 0 ||
3347 end_nsec >= report_over_ns) {
3348 /* we used more than expected */
3349 c_process_major_reports++;
3350 printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3351 __FUNCTION__, count, total,
3352 end_sec, end_nsec, breaks);
3353 }
3354 if (queue_empty(&c_major_list_head)) {
3355 /* done */
3356 break;
3357 }
3358 /* take a break to allow someone else to grab the lock */
3359 lck_mtx_unlock_always(c_list_lock);
3360 mutex_pause(0); /* 10 microseconds */
3361 lck_mtx_lock_spin_always(c_list_lock);
3362 /* start again */
3363 clock_get_system_nanotime(&start_sec, &start_nsec);
3364 count = 0;
3365 breaks++;
3366 }
3367 }
3368
3369 /*
3370 * macOS special swappable csegs -> early_swapin queue
3371 * non-macOS special swappable+non-freezer csegs -> late_swapin queue
3372 * Processing special csegs means minor compacting each cseg and then
3373 * major compacting it and putting it on the early or late
3374 * (depending on platform) swapout queue. tag:DONATE
3375 */
3376 static void
3377 vm_compressor_process_special_swapped_in_segments_locked(void)
3378 {
3379 c_segment_t c_seg = NULL;
3380 bool switch_state = true, bail_wanted_cseg = false;
3381 unsigned int yield_after_considered_per_pass = 0;
3382 unsigned int total_considered = 0, total_bailed = 0;
3383 uint64_t total_bytes_freed = 0;
3384 queue_head_t *special_swappedin_list_head;
3385
3386 #if XNU_TARGET_OS_OSX
3387 special_swappedin_list_head = &c_early_swappedin_list_head;
3388 #else /* XNU_TARGET_OS_OSX */
3389 if (memorystatus_swap_all_apps) {
3390 special_swappedin_list_head = &c_late_swappedin_list_head;
3391 } else {
3392 /* called on unsupported config */
3393 return;
3394 }
3395 #endif /* XNU_TARGET_OS_OSX */
3396
3397 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_SPECIAL) | DBG_FUNC_START,
3398 c_early_swappedin_count, c_late_swappedin_count);
3399
3400 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3401 while (!queue_empty(special_swappedin_list_head)) {
3402 uint64_t cur_bytes_freed = 0;
3403 uint32_t cur_considered = 0;
3404
3405 c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3406
3407 lck_mtx_lock_spin_always(&c_seg->c_lock);
3408
3409 if (c_seg->c_busy) {
3410 lck_mtx_unlock_always(c_list_lock);
3411 c_seg_wait_on_busy(c_seg);
3412 lck_mtx_lock_spin_always(c_list_lock);
3413 continue;
3414 }
3415
3416 C_SEG_BUSY(c_seg);
3417 lck_mtx_unlock_always(&c_seg->c_lock);
3418 lck_mtx_unlock_always(c_list_lock);
3419
3420 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3421
3422 lck_mtx_lock_spin_always(&c_seg->c_lock);
3423
3424 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3425 /*
3426 * found an empty c_segment and freed it
3427 * so go grab the next guy in the queue
3428 */
3429 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3430 lck_mtx_lock_spin_always(c_list_lock);
3431 continue;
3432 }
3433
3434 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3435 lck_mtx_lock_spin_always(c_list_lock);
3436
3437 switch_state = vm_compressor_major_compact_cseg(c_seg, &cur_considered, &bail_wanted_cseg, &cur_bytes_freed);
3438 assert(c_seg->c_busy);
3439 assert(!c_seg->c_on_minorcompact_q);
3440
3441 if (switch_state) {
3442 if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3443 /*
3444 * Ordinarily we let swapped in segments age out + get
3445 * major compacted with the rest of the c_segs on the ageQ.
3446 * But the early donated c_segs, if well compacted, should be
3447 * kept ready to be swapped out if needed. These are typically
3448 * describing memory belonging to a leaky app (macOS) or a swap-
3449 * capable app (iPadOS) and for the latter we can keep these
3450 * around longer because we control the triggers in the memorystatus
3451 * subsystem
3452 */
3453 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3454 }
3455 }
3456
3457 C_SEG_WAKEUP_DONE(c_seg);
3458
3459 lck_mtx_unlock_always(&c_seg->c_lock);
3460
3461 total_considered += cur_considered;
3462 total_bytes_freed += cur_bytes_freed;
3463 if (bail_wanted_cseg) {
3464 total_bailed++;
3465 }
3466
3467 if (cur_considered >= yield_after_considered_per_pass) {
3468 if (bail_wanted_cseg) {
3469 /*
3470 * We stopped major compactions on a c_seg
3471 * that is wanted. We don't know the priority
3472 * of the waiter unfortunately but we are at
3473 * a very high priority and so, just in case
3474 * the waiter is a critical system daemon or
3475 * UI thread, let's give up the CPU in case
3476 * the system is running a few CPU intensive
3477 * tasks.
3478 */
3479 bail_wanted_cseg = false;
3480 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_START);
3481 lck_mtx_unlock_always(c_list_lock);
3482
3483 mutex_pause(2); /* 100us yield */
3484
3485 lck_mtx_lock_spin_always(c_list_lock);
3486 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_END);
3487 }
3488
3489 cur_considered = 0;
3490 }
3491 }
3492
3493 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_SPECIAL) | DBG_FUNC_END,
3494 total_considered, total_bailed, total_bytes_freed);
3495 }
3496
3497 void
3498 vm_compressor_process_special_swapped_in_segments(void)
3499 {
3500 lck_mtx_lock_spin_always(c_list_lock);
3501 vm_compressor_process_special_swapped_in_segments_locked();
3502 lck_mtx_unlock_always(c_list_lock);
3503 }
3504
3505 #define ENABLE_DYNAMIC_SWAPPED_AGE_LIMIT 1
3506
3507 /* minimum time that segments can be in swappedin q as a grace period after they were swapped-in
3508 * before they are added to age-q */
3509 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT_LOW 1 /* seconds */
3510 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT_NORMAL 10 /* seconds */
3511 #define C_AGE_Q_COUNT_LOW_THRESHOLD 50
3512
3513 /*
3514 * Processing regular csegs means aging them.
3515 */
3516 static void
3517 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3518 {
3519 c_segment_t c_seg;
3520 clock_sec_t now;
3521 clock_nsec_t nsec;
3522 unsigned int num_processed = 0;
3523
3524 unsigned long limit = C_SEGMENT_SWAPPEDIN_AGE_LIMIT_NORMAL;
3525
3526 #ifdef ENABLE_DYNAMIC_SWAPPED_AGE_LIMIT
3527 /* In normal operation, segments are kept in the swapped-in-q for a grace period of 10 seconds so that whoever
3528 * needed to decompress something from a segment that was just swapped-in would have a chance to decompress
3529 * more out of it.
3530 * If the system is in high memory pressure state, this may cause the age-q to be completely empty so that
3531 * there are no candidate segments for swap-out. In this state we use a lower limit of 1 second.
3532 * condition 1: the age-q absolute size is too low
3533 * condition 2: there are more segments in swapped-in-q than in age-q
3534 * each of these represents a bad situation which we want to try to alleviate by moving more segments from
3535 * swapped-in-q to age-q so that we have a better selection of who to swap-out
3536 */
3537 if (c_age_count < C_AGE_Q_COUNT_LOW_THRESHOLD || c_age_count < c_regular_swappedin_count) {
3538 limit = C_SEGMENT_SWAPPEDIN_AGE_LIMIT_LOW;
3539 }
3540 #endif
3541 KDBG(VM_COMPRESSOR_EVENTID(DBG_PROCESS_SWAPPEDIN) | DBG_FUNC_START,
3542 c_regular_swappedin_count, c_age_count, limit, flush_all);
3543
3544 clock_get_system_nanotime(&now, &nsec);
3545
3546 while (!queue_empty(&c_regular_swappedin_list_head)) {
3547 c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3548
3549 if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < limit) {
3550 /* swappedin q is sorted by the order of time of addition so if we reached a seg that's too
3551 * young, we know that all the rest after it are also too young */
3552 break;
3553 }
3554
3555 lck_mtx_lock_spin_always(&c_seg->c_lock);
3556
3557 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3558 c_seg->c_agedin_ts = (uint32_t) now;
3559 num_processed++;
3560
3561 lck_mtx_unlock_always(&c_seg->c_lock);
3562 }
3563 KDBG(VM_COMPRESSOR_EVENTID(DBG_PROCESS_SWAPPEDIN) | DBG_FUNC_END,
3564 num_processed);
3565 }
3566
3567
3568 extern int vm_num_swap_files;
3569 extern int vm_num_pinned_swap_files;
3570 extern int vm_swappin_enabled;
3571
3572 extern unsigned int vm_swapfile_total_segs_used;
3573 extern unsigned int vm_swapfile_total_segs_alloced;
3574
3575
3576 void
3577 vm_compressor_flush(void)
3578 {
3579 uint64_t vm_swap_put_failures_at_start;
3580 wait_result_t wait_result = 0;
3581 AbsoluteTime startTime, endTime;
3582 clock_sec_t now_sec;
3583 clock_nsec_t now_nsec;
3584 uint64_t nsec;
3585 c_segment_t c_seg, c_seg_next;
3586
3587 HIBLOG("vm_compressor_flush - starting\n");
3588
3589 clock_get_uptime(&startTime);
3590
3591 lck_mtx_lock_spin_always(c_list_lock);
3592
3593 fastwake_warmup = FALSE;
3594 compaction_swapper_abort = 1;
3595
3596 while (compaction_swapper_running) {
3597 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3598
3599 lck_mtx_unlock_always(c_list_lock);
3600
3601 thread_block(THREAD_CONTINUE_NULL);
3602
3603 lck_mtx_lock_spin_always(c_list_lock);
3604 }
3605 compaction_swapper_abort = 0;
3606 compaction_swapper_running = 1;
3607
3608 hibernate_flushing = TRUE;
3609 hibernate_no_swapspace = FALSE;
3610 hibernate_flush_timed_out = FALSE;
3611 c_generation_id_flush_barrier = c_generation_id + 1000;
3612
3613 clock_get_system_nanotime(&now_sec, &now_nsec);
3614 hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3615
3616 vm_swap_put_failures_at_start = vm_swap_put_failures;
3617
3618 /*
3619 * We are about to hibernate and so we want all segments flushed to disk.
3620 * Segments that are on the major compaction queue won't be considered in
3621 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3622 * the ageQ for consideration.
3623 */
3624 if (!queue_empty(&c_major_list_head)) {
3625 c_seg = (c_segment_t)queue_first(&c_major_list_head);
3626
3627 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3628 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3629 lck_mtx_lock_spin_always(&c_seg->c_lock);
3630 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3631 lck_mtx_unlock_always(&c_seg->c_lock);
3632 c_seg = c_seg_next;
3633 }
3634 }
3635 vm_compressor_compact_and_swap(TRUE);
3636 /* need to wait here since the swap thread may also be running in parallel and handling segments */
3637 while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3638 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3639
3640 lck_mtx_unlock_always(c_list_lock);
3641
3642 wait_result = thread_block(THREAD_CONTINUE_NULL);
3643
3644 lck_mtx_lock_spin_always(c_list_lock);
3645
3646 if (wait_result == THREAD_TIMED_OUT) {
3647 break;
3648 }
3649 }
3650 hibernate_flushing = FALSE;
3651 compaction_swapper_running = 0;
3652
3653 if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3654 HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3655 vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3656 }
3657
3658 lck_mtx_unlock_always(c_list_lock);
3659
3660 thread_wakeup((event_t)&compaction_swapper_running);
3661
3662 clock_get_uptime(&endTime);
3663 SUB_ABSOLUTETIME(&endTime, &startTime);
3664 absolutetime_to_nanoseconds(endTime, &nsec);
3665
3666 HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3667 nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3668 }
3669
3670
3671 int compaction_swap_trigger_thread_awakened = 0;
3672
3673 static void
3674 vm_compressor_swap_trigger_thread(void)
3675 {
3676 current_thread()->options |= TH_OPT_VMPRIV;
3677
3678 /*
3679 * compaction_swapper_init_now is set when the first call to
3680 * vm_consider_waking_compactor_swapper is made from
3681 * vm_pageout_scan... since this function is called upon
3682 * thread creation, we want to make sure to delay adjusting
3683 * the tuneables until we are awakened via vm_pageout_scan
3684 * so that we are at a point where the vm_swapfile_open will
3685 * be operating on the correct directory (in case the default
3686 * of using the VM volume is overridden by the dynamic_pager)
3687 */
3688 if (compaction_swapper_init_now) {
3689 vm_compaction_swapper_do_init();
3690
3691 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3692 thread_vm_bind_group_add();
3693 }
3694 #if CONFIG_THREAD_GROUPS
3695 thread_group_vm_add();
3696 #endif
3697 thread_set_thread_name(current_thread(), "VM_cswap_trigger");
3698 compaction_swapper_init_now = 0;
3699 }
3700 lck_mtx_lock_spin_always(c_list_lock);
3701
3702 compaction_swap_trigger_thread_awakened++;
3703 compaction_swapper_awakened = 0;
3704
3705 if (compaction_swapper_running == 0) {
3706 compaction_swapper_running = 1;
3707
3708 vm_compressor_compact_and_swap(FALSE);
3709
3710 compaction_swapper_running = 0;
3711 }
3712 assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3713
3714 if (compaction_swapper_running == 0) {
3715 thread_wakeup((event_t)&compaction_swapper_running);
3716 }
3717
3718 lck_mtx_unlock_always(c_list_lock);
3719
3720 thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3721
3722 /* NOTREACHED */
3723 }
3724
3725
3726 void
3727 vm_compressor_record_warmup_start(void)
3728 {
3729 c_segment_t c_seg;
3730
3731 lck_mtx_lock_spin_always(c_list_lock);
3732
3733 if (first_c_segment_to_warm_generation_id == 0) {
3734 if (!queue_empty(&c_age_list_head)) {
3735 c_seg = (c_segment_t)queue_last(&c_age_list_head);
3736
3737 first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3738 } else {
3739 first_c_segment_to_warm_generation_id = 0;
3740 }
3741
3742 fastwake_recording_in_progress = TRUE;
3743 }
3744 lck_mtx_unlock_always(c_list_lock);
3745 }
3746
3747
3748 void
3749 vm_compressor_record_warmup_end(void)
3750 {
3751 c_segment_t c_seg;
3752
3753 lck_mtx_lock_spin_always(c_list_lock);
3754
3755 if (fastwake_recording_in_progress == TRUE) {
3756 if (!queue_empty(&c_age_list_head)) {
3757 c_seg = (c_segment_t)queue_last(&c_age_list_head);
3758
3759 last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3760 } else {
3761 last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3762 }
3763
3764 fastwake_recording_in_progress = FALSE;
3765
3766 HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3767 }
3768 lck_mtx_unlock_always(c_list_lock);
3769 }
3770
3771
3772 #define DELAY_TRIM_ON_WAKE_NS (25 * NSEC_PER_SEC)
3773
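/*
 * Push dont_trim_until_ts DELAY_TRIM_ON_WAKE_NS into the future so that
 * trim work is held off for a grace period (e.g. right after a wake).
 */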
3774 void
3775 vm_compressor_delay_trim(void)
3776 {
3777 uint64_t now = mach_absolute_time();
3778 uint64_t delay_abstime;
3779 nanoseconds_to_absolutetime(DELAY_TRIM_ON_WAKE_NS, &delay_abstime);
3780 dont_trim_until_ts = now + delay_abstime;
3781 }
3782
3783
3784 void
3785 vm_compressor_do_warmup(void)
3786 {
3787 lck_mtx_lock_spin_always(c_list_lock);
3788
3789 if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3790 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3791
3792 lck_mtx_unlock_always(c_list_lock);
3793 return;
3794 }
3795
3796 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3797 fastwake_warmup = TRUE;
3798
3799 compaction_swapper_awakened = 1;
3800 thread_wakeup((event_t)&c_compressor_swap_trigger);
3801 }
3802 lck_mtx_unlock_always(c_list_lock);
3803 }
3804
3805 void
3806 do_fastwake_warmup_all(void)
3807 {
3808 lck_mtx_lock_spin_always(c_list_lock);
3809
3810 if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3811 lck_mtx_unlock_always(c_list_lock);
3812 return;
3813 }
3814
3815 fastwake_warmup = TRUE;
3816
3817 do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3818
3819 do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3820
3821 fastwake_warmup = FALSE;
3822
3823 lck_mtx_unlock_always(c_list_lock);
3824 }
3825
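/*
 * Swap segments back in from the given queue ahead of demand ("fastwake"
 * warmup). When consider_all_cseg is FALSE, only segments whose generation
 * ids fall inside the recorded warmup window are brought in, and we stop
 * early if free memory drops below a quarter of AVAILABLE_MEMORY.
 * Called, and returns, with c_list_lock held; the swapin I/O is issued at a
 * throttled tier while the warmup runs.
 */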
3826 void
3827 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3828 {
3829 c_segment_t c_seg = NULL;
3830 AbsoluteTime startTime, endTime;
3831 uint64_t nsec;
3832
3833
3834 HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3835
3836 clock_get_uptime(&startTime);
3837
3838 lck_mtx_unlock_always(c_list_lock);
3839
3840 proc_set_thread_policy(current_thread(),
3841 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3842
3843 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3844
3845 lck_mtx_lock_spin_always(c_list_lock);
3846
3847 while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3848 c_seg = (c_segment_t) queue_first(c_queue);
3849
3850 if (consider_all_cseg == FALSE) {
3851 if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3852 c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3853 break;
3854 }
3855
3856 if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3857 break;
3858 }
3859 }
3860
3861 lck_mtx_lock_spin_always(&c_seg->c_lock);
3862 lck_mtx_unlock_always(c_list_lock);
3863
3864 if (c_seg->c_busy) {
3865 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3866 c_seg_wait_on_busy(c_seg);
3867 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3868 } else {
3869 if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3870 lck_mtx_unlock_always(&c_seg->c_lock);
3871 }
3872 c_segment_warmup_count++;
3873
3874 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3875 vm_pageout_io_throttle();
3876 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3877 }
3878 lck_mtx_lock_spin_always(c_list_lock);
3879 }
3880 lck_mtx_unlock_always(c_list_lock);
3881
3882 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3883
3884 proc_set_thread_policy(current_thread(),
3885 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3886
3887 clock_get_uptime(&endTime);
3888 SUB_ABSOLUTETIME(&endTime, &startTime);
3889 absolutetime_to_nanoseconds(endTime, &nsec);
3890
3891 HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3892
3893 lck_mtx_lock_spin_always(c_list_lock);
3894
3895 if (consider_all_cseg == FALSE) {
3896 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3897 }
3898 }
3899
3900 extern bool vm_swapout_thread_running;
3901 extern boolean_t compressor_store_stop_compaction;
3902
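/*
 * Main compact-and-swap sweep, run with c_list_lock held (by the swap
 * trigger thread, or by vm_compressor_flush() with flush_all == TRUE when
 * preparing to hibernate). It performs any pending fastwake warmup and
 * delayed minor compactions, ages the swapped-in queues, and then walks the
 * AGE_Q: each segment is minor compacted, major compacted via
 * vm_compressor_major_compact_cseg(), and queued for swapout (or parked on
 * the MAJORCOMPACT_Q when swapout conditions aren't met), yielding
 * periodically so higher priority work can run.
 */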
3903 void
3904 vm_compressor_compact_and_swap(boolean_t flush_all)
3905 {
3906 c_segment_t c_seg;
3907 bool switch_state, bail_wanted_cseg = false;
3908 clock_sec_t now;
3909 clock_nsec_t nsec;
3910 mach_timespec_t start_ts, end_ts;
3911 unsigned int number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3912 uint64_t bytes_freed, delta_usec;
3913 uint32_t c_swapout_count = 0;
3914
3915 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3916
3917 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_START,
3918 vm_compressor_fragmentation_level(),
3919 VM_PAGE_COMPRESSOR_COUNT,
3920 c_segment_count - c_swappedout_count - c_swappedout_sparse_count,
3921 flush_all);
3922
3923 if (fastwake_warmup == TRUE) {
3924 uint64_t starting_warmup_count;
3925
3926 starting_warmup_count = c_segment_warmup_count;
3927
3928 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3929 first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3930 do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3931 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3932
3933 fastwake_warmup = FALSE;
3934 }
3935
3936 #if (XNU_TARGET_OS_OSX && __arm64__)
3937 /*
3938 * Re-considering major csegs showed benefits on all platforms by
3939 * significantly reducing fragmentation and getting back memory.
3940 * However, on smaller devices, e.g. the watch, there was increased power
3941 * use for the additional compactions. And the turnover in csegs on
3942 * those smaller platforms is high enough in the decompression/free
3943 * path that we can skip reconsidering them here because we already
3944 * consider them for major compaction in those paths.
3945 */
3946 vm_compressor_process_major_segments(false /*all segments and not just the ripe-aged ones*/);
3947 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3948
3949 /*
3950 * it's possible for the c_age_list_head to be empty if we
3951 * hit our limits for growing the compressor pool and we subsequently
3952 * hibernated... on the next hibernation we could see the queue as
3953 * empty and not proceed even though we have a bunch of segments on
3954 * the swapped in queue that need to be dealt with.
3955 */
3956 vm_compressor_do_delayed_compactions(flush_all);
3957 vm_compressor_process_special_swapped_in_segments_locked();
3958 vm_compressor_process_regular_swapped_in_segments(flush_all);
3959
3960 /*
3961 * we only need to grab the timestamp once per
3962 * invocation of this function since the
3963 * timescale we're interested in is measured
3964 * in days
3965 */
3966 clock_get_system_nanotime(&now, &nsec);
3967
3968 start_ts.tv_sec = (int) now;
3969 start_ts.tv_nsec = nsec;
3970 delta_usec = 0;
3971 number_considered = 0;
3972 wanted_cseg_found = 0;
3973 number_yields = 0;
3974 bytes_freed = 0;
3975 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3976
3977 /**
3978 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3979 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3980 */
3981 vm_log_debug("before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3982
3983 while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3984 if (hibernate_flushing == TRUE) {
3985 clock_sec_t sec;
3986
3987 if (hibernate_should_abort()) {
3988 HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3989 break;
3990 }
3991 if (hibernate_no_swapspace == TRUE) {
3992 HIBLOG("vm_compressor_flush - out of swap space\n");
3993 break;
3994 }
3995 if (vm_swap_files_pinned() == FALSE) {
3996 HIBLOG("vm_compressor_flush - unpinned swap files\n");
3997 break;
3998 }
3999 if (hibernate_in_progress_with_pinned_swap == TRUE &&
4000 (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
4001 HIBLOG("vm_compressor_flush - out of pinned swap space\n");
4002 break;
4003 }
4004 clock_get_system_nanotime(&sec, &nsec);
4005
4006 if (sec > hibernate_flushing_deadline) {
4007 hibernate_flush_timed_out = TRUE;
4008 HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
4009 break;
4010 }
4011 }
4012
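/*
 * Throttle when the swapout queues are already backed up: wake the swapout
 * thread and briefly block (dropping c_list_lock) rather than queueing even
 * more segments behind it.
 */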
4013 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
4014 if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
4015 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
4016
4017 if (!vm_swapout_thread_running) {
4018 thread_wakeup((event_t)&vm_swapout_thread);
4019 }
4020
4021 lck_mtx_unlock_always(c_list_lock);
4022
4023 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
4024
4025 thread_block(THREAD_CONTINUE_NULL);
4026
4027 lck_mtx_lock_spin_always(c_list_lock);
4028 }
4029 /*
4030 * Minor compactions
4031 */
4032 vm_compressor_do_delayed_compactions(flush_all);
4033
4034 /*
4035 * vm_compressor_process_early_swapped_in_segments()
4036 * might be too aggressive. So OFF for now.
4037 */
4038 vm_compressor_process_regular_swapped_in_segments(flush_all);
4039
4040 /* Recompute because we dropped the c_list_lock above*/
4041 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
4042 if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
4043 /*
4044 * we timed out on the above thread_block,
4045 * so let's loop around and try again...
4046 * the timeout allows us to continue
4047 * to do minor compactions to make
4048 * more memory available
4049 */
4050 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
4051
4052 continue;
4053 }
4054
4055 /*
4056 * Swap out segments?
4057 */
4058 if (flush_all == FALSE) {
4059 bool needs_to_swap;
4060
4061 lck_mtx_unlock_always(c_list_lock);
4062
4063 needs_to_swap = compressor_needs_to_swap();
4064
4065 lck_mtx_lock_spin_always(c_list_lock);
4066
4067 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
4068
4069 if (!needs_to_swap) {
4070 break;
4071 }
4072 }
4073 if (queue_empty(&c_age_list_head)) {
4074 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
4075 break;
4076 }
4077 c_seg = (c_segment_t) queue_first(&c_age_list_head);
4078
4079 assert(c_seg->c_state == C_ON_AGE_Q);
4080
4081 if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
4082 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
4083 break;
4084 }
4085
4086 lck_mtx_lock_spin_always(&c_seg->c_lock);
4087
4088 if (c_seg->c_busy) {
4089 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
4090
4091 lck_mtx_unlock_always(c_list_lock);
4092 c_seg_wait_on_busy(c_seg);
4093 lck_mtx_lock_spin_always(c_list_lock);
4094
4095 continue;
4096 }
4097 C_SEG_BUSY(c_seg);
4098
4099 if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
4100 /*
4101 * found an empty c_segment and freed it
4102 * so go grab the next guy in the queue
4103 */
4104 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
4105 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
4106 vm_pageout_vminfo.vm_compactor_major_compaction_segments_freed++;
4107 continue;
4108 }
4109
4110 switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
4111 if (bail_wanted_cseg) {
4112 wanted_cseg_found++;
4113 bail_wanted_cseg = false;
4114 }
4115
4116 assert(c_seg->c_busy);
4117 assert(!c_seg->c_on_minorcompact_q);
4118
4119 if (switch_state) {
4120 if (VM_CONFIG_SWAP_IS_ACTIVE) {
4121 int new_state = C_ON_SWAPOUT_Q;
4122 #if (XNU_TARGET_OS_OSX && __arm64__)
4123 if (flush_all == false && compressor_swapout_conditions_met() == false) {
4124 new_state = C_ON_MAJORCOMPACT_Q;
4125 }
4126 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
4127
4128 if (new_state == C_ON_SWAPOUT_Q) {
4129 /*
4130 * This mode of putting a generic c_seg on the swapout list is
4131 * only supported when we have general swapping enabled
4132 */
4133 clock_sec_t lnow;
4134 clock_nsec_t lnsec;
4135 clock_get_system_nanotime(&lnow, &lnsec);
4136 if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
4137 vmcs_stats.unripe_under_30s++;
4138 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
4139 vmcs_stats.unripe_under_60s++;
4140 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
4141 vmcs_stats.unripe_under_300s++;
4142 }
4143 }
4144
4145 c_seg_switch_state(c_seg, new_state, FALSE);
4146 } else {
4147 if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
4148 assert(VM_CONFIG_SWAP_IS_PRESENT);
4149 /*
4150 * we are running compressor sweeps with swap-behind
4151 * make sure the c_seg has aged enough before swapping it
4152 * out...
4153 */
4154 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
4155 c_seg->c_overage_swap = TRUE;
4156 c_overage_swapped_count++;
4157 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
4158 }
4159 }
4160 }
4161 if (c_seg->c_state == C_ON_AGE_Q) {
4162 /*
4163 * this c_seg didn't get moved to the swapout queue
4164 * so we need to move it out of the way...
4165 * we just did a major compaction on it so put it
4166 * on that queue
4167 */
4168 c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
4169 } else {
4170 c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
4171 vm_pageout_vminfo.vm_compactor_swapout_bytes_wasted += c_seg_bufsize - c_seg->c_bytes_used;
4172 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
4173 vm_pageout_vminfo.vm_compactor_swapouts_queued++;
4174 }
4175 }
4176
4177 C_SEG_WAKEUP_DONE(c_seg);
4178
4179 lck_mtx_unlock_always(&c_seg->c_lock);
4180
4181 /*
4182 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
4183 * On systems _without_ general swap, it's the responsibility of the memorystatus
4184 * subsystem to wake up the swapper.
4185 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
4186 * this policy.
4187 */
4188 if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
4189 /*
4190 * We don't pause/yield here because we will either
4191 * yield below or at the top of the loop with the
4192 * assert_wait_timeout.
4193 */
4194 if (!vm_swapout_thread_running) {
4195 thread_wakeup((event_t)&vm_swapout_thread);
4196 }
4197 }
4198
4199 if (number_considered >= yield_after_considered_per_pass) {
4200 if (wanted_cseg_found) {
4201 /*
4202 * We stopped major compactions on a c_seg
4203 * that is wanted. We don't know the priority
4204 * of the waiter unfortunately but we are at
4205 * a very high priority and so, just in case
4206 * the waiter is a critical system daemon or
4207 * UI thread, let's give up the CPU in case
4208 * the system is running a few CPU intensive
4209 * tasks.
4210 */
4211 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_START);
4212 lck_mtx_unlock_always(c_list_lock);
4213
4214 mutex_pause(2); /* 100us yield */
4215
4216 number_yields++;
4217
4218 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
4219
4220 lck_mtx_lock_spin_always(c_list_lock);
4221 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_PAUSE) | DBG_FUNC_END);
4222 }
4223
4224 number_considered = 0;
4225 wanted_cseg_found = 0;
4226 }
4227 }
4228 clock_get_system_nanotime(&now, &nsec);
4229
4230 end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4231
4232 SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4233
4234 delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4235
4236 delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
4237
4238 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed = bytes_freed;
4239 vm_pageout_vminfo.vm_compactor_major_compaction_bytes_freed += bytes_freed;
4240 c_seg_major_compact_stats[c_seg_major_compact_stats_now].runtime_us = delta_usec;
4241
4242 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_NONE,
4243 c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission,
4244 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions,
4245 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts,
4246 c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts);
4247 KDBG(VM_COMPRESSOR_EVENTID(DBG_COMPACT_AND_SWAP) | DBG_FUNC_END,
4248 c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions,
4249 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots,
4250 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs,
4251 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed);
4252
4253 c_seg_major_compact_stats_now = (c_seg_major_compact_stats_now + 1) % C_SEG_MAJOR_COMPACT_STATS_MAX;
4254
4255 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4256 }
4257
4258
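/*
 * Return the caller's current filling c_seg, allocating and switching a
 * fresh one to C_IS_FILLING if *current_chead is NULL. Growing the pool may
 * populate another page of the c_segments array and push the new entries
 * onto the free list. Returns NULL (and sets *nearing_limits as
 * appropriate) once the compressed page or segment limits are hit.
 * On success, returns with the c_seg lock held, PAGE_REPLACEMENT_DISALLOWED
 * set, c_nextslot allocated and enough of c_store.c_buffer populated to
 * compress into.
 */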
4259 static c_segment_t
4260 c_seg_allocate(c_segment_t *current_chead, bool *nearing_limits)
4261 {
4262 c_segment_t c_seg;
4263 int min_needed;
4264 int size_to_populate;
4265 c_segment_t *donate_queue_head;
4266 uint32_t compressed_pages;
4267
4268 *nearing_limits = false;
4269
4270 compressed_pages = vm_compressor_pages_compressed();
4271
4272 if (compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4273 *nearing_limits = true;
4274 }
4275 if (compressed_pages >= c_segment_pages_compressed_limit) {
4276 /*
4277 * We've reached the compressed pages limit, don't return
4278 * a segment to compress into
4279 */
4280 return NULL;
4281 }
4282
4283 if ((c_seg = *current_chead) == NULL) {
4284 uint32_t c_segno;
4285
4286 lck_mtx_lock_spin_always(c_list_lock);
4287
4288 while (c_segments_busy == TRUE) {
4289 assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4290
4291 lck_mtx_unlock_always(c_list_lock);
4292
4293 thread_block(THREAD_CONTINUE_NULL);
4294
4295 lck_mtx_lock_spin_always(c_list_lock);
4296 }
4297 if (c_free_segno_head == (uint32_t)-1) {
4298 uint32_t c_segments_available_new;
4299
4300 /*
4301 * We may have dropped the c_list_lock, re-evaluate
4302 * the compressed pages count
4303 */
4304 compressed_pages = vm_compressor_pages_compressed();
4305
4306 if (c_segments_available >= c_segments_nearing_limit ||
4307 compressed_pages >= c_segment_pages_compressed_nearing_limit) {
4308 *nearing_limits = true;
4309 }
4310 if (c_segments_available >= c_segments_limit ||
4311 compressed_pages >= c_segment_pages_compressed_limit) {
4312 lck_mtx_unlock_always(c_list_lock);
4313
4314 return NULL;
4315 }
4316 c_segments_busy = TRUE;
4317 lck_mtx_unlock_always(c_list_lock);
4318
4319 /* pages for c_segments are never depopulated, c_segments_available never goes down */
4320 kernel_memory_populate((vm_offset_t)c_segments_next_page,
4321 PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4322 VM_KERN_MEMORY_COMPRESSOR);
4323 c_segments_next_page += PAGE_SIZE;
4324
4325 c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4326
4327 if (c_segments_available_new > c_segments_limit) {
4328 c_segments_available_new = c_segments_limit;
4329 }
4330
4331 /* add the just-added segments to the top of the free-list */
4332 for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4333 c_segments_get(c_segno - 1)->c_segno = c_segno; /* next free is the one after you */
4334 }
4335
4336 lck_mtx_lock_spin_always(c_list_lock);
4337
4338 c_segments_get(c_segno - 1)->c_segno = c_free_segno_head; /* link to the rest of the existing freelist */
4339 c_free_segno_head = c_segments_available; /* first one in the page that was just allocated */
4340 c_segments_available = c_segments_available_new;
4341
4342 c_segments_busy = FALSE;
4343 thread_wakeup((event_t) (&c_segments_busy));
4344 }
4345 c_segno = c_free_segno_head;
4346 assert(c_segno >= 0 && c_segno < c_segments_limit);
4347
4348 c_free_segno_head = (uint32_t)c_segments_get(c_segno)->c_segno;
4349
4350 /*
4351 * do the rest of the bookkeeping now while we're still behind
4352 * the list lock and grab our generation id now into a local
4353 * so that we can install it once we have the c_seg allocated
4354 */
4355 c_segment_count++;
4356 if (c_segment_count > c_segment_count_max) {
4357 c_segment_count_max = c_segment_count;
4358 }
4359
4360 lck_mtx_unlock_always(c_list_lock);
4361
4362 c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4363
4364 c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4365
4366 lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4367
4368 c_seg->c_state = C_IS_EMPTY;
4369 c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4370 c_seg->c_mysegno = c_segno;
4371
4372 lck_mtx_lock_spin_always(c_list_lock);
4373 c_empty_count++; /* going to be immediately decremented in the next call */
4374 c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4375 c_segments_get(c_segno)->c_seg = c_seg;
4376 assert(c_segments_get(c_segno)->c_segno > c_segments_available); /* we just assigned a pointer to it so this is an indication that it is occupied */
4377 lck_mtx_unlock_always(c_list_lock);
4378
4379 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4380 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4381 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4382 #else /* XNU_TARGET_OS_OSX */
4383 if (memorystatus_swap_all_apps) {
4384 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4385 } else {
4386 donate_queue_head = NULL;
4387 }
4388 #endif /* XNU_TARGET_OS_OSX */
4389
4390 if (current_chead == donate_queue_head) {
4391 c_seg->c_has_donated_pages = 1;
4392 break;
4393 }
4394 }
4395
4396 *current_chead = c_seg;
4397
4398 #if DEVELOPMENT || DEBUG
4399 C_SEG_MAKE_WRITEABLE(c_seg);
4400 #endif
4401 }
4402 c_seg_alloc_nextslot(c_seg);
4403
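/*
 * Lazily populate the segment's buffer: only map more physical pages when
 * the populated-but-unused space falls below what the next compression
 * could need, and never more than C_SEG_MAX_POPULATE_SIZE at a time.
 */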
4404 size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4405
4406 if (size_to_populate) {
4407 min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4408
4409 if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4410 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4411 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4412 }
4413
4414 os_atomic_add(&vm_pageout_vminfo.vm_compressor_pages_grabbed, size_to_populate / PAGE_SIZE, relaxed);
4415
4416 kernel_memory_populate(
4417 (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4418 size_to_populate,
4419 KMA_NOFAIL | KMA_COMPRESSOR,
4420 VM_KERN_MEMORY_COMPRESSOR);
4421 } else {
4422 size_to_populate = 0;
4423 }
4424 }
4425 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4426
4427 lck_mtx_lock_spin_always(&c_seg->c_lock);
4428
4429 if (size_to_populate) {
4430 c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4431 }
4432
4433 return c_seg;
4434 }
4435
4436 #if DEVELOPMENT || DEBUG
4437 #if CONFIG_FREEZE
4438 extern boolean_t memorystatus_freeze_to_memory;
4439 #endif /* CONFIG_FREEZE */
4440 #endif /* DEVELOPMENT || DEBUG */
4441 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4442
4443 uint64_t c_seg_filled_no_contention = 0;
4444 uint64_t c_seg_filled_contention = 0;
4445 clock_sec_t c_seg_filled_contention_sec_max = 0;
4446 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4447
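/*
 * Seal off a segment that has finished filling: depopulate any whole unused
 * pages at the tail, decide its next queue (AGE_Q normally; SWAPOUT_Q for
 * darkwake, freezer, or donated segments), stamp its creation time and
 * generation id, and clear *current_chead so the caller starts a fresh
 * segment next time. The c_seg_filled_* counters above track how often
 * (and for how long) we had to wait for c_list_lock here.
 */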
4448 static void
4449 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4450 {
4451 uint32_t unused_bytes;
4452 uint32_t offset_to_depopulate;
4453 int new_state = C_ON_AGE_Q;
4454 clock_sec_t sec;
4455 clock_nsec_t nsec;
4456 bool head_insert = false, wakeup_swapout_thread = false;
4457
4458 unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4459
4460 if (unused_bytes) {
4461 /* if this is a platform that needs an extra page at the end of the segment when running compression,
4462 * then now is the time to depopulate that extra page. It still takes virtual space but doesn't
4463 * actually waste memory */
4464 offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
4465
4466 /* release the extra physical page(s) at the end of the segment */
4467 lck_mtx_unlock_always(&c_seg->c_lock);
4468
4469 kernel_memory_depopulate(
4470 (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4471 unused_bytes,
4472 KMA_COMPRESSOR,
4473 VM_KERN_MEMORY_COMPRESSOR);
4474
4475 lck_mtx_lock_spin_always(&c_seg->c_lock);
4476
4477 c_seg->c_populated_offset = offset_to_depopulate;
4478 }
4479 assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
4480
4481 #if DEVELOPMENT || DEBUG
4482 {
4483 boolean_t c_seg_was_busy = FALSE;
4484
4485 if (!c_seg->c_busy) {
4486 C_SEG_BUSY(c_seg);
4487 } else {
4488 c_seg_was_busy = TRUE;
4489 }
4490
4491 lck_mtx_unlock_always(&c_seg->c_lock);
4492
4493 C_SEG_WRITE_PROTECT(c_seg);
4494
4495 lck_mtx_lock_spin_always(&c_seg->c_lock);
4496
4497 if (c_seg_was_busy == FALSE) {
4498 C_SEG_WAKEUP_DONE(c_seg);
4499 }
4500 }
4501 #endif
4502
4503 #if CONFIG_FREEZE
4504 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4505 VM_CONFIG_SWAP_IS_PRESENT &&
4506 VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4507 #if DEVELOPMENT || DEBUG
4508 && !memorystatus_freeze_to_memory
4509 #endif /* DEVELOPMENT || DEBUG */
4510 ) {
4511 new_state = C_ON_SWAPOUT_Q;
4512 wakeup_swapout_thread = true;
4513 }
4514 #endif /* CONFIG_FREEZE */
4515
4516 if (vm_darkwake_mode == TRUE) {
4517 new_state = C_ON_SWAPOUT_Q;
4518 head_insert = true;
4519 wakeup_swapout_thread = true;
4520 } else {
4521 c_segment_t *donate_queue_head;
4522 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4523 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4524 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4525 #else /* XNU_TARGET_OS_OSX */
4526 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4527 #endif /* XNU_TARGET_OS_OSX */
4528 if (current_chead == donate_queue_head) {
4529 /* This is the place where the "donating" task actually does the so-called donation.
4530 * Instead of continuing to occupy memory in the compressor, the segment goes directly
4531 * to swap-out instead of going to AGE_Q */
4532 assert(c_seg->c_has_donated_pages);
4533 new_state = C_ON_SWAPOUT_Q;
4534 c_seg_total_donated_bytes += c_seg->c_bytes_used;
4535 break;
4536 }
4537 }
4538 }
4539
4540 clock_get_system_nanotime(&sec, &nsec);
4541 c_seg->c_creation_ts = (uint32_t)sec;
4542
4543 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4544 clock_sec_t sec2;
4545 clock_nsec_t nsec2;
4546
4547 lck_mtx_lock_spin_always(c_list_lock);
4548 clock_get_system_nanotime(&sec2, &nsec2);
4549 TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4550 /* keep track of how much time we've waited for c_list_lock */
4551 if (sec2 > c_seg_filled_contention_sec_max) {
4552 c_seg_filled_contention_sec_max = sec2;
4553 c_seg_filled_contention_nsec_max = nsec2;
4554 } else if (sec2 == c_seg_filled_contention_sec_max && nsec2 > c_seg_filled_contention_nsec_max) {
4555 c_seg_filled_contention_nsec_max = nsec2;
4556 }
4557 c_seg_filled_contention++;
4558 } else {
4559 c_seg_filled_no_contention++;
4560 }
4561
4562 #if CONFIG_FREEZE
4563 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4564 if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4565 assert(!c_seg->c_has_donated_pages);
4566 c_seg->c_has_donated_pages = 1;
4567 os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4568 }
4569 c_seg->c_has_freezer_pages = 1;
4570 }
4571 #endif /* CONFIG_FREEZE */
4572
4573 c_seg->c_generation_id = c_generation_id++;
4574 c_seg_switch_state(c_seg, new_state, head_insert);
4575
4576 #if CONFIG_FREEZE
4577 /*
4578 * Donated segments count as frozen to swap if we go through the freezer.
4579 * TODO: What we need is a new ledger and cseg state that can describe
4580 * a frozen cseg from a donated task so we can accurately decrement it on
4581 * swapins.
4582 */
4583 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4584 /*
4585 * darkwake and freezer can't co-exist together
4586 * We'll need to fix this accounting as a start.
4587 * And early donation c_segs are separate from frozen c_segs.
4588 */
4589 assert(vm_darkwake_mode == FALSE);
4590 c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4591 freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4592 }
4593 #endif /* CONFIG_FREEZE */
4594
4595 if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4596 /* this is possible if we decompressed a page from the segment before it finished filling */
4597 #if CONFIG_FREEZE
4598 assert(c_seg->c_task_owner == NULL);
4599 #endif /* CONFIG_FREEZE */
4600 c_seg_need_delayed_compaction(c_seg, TRUE);
4601 }
4602
4603 lck_mtx_unlock_always(c_list_lock);
4604
4605 if (wakeup_swapout_thread) {
4606 /*
4607 * Darkwake and Freeze configs always
4608 * wake up the swapout thread because
4609 * the compactor thread that normally handles
4610 * it may not be running as much in these
4611 * configs.
4612 */
4613 thread_wakeup((event_t)&vm_swapout_thread);
4614 }
4615
4616 *current_chead = NULL;
4617 }
4618
4619 /*
4620 * returns with c_seg locked
4621 */
4622 void
4623 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4624 {
4625 clock_sec_t sec;
4626 clock_nsec_t nsec;
4627
4628 clock_get_system_nanotime(&sec, &nsec);
4629
4630 lck_mtx_lock_spin_always(c_list_lock);
4631 lck_mtx_lock_spin_always(&c_seg->c_lock);
4632
4633 assert(c_seg->c_busy_swapping);
4634 assert(c_seg->c_busy);
4635
4636 c_seg->c_busy_swapping = 0;
4637
4638 if (c_seg->c_overage_swap == TRUE) {
4639 c_overage_swapped_count--;
4640 c_seg->c_overage_swap = FALSE;
4641 }
4642 if (has_data == TRUE) {
4643 if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4644 #if CONFIG_FREEZE
4645 /*
4646 * If a segment has both identities, frozen and donated bits set, the donated
4647 * bit wins on the swapin path. This is because the segment is being swapped back
4648 * in and so is in demand and should be given more time to spend in memory before
4649 * being swapped back out under pressure.
4650 */
4651 if (c_seg->c_has_donated_pages) {
4652 c_seg->c_has_freezer_pages = 0;
4653 }
4654 #endif /* CONFIG_FREEZE */
4655 c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4656 } else {
4657 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4658 }
4659
4660 if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4661 c_seg_need_delayed_compaction(c_seg, TRUE);
4662 }
4663 } else {
4664 c_seg->c_store.c_buffer = (int32_t*) NULL;
4665 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4666
4667 c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4668 }
4669 c_seg->c_swappedin_ts = (uint32_t)sec;
4670 c_seg->c_swappedin = true;
4671 #if TRACK_C_SEGMENT_UTILIZATION
4672 c_seg->c_decompressions_since_swapin = 0;
4673 #endif /* TRACK_C_SEGMENT_UTILIZATION */
4674
4675 lck_mtx_unlock_always(c_list_lock);
4676 }
4677
4678
4679
4680 /*
4681 * c_seg has to be locked and is returned locked if the c_seg isn't freed
4682 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4683 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4684 */
4685
4686 int
4687 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4688 {
4689 vm_offset_t addr = 0;
4690 uint32_t io_size = 0;
4691 uint64_t f_offset;
4692 thread_pri_floor_t token;
4693
4694 assert(C_SEG_IS_ONDISK(c_seg));
4695
4696 #if !CHECKSUM_THE_SWAP
4697 c_seg_trim_tail(c_seg);
4698 #endif
4699 io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4700 f_offset = c_seg->c_store.c_swap_handle;
4701
4702 C_SEG_BUSY(c_seg);
4703 c_seg->c_busy_swapping = 1;
4704
4705 /*
4706 * This thread is likely going to block for I/O.
4707 * Make sure it is ready to run when the I/O completes because
4708 * it needs to clear the busy bit on the c_seg so that other
4709 * waiting threads can make progress too.
4710 */
4711 token = thread_priority_floor_start();
4712 lck_mtx_unlock_always(&c_seg->c_lock);
4713
4714 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4715
4716 addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4717 c_seg->c_store.c_buffer = (int32_t*) addr;
4718
4719 kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4720 VM_KERN_MEMORY_COMPRESSOR);
4721
4722 if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4723 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4724
4725 kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4726 VM_KERN_MEMORY_COMPRESSOR);
4727
4728 c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4729 } else {
4730 #if ENCRYPTED_SWAP
4731 vm_swap_decrypt(c_seg, true);
4732 #endif /* ENCRYPTED_SWAP */
4733
4734 #if CHECKSUM_THE_SWAP
4735 if (c_seg->cseg_swap_size != io_size) {
4736 panic("swapin size doesn't match swapout size");
4737 }
4738
4739 if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4740 panic("c_seg_swapin - Swap hash mismatch");
4741 }
4742 #endif /* CHECKSUM_THE_SWAP */
4743
4744 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4745
4746 c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4747
4748 #if CONFIG_FREEZE
4749 /*
4750 * c_seg_swapin_requeue() returns with the c_seg lock held.
4751 */
4752 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4753 assert(c_seg->c_busy);
4754
4755 lck_mtx_unlock_always(&c_seg->c_lock);
4756 lck_mtx_lock_spin_always(c_list_lock);
4757 lck_mtx_lock_spin_always(&c_seg->c_lock);
4758 }
4759
4760 if (c_seg->c_task_owner) {
4761 c_seg_update_task_owner(c_seg, NULL);
4762 }
4763
4764 lck_mtx_unlock_always(c_list_lock);
4765
4766 os_atomic_add(&c_segment_pages_compressed_incore, c_seg->c_slots_used, relaxed);
4767 if (c_seg->c_has_donated_pages) {
4768 os_atomic_add(&c_segment_pages_compressed_incore_late_swapout, c_seg->c_slots_used, relaxed);
4769 }
4770 #endif /* CONFIG_FREEZE */
4771
4772 __assert_only unsigned int prev_swapped_count = os_atomic_sub_orig(
4773 &vm_page_swapped_count, c_seg->c_slots_used, relaxed);
4774 assert3u(prev_swapped_count, >=, c_seg->c_slots_used);
4775 os_atomic_add(&compressor_bytes_used, c_seg->c_bytes_used, relaxed);
4776
4777 if (force_minor_compaction == TRUE) {
4778 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4779 /*
4780 * c_seg was completely empty so it was freed,
4781 * so be careful not to reference it again
4782 *
4783 * Drop the boost so that the thread priority
4784 * is returned back to where it is supposed to be.
4785 */
4786 thread_priority_floor_end(&token);
4787 return 1;
4788 }
4789
4790 lck_mtx_lock_spin_always(&c_seg->c_lock);
4791 }
4792 }
4793 C_SEG_WAKEUP_DONE(c_seg);
4794
4795 /*
4796 * Drop the boost so that the thread priority
4797 * is returned back to where it is supposed to be.
4798 */
4799 thread_priority_floor_end(&token);
4800
4801 return 0;
4802 }
4803
4804 /*
4805 * TODO: refactor the CAS loops in c_segment_sv_hash_drop_ref() and c_segment_sv_hash_insert()
4806 * to os_atomic_rmw_loop() [rdar://139546215]
4807 */
4808
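/*
 * Single-value hash: each c_sv_hash_entry packs a reference count (he_ref)
 * and the 32-bit value (he_data) into one 64-bit he_record so both can be
 * updated with a single compare-and-swap. Inserts probe linearly from the
 * value's hash bucket for up to C_SV_HASH_MAX_MISS slots, taking a ref on a
 * matching or empty entry; drop_ref decrements the count, and a slot whose
 * count reaches zero is free for reuse.
 */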
4809 static void
4810 c_segment_sv_hash_drop_ref(int hash_indx)
4811 {
4812 struct c_sv_hash_entry o_sv_he, n_sv_he;
4813
4814 while (1) {
4815 o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4816
4817 n_sv_he.he_ref = o_sv_he.he_ref - 1;
4818 n_sv_he.he_data = o_sv_he.he_data;
4819
4820 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4821 if (n_sv_he.he_ref == 0) {
4822 os_atomic_dec(&c_segment_svp_in_hash, relaxed);
4823 }
4824 break;
4825 }
4826 }
4827 }
4828
4829
4830 static int
4831 c_segment_sv_hash_insert(uint32_t data)
4832 {
4833 int hash_sindx;
4834 int misses;
4835 struct c_sv_hash_entry o_sv_he, n_sv_he;
4836 boolean_t got_ref = FALSE;
4837
4838 if (data == 0) {
4839 os_atomic_inc(&c_segment_svp_zero_compressions, relaxed);
4840 } else {
4841 os_atomic_inc(&c_segment_svp_nonzero_compressions, relaxed);
4842 }
4843
4844 hash_sindx = data & C_SV_HASH_MASK;
4845
4846 for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4847 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4848
4849 while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4850 n_sv_he.he_ref = o_sv_he.he_ref + 1;
4851 n_sv_he.he_data = data;
4852
4853 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4854 if (n_sv_he.he_ref == 1) {
4855 os_atomic_inc(&c_segment_svp_in_hash, relaxed);
4856 }
4857 got_ref = TRUE;
4858 break;
4859 }
4860 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4861 }
4862 if (got_ref == TRUE) {
4863 break;
4864 }
4865 hash_sindx++;
4866
4867 if (hash_sindx == C_SV_HASH_SIZE) {
4868 hash_sindx = 0;
4869 }
4870 }
4871 if (got_ref == FALSE) {
4872 return -1;
4873 }
4874
4875 return hash_sindx;
4876 }
4877
4878
4879 #if RECORD_THE_COMPRESSED_DATA
4880
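/*
 * Debug capture: append one record to the in-memory buffer as a 4-byte
 * length header followed by the compressed bytes; panics if the capture
 * buffer would overflow.
 */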
4881 static void
4882 c_compressed_record_data(char *src, int c_size)
4883 {
4884 if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4885 panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4886 }
4887
4888 *(int *)((void *)c_compressed_record_cptr) = c_size;
4889
4890 c_compressed_record_cptr += 4;
4891
4892 memcpy(c_compressed_record_cptr, src, c_size);
4893 c_compressed_record_cptr += c_size;
4894 }
4895 #endif
4896
4897 #if HAS_MTE
4898
4899 /* with KASAN we panic unconditionally in the next MTE compression functions */
4900 #pragma clang diagnostic push
4901 #pragma clang diagnostic ignored "-Wmissing-noreturn"
4902
4903 /*
4904 * Compress the MTE tags for a page that starts in va.
4905 */
4906 static uint32_t
4907 compress_mte_tags(void *va, char *buffer_out, uint32_t size_out)
4908 {
4909 #if defined(KASAN)
4910 #pragma unused(va)
4911 #pragma unused(buffer_out)
4912 #pragma unused(size_out)
4913 panic("KASAN with MTE pages is not supported (%s)", __func__);
4914 #endif /* KASAN */
4915
4916 MTE_BULK_DECLARE_TAGLIST(temp_tags, PAGE_SIZE);
4917
4918 /* copy tags to temp buffer */
4919 mte_bulk_read_tags(va, PAGE_SIZE, temp_tags, sizeof(temp_tags));
4920
4921 uint32_t size_written = vm_mte_rle_compress_tags((uint8_t*)temp_tags, C_MTE_SIZE, (uint8_t*)buffer_out, size_out);
4922 assert(size_written > 0);
4923 /* size_written can be > 512 which indicates single-tag optimization,
4924 * in which case nothing is written to the out buffer */
4925 vm_mte_tags_stats_compressed(size_written);
4926
4927 return size_written;
4928 }
4929
4930 /*
4931 * Decompress the MTE tags for the page that starts in va.
4932 */
4933 static bool
4934 decompress_mte_tags(void *va, uint32_t size_in, char *buffer_in)
4935 {
4936 #if defined(KASAN)
4937 #pragma unused(va)
4938 #pragma unused(buffer_in)
4939 #pragma unused(size_in)
4940 panic("KASAN with MTE pages is not supported (%s)", __func__);
4941 #endif /* KASAN */
4942 assert(size_in > 0);
4943
4944 MTE_BULK_DECLARE_TAGLIST(temp_tags, PAGE_SIZE);
4945
4946 bool ok = vm_mte_rle_decompress_tags((uint8_t*)buffer_in, size_in, (uint8_t*)temp_tags, C_MTE_SIZE);
4947 /* returns false if the compression encoding was somehow corrupted */
4948 assertf(ok, "corrupt tags encoding in:%p, %ud out:%p", buffer_in, size_in, temp_tags);
4949
4950 if (ok) {
4951 mte_bulk_write_tags(va, PAGE_SIZE, temp_tags, sizeof(temp_tags));
4952 }
4953
4954 return ok;
4955 }
4956
4957 #pragma clang diagnostic pop
4958
4959 #endif /* HAS_MTE */
4960
4961 /**
4962 * Do the actual compression of the given page
4963 * @param src [IN] address in the physical aperture of the page to compress.
4964 * @param slot_ptr [OUT] fill the slot-mapping of the c_seg+slot where the page ends up being stored
4965 * @param current_chead [IN-OUT] current filling c_seg. The pointer comes from the current compression thread state.
4966 * On the very first call this points to NULL and this function will fill that pointer with a new
4967 * filling c_seg. If the current filling c_seg doesn't have enough space, it will be replaced in this location
4968 * with a new filling c_seg.
4969 * @param scratch_buf [IN] pointer from the current thread state, used by the compression codec
4970 * @return KERN_RESOURCE_SHORTAGE if the compressor has been exhausted
4971 */
4972 static kern_return_t
4973 c_compress_page(
4974 char *src,
4975 c_slot_mapping_t slot_ptr,
4976 c_segment_t *current_chead,
4977 char *scratch_buf,
4978 __unused vm_compressor_options_t flags)
4979 {
4980 int c_size = -1;
4981 int c_rounded_size = 0;
4982 int max_csize;
4983 bool nearing_limits;
4984 c_slot_t cs;
4985 c_segment_t c_seg;
4986
4987 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4988 retry: /* may need to retry if the currently filling c_seg will not have enough space */
4989 c_seg = c_seg_allocate(current_chead, &nearing_limits);
4990 if (c_seg == NULL) {
4991 if (nearing_limits) {
4992 memorystatus_respond_to_compressor_exhaustion();
4993 }
4994 return KERN_RESOURCE_SHORTAGE;
4995 }
4996
4997 /*
4998 * c_seg_allocate() returns with c_seg lock held
4999 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5000 * c_nextslot has been allocated and
5001 * c_store.c_buffer populated
5002 */
5003 assert(c_seg->c_state == C_IS_FILLING);
5004
5005 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
5006
5007 C_SLOT_ASSERT_PACKABLE(slot_ptr);
5008 cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
5009
5010 cs->c_offset = c_seg->c_nextoffset;
5011
5012 unsigned int avail_space = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
5013
5014 #if HAS_MTE
5015 /* Hold back room for the MTE tags, which can be as long as C_MTE_SIZE in the worst case */
5016 /* possible optimization: radr://133756934 */
5017 if (flags & C_MTE) {
5018 if (avail_space > C_MTE_SIZE) {
5019 avail_space -= C_MTE_SIZE;
5020 } else {
5021 avail_space = 0;
5022 }
5023 }
5024 #endif /* HAS_MTE */
5025
5026 max_csize = avail_space;
5027 if (max_csize > PAGE_SIZE) {
5028 max_csize = PAGE_SIZE;
5029 }
5030
5031 #if CHECKSUM_THE_DATA
5032 cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
5033 #endif
5034 boolean_t incomp_copy = FALSE; /* codec indicates it already did copy an incompressible page */
5035 /* The SW codec case needs 4 bytes for its header and these are not accounted for in the bytes_budget argument.
5036 * Also, the SV-not-in-hash case needs 4 bytes. */
5037 int max_csize_adj = (max_csize - 4);
5038 if (__improbable(max_csize_adj < 0)) {
5039 max_csize_adj = 0;
5040 }
5041
5042 if (max_csize > 0 && max_csize_adj > 0) {
5043 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5044 #if defined(__arm64__)
5045 uint16_t ccodec = CINVALID;
5046 uint32_t inline_popcount;
5047 if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
5048 vm_memtag_disable_checking();
5049 c_size = metacompressor((const uint8_t *) src,
5050 (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5051 max_csize_adj, &ccodec,
5052 scratch_buf, &incomp_copy, &inline_popcount);
5053 vm_memtag_enable_checking();
5054 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5055
5056 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
5057 /* The HW codec case doesn't detect overflow on its own; instead it spills into the next page
5058 * and we need to detect that this happened */
5059 if (c_size > max_csize_adj) {
5060 c_size = -1;
5061 }
5062 #endif
5063 } else {
5064 c_size = -1;
5065 }
5066 assert(ccodec == CCWK || ccodec == CCLZ4);
5067 cs->c_codec = ccodec;
5068 #endif
5069 } else {
5070 #if defined(__arm64__)
5071 vm_memtag_disable_checking();
5072 cs->c_codec = CCWK;
5073 __unreachable_ok_push
5074 if (PAGE_SIZE == 4096) {
5075 c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5076 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5077 } else {
5078 c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5079 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5080 }
5081 __unreachable_ok_pop
5082 vm_memtag_enable_checking();
5083 #else
5084 vm_memtag_disable_checking();
5085 c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5086 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
5087 vm_memtag_enable_checking();
5088 #endif
5089 }
5090 } else { /* max_csize == 0 or max_csize_adj == 0 */
5091 c_size = -1;
5092 }
5093 /* c_size is the size written by the codec, 0 if the page is a uniform 32-bit value, or -1 if there was not enough space
5094 * or the page was incompressible */
5095 assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
5096 "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
5097
5098 if (c_size == -1) {
5099 if (max_csize < PAGE_SIZE) {
5100 c_current_seg_filled(c_seg, current_chead);
5101 assert(*current_chead == NULL);
5102
5103 lck_mtx_unlock_always(&c_seg->c_lock);
5104 /* TODO: it may be worth requiring codecs to distinguish
5105 * between incompressible inputs and failures due to budget exhaustion.
5106 * right now this assumes that if the space we had was a full PAGE_SIZE, then the codec failed due to incompressible input */
5107
5108 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5109 goto retry; /* previous c_seg didn't have enough space, we finalized it and can try again with a fresh c_seg */
5110 }
5111 c_size = PAGE_SIZE; /* tag:WK-INCOMPRESSIBLE */
5112
5113 if (incomp_copy == FALSE) { /* codec did not copy the incompressible input */
5114 vm_memtag_disable_checking();
5115 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
5116 vm_memtag_enable_checking();
5117 }
5118
5119 os_atomic_inc(&c_segment_noncompressible_pages, relaxed);
5120 } else if (c_size == 0) {
5121 #if HAS_MTE
5122 /* don't try to query the hash if we need to save the MTE tags since we won't have anywhere to put the tags
5123 * (also, reading the uint32 at src for the hash query would be an MTE violation) tag:NO-SV-AND-MTE */
5124 if (!(flags & C_MTE))
5125 #endif /* HAS_MTE */
5126 {
5127 /*
5128 * Special case - this is a page completely full of a single 32 bit value.
5129 * We store some values directly in the c_slot_mapping, if not there, the
5130 * 4 byte value goes in the compressor segment.
5131 */
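/*
 * When the hash insert succeeds, the slot mapping itself records where the
 * value lives: s_cseg is set to the reserved C_SV_CSEG_ID and s_cindx to the
 * returned hash index, and vm_compressor_get() later reads he_data straight
 * out of c_segment_sv_hash_table instead of touching any c_segment.
 */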
5132 int hash_index = c_segment_sv_hash_insert(*(uint32_t *) (uintptr_t) src);
5133
5134 if (hash_index != -1) {
5135 slot_ptr->s_cindx = hash_index;
5136 slot_ptr->s_cseg = C_SV_CSEG_ID;
5137 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5138 slot_ptr->s_uncompressed = 0;
5139 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5140
5141 os_atomic_inc(&c_segment_svp_hash_succeeded, relaxed);
5142 #if RECORD_THE_COMPRESSED_DATA
5143 c_compressed_record_data(src, 4);
5144 #endif
5145 /* we didn't write anything to c_buffer and didn't end up using the slot in the c_seg at all, so skip all
5146 * the book-keeping of the case that we did */
5147 goto sv_compression;
5148 }
5149 }
5150 os_atomic_inc(&c_segment_svp_hash_failed, relaxed);
5151
5152 c_size = 4;
5153 vm_memtag_disable_checking();
5154 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
5155 vm_memtag_enable_checking();
5156 }
5157
5158 #if RECORD_THE_COMPRESSED_DATA
5159 c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
5160 #endif
5161 #if CHECKSUM_THE_COMPRESSED_DATA
5162 cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
5163 #endif
5164 #if POPCOUNT_THE_COMPRESSED_DATA
5165 cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
5166 #endif
5167
5168 PACK_C_SIZE(cs, c_size);
5169
5170 #if HAS_MTE
5171 /* For bring up, just copy the tags into the segment */
5172 if (c_size && (flags & C_MTE)) {
5173 /* current data we filled started at c_offset and had size c_size */
5174 int space_left = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset) - c_size;
5175 assert(space_left >= C_MTE_SIZE); /* This is guaranteed by the avail_space modification above */
5176 cs->c_mte_size = compress_mte_tags(src, ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + c_size, (uint32_t)space_left);
5177 } else {
5178 cs->c_mte_size = 0;
5179 }
5180 /* next invocation of WKDMc expects to be writing at a 64 byte alignment */
5181 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
5182 #else /* HAS_MTE */
5183 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size);
5184 #endif /* HAS_MTE */
5185
5186 c_seg->c_bytes_used += c_rounded_size;
5187 c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5188 c_seg->c_slots_used++;
5189
5190 #if CONFIG_FREEZE
5191 /* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
5192 os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
5193 if (c_seg->c_has_donated_pages) {
5194 os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5195 }
5196 #endif /* CONFIG_FREEZE */
5197
5198 slot_ptr->s_cindx = c_seg->c_nextslot++;
5199 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1, see other usages of s_cseg where it's decremented */
5200 slot_ptr->s_cseg = c_seg->c_mysegno + 1;
5201
5202 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5203 slot_ptr->s_uncompressed = 0;
5204 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5205
5206 sv_compression:
5207 /* can we say this c_seg is full? */
5208 if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
5209 /* condition 1: segment buffer is almost full, don't bother trying to fill it further.
5210 * condition 2: we can't have any more slots in this c_segment even if we had buffer space */
5211 c_current_seg_filled(c_seg, current_chead);
5212 assert(*current_chead == NULL);
5213 }
5214
5215 lck_mtx_unlock_always(&c_seg->c_lock);
5216
5217 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5218
5219 #if RECORD_THE_COMPRESSED_DATA
5220 if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
5221 c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
5222 c_compressed_record_cptr = c_compressed_record_sbuf;
5223 }
5224 #endif
5225 if (c_size) {
5226 os_atomic_add(&c_segment_compressed_bytes, c_size, relaxed);
5227 os_atomic_add(&compressor_bytes_used, c_rounded_size, relaxed);
5228 }
5229 os_atomic_add(&c_segment_input_bytes, PAGE_SIZE, relaxed);
5230
5231 os_atomic_inc(&c_segment_pages_compressed, relaxed);
5232 #if DEVELOPMENT || DEBUG
5233 if (!compressor_running_perf_test) {
5234 /*
5235 * The perf_compressor benchmark should not be able to trigger
5236 * compressor thrashing jetsams.
5237 */
5238 os_atomic_inc(&sample_period_compression_count, relaxed);
5239 }
5240 #else /* DEVELOPMENT || DEBUG */
5241 os_atomic_inc(&sample_period_compression_count, relaxed);
5242 #endif /* DEVELOPMENT || DEBUG */
5243
5244 if (nearing_limits) {
5245 memorystatus_respond_to_compressor_exhaustion();
5246 }
5247
5248 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
5249
5250 return KERN_SUCCESS;
5251 }
5252
5253 static inline void
5254 sv_decompress(int32_t *ddst, int32_t pattern)
5255 {
5256 // assert(__builtin_constant_p(PAGE_SIZE) != 0);
5257 #if defined(__x86_64__)
5258 memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
5259 #elif defined(__arm64__)
5260 assert((PAGE_SIZE % 128) == 0);
5261 if (pattern == 0) {
5262 fill32_dczva((addr64_t)ddst, PAGE_SIZE);
5263 } else {
5264 fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
5265 }
5266 #else
5267 size_t i;
5268
5269 /* Unroll the pattern fill loop 4x to encourage the
5270 * compiler to emit NEON stores, cf.
5271 * <rdar://problem/25839866> Loop autovectorization
5272 * anomalies.
5273 */
5274 /* We use separate loops for each PAGE_SIZE
5275 * to allow the autovectorizer to engage, as PAGE_SIZE
5276 * may not be a constant.
5277 */
5278
5279 __unreachable_ok_push
5280 if (PAGE_SIZE == 4096) {
5281 for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
5282 *ddst++ = pattern;
5283 *ddst++ = pattern;
5284 *ddst++ = pattern;
5285 *ddst++ = pattern;
5286 }
5287 } else {
5288 assert(PAGE_SIZE == 16384);
5289 for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
5290 *ddst++ = pattern;
5291 *ddst++ = pattern;
5292 *ddst++ = pattern;
5293 *ddst++ = pattern;
5294 }
5295 }
5296 __unreachable_ok_pop
5297 #endif
5298 }
5299
5300 static vm_decompress_result_t
5301 c_decompress_page(
5302 char *dst,
5303 volatile c_slot_mapping_t slot_ptr, /* why volatile? perhaps due to changes across hibernation */
5304 vm_compressor_options_t flags,
5305 int *zeroslot)
5306 {
5307 c_slot_t cs;
5308 c_segment_t c_seg;
5309 uint32_t c_segno;
5310 uint16_t c_indx;
5311 int c_rounded_size;
5312 uint32_t c_size;
5313 vm_decompress_result_t retval = 0;
5314 boolean_t need_unlock = TRUE;
5315 boolean_t consider_defragmenting = FALSE;
5316 boolean_t kdp_mode = FALSE;
5317
5318 #if HAS_MTE
5319 vm_mte_c_tags_removal_reason_t mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5320 #endif
5321 if (__improbable(flags & C_KDP)) {
5322 if (not_in_kdp) {
5323 panic("C_KDP passed to decompress page from outside of debugger context");
5324 }
5325
5326 assert((flags & C_KEEP) == C_KEEP);
5327 assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
5328
5329 if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
5330 return DECOMPRESS_NEED_BLOCK;
5331 }
5332
5333 kdp_mode = TRUE;
5334 *zeroslot = 0;
5335 }
5336
5337 ReTry:
5338 if (__probable(!kdp_mode)) {
5339 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5340 } else {
5341 if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
5342 return DECOMPRESS_NEED_BLOCK;
5343 }
5344 }
5345
5346 #if HIBERNATION
5347 /*
5348 * if hibernation is enabled, it indicates (via a call
5349 * to 'vm_decompressor_lock') that no further
5350 * decompressions are allowed once it reaches
5351 * the point of flushing all of the currently dirty
5352 * anonymous memory through the compressor and out
5353 * to disk... in this state we allow freeing of compressed
5354 * pages and must honor the C_DONT_BLOCK case
5355 */
5356 if (__improbable(dst && decompressions_blocked == TRUE)) {
5357 if (flags & C_DONT_BLOCK) {
5358 if (__probable(!kdp_mode)) {
5359 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5360 }
5361
5362 *zeroslot = 0;
5363 return -2;
5364 }
5365 /*
5366 * it's safe to atomically assert and block behind the
5367 * lock held in shared mode because "decompressions_blocked" is
5368 * only set and cleared and the thread_wakeup done when the lock
5369 * is held exclusively
5370 */
5371 assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
5372
5373 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5374
5375 thread_block(THREAD_CONTINUE_NULL);
5376
5377 goto ReTry;
5378 }
5379 #endif
5380 /* s_cseg is actually "segno+1" */
5381 c_segno = slot_ptr->s_cseg - 1;
5382
5383 if (__improbable(c_segno >= c_segments_available)) {
5384 panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
5385 c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
5386 }
5387
5388 if (__improbable(c_segments_get(c_segno)->c_segno < c_segments_available)) {
5389 panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
5390 c_segno, slot_ptr, *(int *)((void *)slot_ptr));
5391 }
5392
5393 c_seg = c_segments_get(c_segno)->c_seg;
5394
5395 if (__probable(!kdp_mode)) {
5396 lck_mtx_lock_spin_always(&c_seg->c_lock);
5397 } else {
5398 if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
5399 return DECOMPRESS_NEED_BLOCK;
5400 }
5401 }
5402
5403 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5404
5405 if (dst == NULL && c_seg->c_busy_swapping) {
5406 assert(c_seg->c_busy);
5407
5408 goto bypass_busy_check;
5409 }
5410 if (flags & C_DONT_BLOCK) {
5411 if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
5412 *zeroslot = 0;
5413
5414 retval = DECOMPRESS_NEED_BLOCK;
5415 goto done;
5416 }
5417 }
5418 if (c_seg->c_busy) {
5419 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5420
5421 c_seg_wait_on_busy(c_seg);
5422
5423 goto ReTry;
5424 }
5425 bypass_busy_check:
5426
5427 c_indx = slot_ptr->s_cindx;
5428
5429 if (__improbable(c_indx >= c_seg->c_nextslot)) {
5430 panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5431 c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5432 }
5433
5434 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5435
5436 c_size = UNPACK_C_SIZE(cs);
5437
5438 #if HAS_MTE
5439 if (dst) { /* if we're coming from vm_compressor_free() we're not going to have flags,
5440 * see rdar://133837861 to make this more generic */
5441 if (cs->c_mte_size != 0) {
5442 assertf(flags & C_MTE,
5443 "decompress page with mte_size=%d but no C_MTE in flags=%x", (int) cs->c_mte_size, flags);
5444 } else {
5445 assertf(!(flags & C_MTE),
5446 "decompress page without mte (mte_size=%d) and with C_MTE in flags=%x", (int) cs->c_mte_size, flags);
5447 }
5448 }
5449 #endif /* HAS_MTE */
5450
5451 if (__improbable(c_size == 0)) { /* sanity check it's not an empty slot */
5452 panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5453 c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5454 }
5455
5456 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(c_size + c_slot_extra_size(cs));
5457 /* c_rounded_size should not change after this point so that it remains consistent on all branches */
5458
5459 if (dst) { /* would be NULL if we don't want the page content, from free */
5460 uint32_t age_of_cseg;
5461 clock_sec_t cur_ts_sec;
5462 clock_nsec_t cur_ts_nsec;
5463
5464 if (C_SEG_IS_ONDISK(c_seg)) {
5465 #if CONFIG_FREEZE
5466 if (freezer_incore_cseg_acct) {
5467 if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5468 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5469 lck_mtx_unlock_always(&c_seg->c_lock);
5470
5471 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5472
5473 goto ReTry;
5474 }
5475
5476 uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5477 if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5478 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5479 lck_mtx_unlock_always(&c_seg->c_lock);
5480
5481 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5482
5483 goto ReTry;
5484 }
5485 }
5486 #endif /* CONFIG_FREEZE */
5487 assert(kdp_mode == FALSE);
5488 retval = c_seg_swapin(c_seg, FALSE, TRUE);
5489 assert(retval == 0);
5490
5491 retval = DECOMPRESS_SUCCESS_SWAPPEDIN;
5492 }
5493 if (c_seg->c_state == C_ON_BAD_Q) {
5494 assert(c_seg->c_store.c_buffer == NULL);
5495 *zeroslot = 0;
5496
5497 retval = DECOMPRESS_FAILED_BAD_Q;
5498 goto done;
5499 }
5500
5501 #if POPCOUNT_THE_COMPRESSED_DATA
5502 unsigned csvpop;
5503 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5504 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5505 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5506 }
5507 #endif
5508
5509 #if CHECKSUM_THE_COMPRESSED_DATA
5510 unsigned csvhash;
5511 if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5512 panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5513 }
5514 #endif
5515 if (c_size == PAGE_SIZE) { /* tag:WK-INCOMPRESSIBLE */
5516 /* page wasn't compressible... just copy it out */
5517 vm_memtag_disable_checking();
5518 memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5519 vm_memtag_enable_checking();
5520 } else if (c_size == 4) {
5521 int32_t data;
5522 int32_t *dptr;
5523
5524 /*
5525 * page was populated with a single value
5526 * that didn't fit into our fast hash
5527 * so we packed it in as a single non-compressed value
5528 * that we need to populate the page with
5529 */
5530 dptr = (int32_t *)(uintptr_t)dst;
5531 data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5532 vm_memtag_disable_checking();
5533 sv_decompress(dptr, data);
5534 vm_memtag_enable_checking();
5535 } else { /* normal segment decompress */
5536 uint32_t my_cpu_no;
5537 char *scratch_buf;
5538
5539 my_cpu_no = cpu_number();
5540
5541 assert(my_cpu_no < compressor_cpus);
5542
5543 if (__probable(!kdp_mode)) {
5544 /*
5545 * we're behind the c_seg lock held in spin mode
5546 * which means pre-emption is disabled... therefore
5547 * the following sequence is atomic and safe
5548 */
5549 scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5550 } else if (flags & C_KDP_MULTICPU) {
5551 assert(vm_compressor_kdp_state.kc_scratch_bufs != NULL);
5552 scratch_buf = &vm_compressor_kdp_state.kc_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5553 } else {
5554 scratch_buf = vm_compressor_kdp_state.kc_panic_scratch_buf;
5555 }
5556
5557 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5558 #if defined(__arm64__)
5559 uint16_t c_codec = cs->c_codec;
5560 uint32_t inline_popcount;
5561 vm_memtag_disable_checking();
5562 if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5563 (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5564 vm_memtag_enable_checking();
5565 retval = DECOMPRESS_FAILED_ALGO_ERROR;
5566 } else {
5567 vm_memtag_enable_checking();
5568 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5569 }
5570 #endif
5571 } else { /* algorithm == VM_COMPRESSOR_DEFAULT_CODEC */
5572 vm_memtag_disable_checking();
5573 #if defined(__arm64__)
5574 __unreachable_ok_push
5575 if (PAGE_SIZE == 4096) {
5576 WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5577 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5578 } else {
5579 WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5580 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5581 }
5582 __unreachable_ok_pop
5583 #else
5584 WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5585 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5586 #endif
5587 vm_memtag_enable_checking();
5588 }
5589 } /* normal segment decompress */
5590
5591 #if CHECKSUM_THE_DATA
5592 if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5593 #if defined(__arm64__)
5594 int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5595 panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5596 #else /* defined(__arm64__) */
5597 panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5598 #endif /* defined(__arm64__) */
5599 }
5600 #endif /* CHECKSUM_THE_DATA */
5601 if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5602 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5603
5604 age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5605 if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5606 os_atomic_inc(&age_of_decompressions_during_sample_period[age_of_cseg], relaxed);
5607 } else {
5608 os_atomic_inc(&overage_decompressions_during_sample_period, relaxed);
5609 }
5610
5611 os_atomic_inc(&sample_period_decompression_count, relaxed);
5612 }
5613
5614 #if HAS_MTE
5615 /*
5616 * Only decompress tags if there are tags to decompress and the
5617 * out page is actually going to use tagging.
5618 */
5619 if (cs->c_mte_size != 0 && (flags & C_MTE_DROP_TAGS) == 0) {
5620 if (!decompress_mte_tags(dst, cs->c_mte_size, ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + c_size)) {
5621 retval = DECOMPRESS_FAILED_TAGS;
5622 mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_CORRUPT;
5623 } else {
5624 mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_DECOMPRESSED;
5625 }
5626 } else {
5627 mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5628 }
5629 #endif /* HAS_MTE */
5630
5631 #if TRACK_C_SEGMENT_UTILIZATION
5632 if (c_seg->c_swappedin) {
5633 c_seg->c_decompressions_since_swapin++;
5634 }
5635 #endif /* TRACK_C_SEGMENT_UTILIZATION */
5636 } /* dst */
5637 else {
5638 /*
5639 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
5640 */
5641 if (C_SEG_IS_ONDISK(c_seg)) {
5642 __assert_only unsigned int prev_swapped_count =
5643 os_atomic_dec_orig(&vm_page_swapped_count, relaxed);
5644 assert3u(prev_swapped_count, >, 0);
5645 #if CONFIG_FREEZE
5646 /*
5647 * The compression sweep feature will push out anonymous pages to disk
5648 * without going through the freezer path and so those c_segs, while
5649 * swapped out, won't have an owner.
5650 */
5651 if (c_seg->c_task_owner) {
5652 task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5653 }
5654
5655 /*
5656 * We are freeing a page in swap without swapping it in. We bump the in-core
5657 * count here to simulate a swapin of a page so that we can accurately
5658 * decrement it below.
5659 */
5660 os_atomic_inc(&c_segment_pages_compressed_incore, relaxed);
5661 if (c_seg->c_has_donated_pages) {
5662 os_atomic_inc(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5663 }
5664 } else if (c_seg->c_state == C_ON_BAD_Q) {
5665 assert(c_seg->c_store.c_buffer == NULL);
5666 *zeroslot = 0;
5667
5668 retval = DECOMPRESS_FAILED_BAD_Q_FREEZE;
5669 goto done; /* this is intended to avoid the decrement of c_segment_pages_compressed_incore below */
5670 #endif /* CONFIG_FREEZE */
5671 }
5672 #if HAS_MTE
5673 mte_tags_removal_reason = VM_MTE_C_TAGS_REMOVAL_FREE;
5674 #endif /* HAS_MTE */
5675 }
5676
5677 if (flags & C_KEEP) {
5678 *zeroslot = 0;
5679 goto done;
5680 }
5681
5682 #if HAS_MTE
5683 if (cs->c_mte_size != 0) {
5684 vm_mte_tags_stats_removed(cs->c_mte_size, mte_tags_removal_reason);
5685 }
5686 #endif /* HAS_MTE */
5687
5688 /* now perform needed bookkeeping for the removal of the slot from the segment */
5689 assert(kdp_mode == FALSE);
5690
5691 c_seg->c_bytes_unused += c_rounded_size;
5692 c_seg->c_bytes_used -= c_rounded_size;
5693
5694 assert(c_seg->c_slots_used);
5695 c_seg->c_slots_used--;
5696 if (dst && c_seg->c_swappedin) {
5697 task_t task = current_task();
5698 if (task) {
5699 ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5700 }
5701 }
5702
5703 PACK_C_SIZE(cs, 0); /* mark slot as empty */
5704 #if HAS_MTE
5705 cs->c_mte_size = 0;
5706 #endif /* HAS_MTE */
5707
5708 if (c_indx < c_seg->c_firstemptyslot) {
5709 c_seg->c_firstemptyslot = c_indx;
5710 }
5711
5712 os_atomic_dec(&c_segment_pages_compressed, relaxed);
5713 #if CONFIG_FREEZE
5714 os_atomic_dec(&c_segment_pages_compressed_incore, relaxed);
5715 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5716 if (c_seg->c_has_donated_pages) {
5717 os_atomic_dec(&c_segment_pages_compressed_incore_late_swapout, relaxed);
5718 assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5719 }
5720 #endif /* CONFIG_FREEZE */
5721
5722 if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5723 /*
5724 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5725 * free of a compressed page (i.e. dst == NULL)
5726 */
5727 os_atomic_sub(&compressor_bytes_used, c_rounded_size, relaxed);
5728 }
5729 if (c_seg->c_busy_swapping) {
5730 /*
5731 * bypass case for c_busy_swapping...
5732 * let the swapin/swapout paths deal with putting
5733 * the c_seg on the minor compaction queue if needed
5734 */
5735 assert(c_seg->c_busy);
5736 goto done;
5737 }
5738 assert(!c_seg->c_busy);
5739
5740 if (c_seg->c_state != C_IS_FILLING) {
5741 /* did we just remove the last slot from the segment? */
5742 if (c_seg->c_bytes_used == 0) {
5743 if (!(C_SEG_IS_ONDISK(c_seg))) {
5744 /* it was compressed resident in memory */
5745 int pages_populated;
5746
5747 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5748 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5749
5750 if (pages_populated) {
5751 assert(c_seg->c_state != C_ON_BAD_Q);
5752 assert(c_seg->c_store.c_buffer != NULL);
5753
5754 C_SEG_BUSY(c_seg);
5755 lck_mtx_unlock_always(&c_seg->c_lock);
5756
5757 kernel_memory_depopulate(
5758 (vm_offset_t) c_seg->c_store.c_buffer,
5759 ptoa(pages_populated),
5760 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5761
5762 lck_mtx_lock_spin_always(&c_seg->c_lock);
5763 C_SEG_WAKEUP_DONE(c_seg);
5764 }
5765 /* minor compaction will free it */
5766 if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5767 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5768 /* If we're on the swapout q, we want to get out of it since there's no reason to swap out
5769 * anymore, so put it on the AGE Q in the meantime until minor compaction */
5770 bool clear_busy = false;
5771 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5772 C_SEG_BUSY(c_seg);
5773
5774 lck_mtx_unlock_always(&c_seg->c_lock);
5775 lck_mtx_lock_spin_always(c_list_lock);
5776 lck_mtx_lock_spin_always(&c_seg->c_lock);
5777 clear_busy = true;
5778 }
5779 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5780 if (clear_busy) {
5781 C_SEG_WAKEUP_DONE(c_seg);
5782 clear_busy = false;
5783 }
5784 lck_mtx_unlock_always(c_list_lock);
5785 }
5786 c_seg_need_delayed_compaction(c_seg, FALSE);
5787 }
5788 } else { /* C_SEG_IS_ONDISK(c_seg) */
5789 /* it's empty and on-disk, make sure it's marked as sparse */
5790 if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5791 c_seg_move_to_sparse_list(c_seg);
5792 consider_defragmenting = TRUE;
5793 }
5794 }
5795 } else if (c_seg->c_on_minorcompact_q) {
5796 assert(c_seg->c_state != C_ON_BAD_Q);
5797 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5798
5799 if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5800 c_seg_try_minor_compaction_and_unlock(c_seg);
5801 need_unlock = FALSE;
5802 }
5803 } else if (!(C_SEG_IS_ONDISK(c_seg))) {
5804 if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5805 C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5806 c_seg_need_delayed_compaction(c_seg, FALSE);
5807 }
5808 } else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5809 c_seg_move_to_sparse_list(c_seg);
5810 consider_defragmenting = TRUE;
5811 }
5812 } /* c_state != C_IS_FILLING */
5813 done:
5814 if (__improbable(kdp_mode)) {
5815 return retval;
5816 }
5817
5818 if (need_unlock == TRUE) {
5819 lck_mtx_unlock_always(&c_seg->c_lock);
5820 }
5821
5822 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5823
5824 if (consider_defragmenting == TRUE) {
5825 vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5826 }
5827
5828 #if !XNU_TARGET_OS_OSX
5829 /*
5830 * Decompressions will generate fragmentation in the compressor pool
5831 * over time. Consider waking the compactor thread if any of the
5832 * fragmentation thresholds have been crossed as a result of this
5833 * decompression.
5834 */
5835 vm_consider_waking_compactor_swapper();
5836 #endif /* !XNU_TARGET_OS_OSX */
5837
5838 return retval;
5839 }
5840
5841
5842 inline bool
5843 vm_compressor_is_slot_compressed(int *slot)
5844 {
5845 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5846 #pragma unused(slot)
5847 return true;
5848 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5849 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5850 return !slot_ptr->s_uncompressed;
5851 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5852 }
5853
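/*
 * Illustrative (hypothetical) caller sketch, not taken from this file: the int
 * slot stashed by vm_compressor_put() is handed back here together with a
 * freshly grabbed physical page to refill. Unless C_KEEP is passed, a
 * successful get also consumes the slot (it is zeroed).
 *
 *	int slot = ...;		// value produced earlier by vm_compressor_put()
 *	ppnum_t pn = ...;	// physical page to fill with the decompressed data
 *	vm_decompress_result_t r = vm_compressor_get(pn, &slot, C_DONT_BLOCK);
 *	if (r == DECOMPRESS_NEED_BLOCK) {
 *		// segment was busy or on disk; retry without C_DONT_BLOCK when blocking is OK
 *	}
 */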
5854 vm_decompress_result_t
5855 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5856 {
5857 c_slot_mapping_t slot_ptr;
5858 char *dst;
5859 int zeroslot = 1;
5860 vm_decompress_result_t retval;
5861
5862 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5863 if (flags & C_PAGE_UNMODIFIED) {
5864 int iretval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5865 if (iretval == 0) {
5866 os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5867 return DECOMPRESS_SUCCESS;
5868 }
5869
5870 return DECOMPRESS_FAILED_UNMODIFIED;
5871 }
5872 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5873
5874 /* get the address of this page in the physical aperture to fill into */
5875 dst = pmap_map_compressor_page(pn);
5876 slot_ptr = (c_slot_mapping_t)slot;
5877
5878 assert(dst != NULL);
5879
5880 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5881 #if HAS_MTE
5882 /* single value page can't be an MTE page (since there's no place to put the tags) see tag:NO-SV-AND-MTE */
5883 assert(!(flags & C_MTE));
5884 #endif
5885 int32_t data;
5886 int32_t *dptr;
5887
5888 /*
5889 * page was populated with a single value
5890 * that found a home in our hash table
5891 * grab that value from the hash and use it
5892 * to populate the page
5893 */
5894 dptr = (int32_t *)(uintptr_t)dst;
5895 data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5896 sv_decompress(dptr, data);
5897
5898 if (!(flags & C_KEEP)) {
5899 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5900
5901 os_atomic_dec(&c_segment_pages_compressed, relaxed);
5902 *slot = 0;
5903 }
5904 if (data) {
5905 os_atomic_inc(&c_segment_svp_nonzero_decompressions, relaxed);
5906 } else {
5907 os_atomic_inc(&c_segment_svp_zero_decompressions, relaxed);
5908 }
5909
5910 pmap_unmap_compressor_page(pn, dst);
5911 return DECOMPRESS_SUCCESS;
5912 }
5913 retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5914
5915 /*
5916 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5917 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5918 */
5919 if (zeroslot) {
5920 *slot = 0;
5921 }
5922
5923 pmap_unmap_compressor_page(pn, dst);
5924
5925 /*
5926 * returns 0 if we successfully decompressed a page from a segment already in memory
5927 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
5928 * returns -1 if we encountered an error swapping in the segment - decompression failed
5929 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5930 */
5931 return retval;
5932 }
5933
5934 vm_decompress_result_t
5935 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5936 {
5937 bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5938
5939 if (slot_is_compressed) {
5940 c_slot_mapping_t slot_ptr;
5941 int zeroslot = 1;
5942 vm_decompress_result_t retval = DECOMPRESS_SUCCESS;
5943
5944 assert(flags == 0 || flags == C_DONT_BLOCK);
5945
5946 slot_ptr = (c_slot_mapping_t)slot;
5947
5948 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5949 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5950 os_atomic_dec(&c_segment_pages_compressed, relaxed);
5951
5952 *slot = 0;
5953 return 0;
5954 }
5955
5956 #if HAS_MTE
5957 /* Don't need to worry about C_MTE flag when just freeing */
5958 #endif
5959 retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5960 /*
5961 * returns 0 if we successfully freed the specified compressed page
5962 * returns -1 if we encountered an error swapping in the segment - decompression failed
5963 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5964 */
5965
5966 if (retval == DECOMPRESS_SUCCESS) {
5967 *slot = 0;
5968 }
5969
5970 return retval;
5971 }
5972 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5973 else {
5974 if ((flags & C_PAGE_UNMODIFIED) == 0) {
5975 /* moving from uncompressed state to compressed. Free it.*/
5976 vm_uncompressed_free(slot, 0);
5977 assert(*slot == 0);
5978 }
5979 }
5980 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5981 return KERN_SUCCESS;
5982 }
5983
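/*
 * Illustrative (hypothetical) caller sketch, not taken from this file:
 * compress a physical page and stash the resulting slot value. current_chead
 * and scratch_buf normally come from the calling compressor thread's state;
 * the names below are illustrative locals only.
 *
 *	int slot = 0;
 *	void *chead = NULL;
 *	kern_return_t kr = vm_compressor_put(pn, &slot, &chead, scratch_buf, 0);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// compressor exhausted; the page stays uncompressed for now
 *	}
 */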
5984 kern_return_t
5985 vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf, vm_compressor_options_t flags)
5986 {
5987 char *src;
5988 kern_return_t kr;
5989
5990 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5991 if (flags & C_PAGE_UNMODIFIED) {
5992 if (*slot) {
5993 os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
5994 return KERN_SUCCESS;
5995 } else {
5996 kr = vm_uncompressed_put(pn, slot);
5997 if (kr == KERN_SUCCESS) {
5998 os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
5999 return kr;
6000 }
6001 }
6002 }
6003 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
6004
6005 /* get the address of the page in the physical aperture in the kernel task's virtual memory */
6006 #if HAS_MTE
6007 /* By the time we get here the physical aperture page should already have tags enabled in the pmap,
6008 * see pmap_[un]make_tagged_page() */
6009 #endif
6010 src = pmap_map_compressor_page(pn);
6011 assert(src != NULL);
6012
6013 kr = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf, flags);
6014 pmap_unmap_compressor_page(pn, src);
6015
6016 return kr;
6017 }
6018
6019 void
6020 vm_compressor_transfer(
6021 int *dst_slot_p,
6022 int *src_slot_p)
6023 {
6024 c_slot_mapping_t dst_slot, src_slot;
6025 c_segment_t c_seg;
6026 uint16_t c_indx;
6027 c_slot_t cs;
6028
6029 src_slot = (c_slot_mapping_t) src_slot_p;
6030
6031 if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
6032 *dst_slot_p = *src_slot_p;
6033 *src_slot_p = 0;
6034 return;
6035 }
6036 dst_slot = (c_slot_mapping_t) dst_slot_p;
6037 Retry:
6038 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6039 /* get segment for src_slot */
6040 c_seg = c_segments_get(src_slot->s_cseg - 1)->c_seg;
6041 /* lock segment */
6042 lck_mtx_lock_spin_always(&c_seg->c_lock);
6043 /* wait if it's busy */
6044 if (c_seg->c_busy && !c_seg->c_busy_swapping) {
6045 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6046 c_seg_wait_on_busy(c_seg);
6047 goto Retry;
6048 }
6049 /* find the c_slot */
6050 c_indx = src_slot->s_cindx;
6051 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6052 /* point the c_slot back to dst_slot instead of src_slot */
6053 C_SLOT_ASSERT_PACKABLE(dst_slot);
6054 cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
6055 /* transfer */
6056 *dst_slot_p = *src_slot_p;
6057 *src_slot_p = 0;
6058 lck_mtx_unlock_always(&c_seg->c_lock);
6059 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6060 }
6061
6062 #if defined(__arm64__)
6063 extern uint64_t vm_swapfile_last_failed_to_create_ts;
6064 __attribute__((noreturn))
6065 void
6066 vm_panic_hibernate_write_image_failed(
6067 int err,
6068 uint64_t file_size_min,
6069 uint64_t file_size_max,
6070 uint64_t file_size)
6071 {
6072 panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d, %llu, %llu, %llu\n",
6073 err,
6074 VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
6075 c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
6076 vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
6077 (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out,
6078 file_size_min, file_size_max, file_size);
6079 }
6080 #endif /*(__arm64__)*/
6081
6082 #if CONFIG_FREEZE
6083
6084 int freezer_finished_filling = 0;
6085
6086 void
6087 vm_compressor_finished_filling(
6088 void **current_chead)
6089 {
6090 c_segment_t c_seg;
6091
6092 if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
6093 return;
6094 }
6095
6096 assert(c_seg->c_state == C_IS_FILLING);
6097
6098 lck_mtx_lock_spin_always(&c_seg->c_lock);
6099
6100 c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
6101
6102 lck_mtx_unlock_always(&c_seg->c_lock);
6103
6104 freezer_finished_filling++;
6105 }
6106
6107
6108 /*
6109 * This routine is used to transfer the compressed chunks from
6110 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
6111 * by the current_chead and a new cindx within that c_seg.
6112 *
6113 * Currently, this routine is only used by the "freezer backed by
6114 * compressor with swap" mode to create a series of c_segs that
6115 * only contain compressed data belonging to one task. So, we
6116 * move a task's previously compressed data into a set of new
6117 * c_segs which will also hold the task's yet to be compressed data.
6118 */
6119
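/*
 * Rough flow (as implemented below): allocate or reuse a destination c_seg
 * from current_chead, look up and busy the source c_seg, memcpy the
 * compressed bytes (plus any extra slot data) into the destination, repoint
 * the slot mapping at the new <cseg, cindx>, and mark the old slot empty.
 */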
6120 kern_return_t
6121 vm_compressor_relocate(
6122 void **current_chead,
6123 int *slot_p)
6124 {
6125 c_slot_mapping_t slot_ptr;
6126 c_slot_mapping_t src_slot;
6127 uint32_t c_rounded_size;
6128 uint32_t c_size;
6129 uint16_t dst_slot;
6130 c_slot_t c_dst;
6131 c_slot_t c_src;
6132 uint16_t c_indx;
6133 c_segment_t c_seg_dst = NULL;
6134 c_segment_t c_seg_src = NULL;
6135 kern_return_t kr = KERN_SUCCESS;
6136 bool nearing_limits;
6137
6138
6139 src_slot = (c_slot_mapping_t) slot_p;
6140
6141 if (src_slot->s_cseg == C_SV_CSEG_ID) {
6142 /*
6143 * no need to relocate... this is a page full of a single
6144 * value which is hashed to a single entry not contained
6145 * in a c_segment_t
6146 */
6147 return kr;
6148 }
6149
6150 if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
6151 /*
6152 * Unmodified anonymous pages are sitting uncompressed on disk.
6153 * So don't pull them back in again.
6154 */
6155 return kr;
6156 }
6157
6158 Relookup_dst:
6159 c_seg_dst = c_seg_allocate((c_segment_t *)current_chead, &nearing_limits);
6160 /*
6161 * returns with c_seg lock held
6162 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
6163 * c_nextslot has been allocated and
6164 * c_store.c_buffer populated
6165 */
6166 if (c_seg_dst == NULL) {
6167 /*
6168 * Out of compression segments?
6169 */
6170 if (nearing_limits) {
6171 memorystatus_respond_to_compressor_exhaustion();
6172 }
6173 kr = KERN_RESOURCE_SHORTAGE;
6174 goto out;
6175 }
6176
6177 assert(c_seg_dst->c_busy == 0);
6178
6179 C_SEG_BUSY(c_seg_dst);
6180
6181 dst_slot = c_seg_dst->c_nextslot;
6182
6183 lck_mtx_unlock_always(&c_seg_dst->c_lock);
6184 if (nearing_limits) {
6185 memorystatus_respond_to_compressor_exhaustion();
6186 }
6187
6188 Relookup_src:
6189 c_seg_src = c_segments_get(src_slot->s_cseg - 1)->c_seg;
6190
6191 assert(c_seg_dst != c_seg_src);
6192
6193 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6194
6195 if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
6196 c_seg_src->c_state == C_IS_FILLING) {
6197 /*
6198 * Skip this page if :-
6199 * a) the src c_seg is already on-disk (or on its way there)
6200 * A "thaw" can mark a process as eligible for
6201 * another freeze cycle without bringing any of
6202 * its swapped out c_segs back from disk (because
6203 * that is done on-demand).
6204 * Or, this page may be mapped elsewhere in the task's map,
6205 * and we may have marked it for swap already.
6206 *
6207 * b) Or, the src c_seg is being filled by the compressor
6208 * thread. We don't want the added latency of waiting for
6209 * this c_seg in the freeze path and so we skip it.
6210 */
6211
6212 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6213
6214 lck_mtx_unlock_always(&c_seg_src->c_lock);
6215
6216 c_seg_src = NULL;
6217
6218 goto out;
6219 }
6220
6221 if (c_seg_src->c_busy) {
6222 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6223 c_seg_wait_on_busy(c_seg_src);
6224
6225 c_seg_src = NULL;
6226
6227 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6228
6229 goto Relookup_src;
6230 }
6231
6232 C_SEG_BUSY(c_seg_src);
6233
6234 lck_mtx_unlock_always(&c_seg_src->c_lock);
6235
6236 /* find the c_slot */
6237 c_indx = src_slot->s_cindx;
6238
6239 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
6240
6241 c_size = UNPACK_C_SIZE(c_src);
6242
6243 assert(c_size);
6244 int combined_size = c_size + c_slot_extra_size(c_src);
6245
6246 if (combined_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
6247 /*
6248 * This segment is full. We need a new one.
6249 */
6250
6251 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6252 C_SEG_WAKEUP_DONE(c_seg_src);
6253 lck_mtx_unlock_always(&c_seg_src->c_lock);
6254
6255 c_seg_src = NULL;
6256
6257 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6258
6259 assert(c_seg_dst->c_busy);
6260 assert(c_seg_dst->c_state == C_IS_FILLING);
6261 assert(!c_seg_dst->c_on_minorcompact_q);
6262
6263 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6264 assert(*current_chead == NULL);
6265
6266 C_SEG_WAKEUP_DONE(c_seg_dst);
6267
6268 lck_mtx_unlock_always(&c_seg_dst->c_lock);
6269
6270 c_seg_dst = NULL;
6271
6272 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6273
6274 goto Relookup_dst;
6275 }
6276
6277 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
6278
6279 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
6280 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6281 /*
6282 * Is platform alignment actually necessary since wkdm aligns its output?
6283 */
6284 c_rounded_size = C_SEG_ROUND_TO_ALIGNMENT(combined_size);
6285
6286 cslot_copy(c_dst, c_src);
6287 c_dst->c_offset = c_seg_dst->c_nextoffset;
6288
6289 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
6290 c_seg_dst->c_firstemptyslot++;
6291 }
6292
6293 c_seg_dst->c_slots_used++;
6294 c_seg_dst->c_nextslot++;
6295 c_seg_dst->c_bytes_used += c_rounded_size;
6296 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
6297
6298
6299 PACK_C_SIZE(c_src, 0);
6300 #if HAS_MTE
6301 c_src->c_mte_size = 0;
6302 #endif
6303
6304 c_seg_src->c_bytes_used -= c_rounded_size;
6305 c_seg_src->c_bytes_unused += c_rounded_size;
6306
6307 assert(c_seg_src->c_slots_used);
6308 c_seg_src->c_slots_used--;
6309
6310 if (!c_seg_src->c_swappedin) {
6311 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
6312 c_seg_dst->c_swappedin = false;
6313 }
6314
6315 if (c_indx < c_seg_src->c_firstemptyslot) {
6316 c_seg_src->c_firstemptyslot = c_indx;
6317 }
6318
6319 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
6320
6321 PAGE_REPLACEMENT_ALLOWED(TRUE);
6322 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
6323 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
6324 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
6325 slot_ptr->s_cindx = dst_slot;
6326
6327 PAGE_REPLACEMENT_ALLOWED(FALSE);
6328
6329 out:
6330 if (c_seg_src) {
6331 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6332
6333 C_SEG_WAKEUP_DONE(c_seg_src);
6334
6335 if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
6336 if (!c_seg_src->c_on_minorcompact_q) {
6337 c_seg_need_delayed_compaction(c_seg_src, FALSE);
6338 }
6339 }
6340
6341 lck_mtx_unlock_always(&c_seg_src->c_lock);
6342 }
6343
6344 if (c_seg_dst) {
6345 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6346
6347 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6348
6349 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
6350 /*
6351 * Nearing or exceeded maximum slot and offset capacity.
6352 */
6353 assert(c_seg_dst->c_busy);
6354 assert(c_seg_dst->c_state == C_IS_FILLING);
6355 assert(!c_seg_dst->c_on_minorcompact_q);
6356
6357 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6358 assert(*current_chead == NULL);
6359 }
6360
6361 C_SEG_WAKEUP_DONE(c_seg_dst);
6362
6363 lck_mtx_unlock_always(&c_seg_dst->c_lock);
6364
6365 c_seg_dst = NULL;
6366
6367 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6368 }
6369
6370 return kr;
6371 }
6372 #endif /* CONFIG_FREEZE */
6373
6374 #if DEVELOPMENT || DEBUG
6375
6376 void
6377 vm_compressor_inject_error(int *slot)
6378 {
6379 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
6380
6381 /* No error detection for single-value compression. */
6382 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
6383 printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
6384 return;
6385 }
6386
6387 /* s_cseg is actually "segno+1" */
6388 const uint32_t c_segno = slot_ptr->s_cseg - 1;
6389
6390 assert(c_segno < c_segments_available);
6391 assert(c_segments_get(c_segno)->c_segno >= c_segments_available);
6392
6393 const c_segment_t c_seg = c_segments_get(c_segno)->c_seg;
6394
6395 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6396
6397 lck_mtx_lock_spin_always(&c_seg->c_lock);
6398 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
6399
6400 const uint16_t c_indx = slot_ptr->s_cindx;
6401 assert(c_indx < c_seg->c_nextslot);
6402
6403 /*
6404 * To safely make this segment temporarily writable, we need to mark
6405 * the segment busy, which allows us to release the segment lock.
6406 */
6407 while (c_seg->c_busy) {
6408 c_seg_wait_on_busy(c_seg);
6409 lck_mtx_lock_spin_always(&c_seg->c_lock);
6410 }
6411 C_SEG_BUSY(c_seg);
6412
6413 bool already_writable = (c_seg->c_state == C_IS_FILLING);
6414 if (!already_writable) {
6415 /*
6416 * Protection update must be performed preemptibly, so temporarily drop
6417 * the lock. Having set c_busy will prevent most other concurrent
6418 * operations.
6419 */
6420 lck_mtx_unlock_always(&c_seg->c_lock);
6421 C_SEG_MAKE_WRITEABLE(c_seg);
6422 lck_mtx_lock_spin_always(&c_seg->c_lock);
6423 }
6424
6425 /*
6426 * Once we've released the lock following our c_state == C_IS_FILLING check,
6427 * c_current_seg_filled() can (re-)write-protect the segment. However, it
6428 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
6429 * can detect this by re-checking after we've reobtained the lock.
6430 */
6431 if (already_writable && c_seg->c_state != C_IS_FILLING) {
6432 lck_mtx_unlock_always(&c_seg->c_lock);
6433 C_SEG_MAKE_WRITEABLE(c_seg);
6434 lck_mtx_lock_spin_always(&c_seg->c_lock);
6435 already_writable = false;
6436 /* Segment can't be freed while c_busy is set. */
6437 assert(c_seg->c_state != C_IS_FILLING);
6438 }
6439
6440 /*
6441 * Skip if the segment is on disk. This check can only be performed after
6442 * the final acquisition of the segment lock before we attempt to write to
6443 * the segment.
6444 */
6445 if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
6446 c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6447 int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
6448 /* assume that the compressed data holds at least one int32_t */
6449 assert(UNPACK_C_SIZE(cs) > sizeof(*data));
6450 /*
6451 * This bit is known to be in the payload of a MISS packet resulting from
6452 * the test pattern used by decompression_failure.c.
6453 * Flipping it should result in many corrupted bits in the test page.
6454 */
6455 data[0] ^= 0x00000100;
6456 }
6457
6458 if (!already_writable) {
6459 lck_mtx_unlock_always(&c_seg->c_lock);
6460 C_SEG_WRITE_PROTECT(c_seg);
6461 lck_mtx_lock_spin_always(&c_seg->c_lock);
6462 }
6463
6464 C_SEG_WAKEUP_DONE(c_seg);
6465 lck_mtx_unlock_always(&c_seg->c_lock);
6466
6467 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6468 }
6469
6470 /*
6471 * Serialize information about a specific segment.
6472 * Returns KERN_SUCCESS if the segment was written or there's nothing to write for this segno,
6473 * KERN_NO_SPACE if the supplied buffer isn't large enough.
6474 * The 'size' argument carries the input buffer size on entry; on return it holds the number of bytes written (left at 0 on failure).
6475 */
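/*
 * Layout sketch of the serialized output (derived from the code below): one
 * struct c_segment_info header, followed by csi_slots_len struct c_slot_info
 * records; with HAS_MTE and VM_C_SERIALIZE_DATA_TAGS, a slot record may be
 * followed immediately by its raw compressed-tag bytes.
 */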
6476 kern_return_t
6477 vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size, vm_c_serialize_add_data_t with_data)
6478 {
6479 size_t insize = *size;
6480 size_t offset = 0;
6481 *size = 0;
6482 if (c_segments_get(segno)->c_segno < c_segments_available) {
6483 /* This check means no pointer is assigned here, so the entry must be an index in the free list.
6484 * If this were an active c_segment, .c_seg would hold a pointer which, interpreted as an integer,
6485 * would be larger than c_segments_available. See also the assert to this effect right after c_seg
6486 * is assigned in c_seg_allocate().
6487 */
6488 return KERN_SUCCESS;
6489 }
6490 if (c_segments_get(segno)->c_segno == (uint32_t)-1) {
6491 /* c_segno of the end of the free-list */
6492 return KERN_SUCCESS;
6493 }
6494
6495 const struct c_segment* c_seg = c_segments_get(segno)->c_seg;
6496 if (c_seg->c_state == C_IS_FREE) {
6497 return KERN_SUCCESS; /* nothing needs to be done */
6498 }
6499
6500 int nslots = c_seg->c_nextslot;
6501 /* do we have enough space for slots (without data)? */
6502 if (sizeof(struct c_segment_info) + (nslots * sizeof(struct c_slot_info)) > insize) {
6503 return KERN_NO_SPACE; /* not enough space, please call me again */
6504 }
6505
6506 struct c_segment_info* csi = (struct c_segment_info*)buf;
6507 offset += sizeof(struct c_segment_info);
6508
6509 csi->csi_mysegno = c_seg->c_mysegno;
6510 csi->csi_creation_ts = c_seg->c_creation_ts;
6511 csi->csi_swappedin_ts = c_seg->c_swappedin_ts;
6512 csi->csi_bytes_unused = c_seg->c_bytes_unused;
6513 csi->csi_bytes_used = c_seg->c_bytes_used;
6514 csi->csi_populated_offset = c_seg->c_populated_offset;
6515 csi->csi_state = c_seg->c_state;
6516 csi->csi_swappedin = c_seg->c_swappedin;
6517 csi->csi_on_minor_compact_q = c_seg->c_on_minorcompact_q;
6518 csi->csi_has_donated_pages = c_seg->c_has_donated_pages;
6519 csi->csi_slots_used = (uint16_t)c_seg->c_slots_used;
6520 csi->csi_slot_var_array_len = c_seg->c_slot_var_array_len;
6521 csi->csi_slots_len = (uint16_t)nslots;
6522 #if TRACK_C_SEGMENT_UTILIZATION
6523 csi->csi_decompressions_since_swapin = c_seg->c_decompressions_since_swapin;
6524 #else
6525 csi->csi_decompressions_since_swapin = 0;
6526 #endif /* TRACK_C_SEGMENT_UTILIZATION */
6527 #if HAS_MTE
6528 bool cseg_in_mem = !C_SEG_IS_ON_DISK_OR_SOQ(c_seg);
6529 #endif /* HAS_MTE */
6530 /* This entire data collection races with the compressor threads, which can change any
6531 * of these data members and, specifically, can drop the data buffer to swap.
6532 * We don't take the segment lock since that would slow the iteration over the segments down
6533 * and hurt the "snapshot-ness" of the data. The race risk is acceptable since this is
6534 * used only for a tester in development. */
6535
	for (int si = 0; si < nslots; ++si) {
		if (offset + sizeof(struct c_slot_info) > insize) {
			return KERN_NO_SPACE;
		}
		/* see also c_seg_validate() for some of the details */
		const struct c_slot* cs = C_SEG_SLOT_FROM_INDEX(c_seg, si);
		struct c_slot_info* ssi = (struct c_slot_info*)(buf + offset);
		offset += sizeof(struct c_slot_info);
		ssi->csi_size = (uint16_t)UNPACK_C_SIZE(cs);
#if HAS_MTE
		ssi->csi_mte_size = cs->c_mte_size;
		ssi->csi_mte_has_data = 0;
		uint32_t actual_mte_size = vm_mte_compressed_tags_actual_size(ssi->csi_mte_size);
		if (with_data == VM_C_SERIALIZE_DATA_TAGS && actual_mte_size > 0 && cseg_in_mem) {
			if (offset + actual_mte_size > insize) {
				return KERN_NO_SPACE;
			}
			char* tags_buf = ((char *)&c_seg->c_store.c_buffer[cs->c_offset]) + ssi->csi_size;
			memcpy(buf + offset, tags_buf, actual_mte_size);
			offset += actual_mte_size;
			ssi->csi_mte_has_data = 1;
		}
#else /* HAS_MTE */
#pragma unused(with_data)
		ssi->csi_unused = 0;
#endif /* HAS_MTE */
	}
	*size = offset;
	return KERN_SUCCESS;
}
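
/*
 * Illustrative only: a minimal sketch of how a development-time consumer might
 * drive the serializer above, retrying with a larger buffer whenever it
 * returns KERN_NO_SPACE. The caller name and the starting buffer size are
 * hypothetical and are not part of this file.
 */
#if 0 /* sketch, not compiled */
static kern_return_t
example_dump_segment_info(int segno, vm_c_serialize_add_data_t with_data)
{
	size_t bufsize = 4 * 1024;      /* hypothetical starting size */

	for (;;) {
		char *buf = kalloc_data(bufsize, Z_WAITOK);
		if (buf == NULL) {
			return KERN_RESOURCE_SHORTAGE;
		}
		size_t used = bufsize;  /* in: capacity, out: bytes written */
		kern_return_t kr = vm_compressor_serialize_segment_debug_info(segno, buf, &used, with_data);
		if (kr == KERN_NO_SPACE) {
			/* not enough room for this segment's slots; grow and retry */
			kfree_data(buf, bufsize);
			bufsize *= 2;
			continue;
		}
		/* ... hand buf[0..used) to whatever collects the snapshot ... */
		kfree_data(buf, bufsize);
		return kr;
	}
}
#endif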

#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_TRACK_UNMODIFIED_ANON_PAGES

struct vnode;
extern void vm_swapfile_open(const char *path, struct vnode **vp);
extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);

struct vnode *uncompressed_vp0 = NULL;
struct vnode *uncompressed_vp1 = NULL;
uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;

uint64_t compressor_ro_uncompressed = 0;
uint64_t compressor_ro_uncompressed_total_returned = 0;
uint64_t compressor_ro_uncompressed_skip_returned = 0;
uint64_t compressor_ro_uncompressed_get = 0;
uint64_t compressor_ro_uncompressed_put = 0;
uint64_t compressor_ro_uncompressed_swap_usage = 0;

extern void vnode_put(struct vnode* vp);
extern int vnode_getwithref(struct vnode* vp);
extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);

#define MAX_OFFSET_PAGES (255)
uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];

#define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1 << 31ull) - 1)
#define UNCOMPRESSED_FILEIDX_SHIFT (29)
#define UNCOMPRESSED_FILEIDX_MASK (3)
#define UNCOMPRESSED_OFFSET_SHIFT (29)
#define UNCOMPRESSED_OFFSET_MASK (7)
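
/*
 * Layout of a packed "uncompressed" slot value (worked example, for reference):
 * bits 0..28 hold the byte offset into the backing file and bits 29..30 hold
 * the file index (1 selects uncompressedswap0, 2 selects uncompressedswap1).
 * For instance, byte offset 0x14000 in file 0 is encoded as
 * (1 << UNCOMPRESSED_FILEIDX_SHIFT) + 0x14000 = 0x20014000;
 * vm_uncompressed_extract_swap_file() then recovers 1 and
 * vm_uncompressed_extract_swap_offset() recovers 0x14000. The s_uncompressed
 * marker bit is set separately by vm_uncompressed_put().
 */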

static uint32_t
vm_uncompressed_extract_swap_file(int slot)
{
	uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
	return fileidx;
}

static uint32_t
vm_uncompressed_extract_swap_offset(int slot)
{
	return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
}
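
/*
 * The free-space bitmaps above track one bit per page of backing-file space:
 * for a byte offset, pgidx = offset / PAGE_SIZE_64 selects the page,
 * pgidx / 64 selects the uint64_t word and pgidx % 64 the bit within it.
 * For illustration, assuming 16KB pages, an offset of 0x14000 is page 5,
 * i.e. word 0, bit 5 of the bitmap.
 */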

static void
vm_uncompressed_return_space_to_swap(int slot)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);
	uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
	if (fileidx == 1) {
		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
		uint64_t pgidx = free_offset / PAGE_SIZE_64;
		uint64_t chunkidx = pgidx / 64;
		uint64_t chunkoffset = pgidx % 64;
#if DEVELOPMENT || DEBUG
		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
#endif /*DEVELOPMENT || DEBUG*/
		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
		uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
		assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);

		uncompressed_file0_free_pages++;
	} else {
		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
		uint64_t pgidx = free_offset / PAGE_SIZE_64;
		uint64_t chunkidx = pgidx / 64;
		uint64_t chunkoffset = pgidx % 64;
		assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
		    "%llu %llu", chunkidx, chunkoffset);
		uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);

		uncompressed_file1_free_pages++;
	}
	compressor_ro_uncompressed_swap_usage--;
	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

static int
vm_uncompressed_reserve_space_in_swap()
{
	int slot = 0;
	if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
		return -1;
	}

	PAGE_REPLACEMENT_ALLOWED(TRUE);
	if (uncompressed_file0_free_pages) {
		uint64_t chunkidx = 0;
		uint64_t chunkoffset = 0;
		while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
			chunkidx++;
		}
		while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
			chunkoffset++;
		}

		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
		    "%llu %llu", chunkidx, chunkoffset);
#if DEVELOPMENT || DEBUG
		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
#endif /*DEVELOPMENT || DEBUG*/
		uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
		uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
		    "%llu %llu", chunkidx, chunkoffset);

		assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
		slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
		uncompressed_file0_free_pages--;
	} else {
		uint64_t chunkidx = 0;
		uint64_t chunkoffset = 0;
		while (uncompressed_file1_space_bitmap[chunkidx] == 0xFFFFFFFFFFFFFFFF) {
			chunkidx++;
		}
		while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
			chunkoffset++;
		}
		assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
		uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
		uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
		slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
		uncompressed_file1_free_pages--;
	}
	compressor_ro_uncompressed_swap_usage++;
	PAGE_REPLACEMENT_ALLOWED(FALSE);
	return slot;
}

#define MAX_IO_REQ (16)
struct _uncompressor_io_req {
	uint64_t addr;
	bool inuse;
} uncompressor_io_req[MAX_IO_REQ];
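
/*
 * A small, fixed pool of page-sized bounce buffers used for swapfile I/O.
 * Callers below grab a free entry under PAGE_REPLACEMENT_ALLOWED() and, when
 * all MAX_IO_REQ entries are in use, wait on &uncompressor_io_req until
 * thread_wakeup() signals that a buffer has been released.
 */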

int
vm_uncompressed_put(ppnum_t pn, int *slot)
{
	int retval = 0;
	struct vnode *uncompressed_vp = NULL;
	uint64_t uncompress_offset = 0;

again:
	if (uncompressed_vp0 == NULL) {
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		if (uncompressed_vp0 == NULL) {
			uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
			vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
			if (uncompressed_vp0 == NULL) {
				PAGE_REPLACEMENT_ALLOWED(FALSE);
				return KERN_NO_ACCESS;
			}
			vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
			uncompressed_file0_free_pages = (uint32_t)atop(size);
			bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);

			int i = 0;
			for (; i < MAX_IO_REQ; i++) {
				kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
				uncompressor_io_req[i].inuse = false;
			}

			vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
			assert(uncompressed_vp1);
			vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
			uncompressed_file1_free_pages = (uint32_t)atop(size);
			bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
		} else {
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			delay(100);
			goto again;
		}
	}

	int swapinfo = vm_uncompressed_reserve_space_in_swap();
	if (swapinfo == -1) {
		*slot = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
		uncompressed_vp = uncompressed_vp0;
	} else {
		uncompressed_vp = uncompressed_vp1;
	}
	uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
		vm_log_error("vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
	} else {
		int i = 0;
retry:
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		for (i = 0; i < MAX_IO_REQ; i++) {
			if (uncompressor_io_req[i].inuse == false) {
				uncompressor_io_req[i].inuse = true;
				break;
			}
		}
		if (i == MAX_IO_REQ) {
			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			thread_block(THREAD_CONTINUE_NULL);
			goto retry;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
		void *addr = pmap_map_compressor_page(pn);
		memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
		pmap_unmap_compressor_page(pn, addr);

		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
		if (retval) {
			*slot = 0;
		} else {
			*slot = (int)swapinfo;
			((c_slot_mapping_t)(slot))->s_uncompressed = 1;
		}
		vnode_put(uncompressed_vp);
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		uncompressor_io_req[i].inuse = false;
		thread_wakeup((event_t)&uncompressor_io_req);
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return retval;
}

int
vm_uncompressed_get(ppnum_t pn, int *slot, __unused vm_compressor_options_t flags)
{
	int retval = 0;
	struct vnode *uncompressed_vp = NULL;
	uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
	uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);

	if (__improbable(flags & C_KDP)) {
		return -2;
	}

	if (fileidx == 1) {
		uncompressed_vp = uncompressed_vp0;
	} else {
		uncompressed_vp = uncompressed_vp1;
	}

	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
		vm_log_error("vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
	} else {
		int i = 0;
retry:
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		for (i = 0; i < MAX_IO_REQ; i++) {
			if (uncompressor_io_req[i].inuse == false) {
				uncompressor_io_req[i].inuse = true;
				break;
			}
		}
		if (i == MAX_IO_REQ) {
			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			thread_block(THREAD_CONTINUE_NULL);
			goto retry;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
		vnode_put(uncompressed_vp);
		void *addr = pmap_map_compressor_page(pn);
		memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
		pmap_unmap_compressor_page(pn, addr);
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		uncompressor_io_req[i].inuse = false;
		thread_wakeup((event_t)&uncompressor_io_req);
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return retval;
}

int
vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
{
	vm_uncompressed_return_space_to_swap(*slot);
	*slot = 0;
	return 0;
}
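
/*
 * Illustrative only: a minimal sketch of the put/get/free round trip these
 * entry points provide for an unmodified anonymous page. The caller name and
 * calling context are hypothetical; real callers live in the compressor
 * pager paths.
 */
#if 0 /* sketch, not compiled */
static void
example_uncompressed_roundtrip(ppnum_t pn)
{
	int slot = 0;

	/* write the page out to one of the uncompressedswap files */
	if (vm_uncompressed_put(pn, &slot) != 0) {
		return; /* no swapfile or no space; the caller would fall back to compressing */
	}

	/* ... later: read the page back into the physical frame pn ... */
	if (vm_uncompressed_get(pn, &slot, 0) == 0) {
		/* release the backing-file space and clear the slot */
		vm_uncompressed_free(&slot, 0);
	}
}
#endif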

#endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
