/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include "vm_compressor_backing_store_internal.h"
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_compressor_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_map_internal.h>

#include <IOKit/IOHibernatePrivate.h>
#include <kern/policy_internal.h>
#include <sys/kern_memorystatus_xnu.h>

LCK_GRP_DECLARE(vm_swap_data_lock_grp, "vm_swap_data");
LCK_MTX_DECLARE(vm_swap_data_lock, &vm_swap_data_lock_grp);

#if defined(XNU_TARGET_OS_OSX)
/*
 * launchd explicitly turns ON swap later during boot on macOS devices.
 */
boolean_t compressor_store_stop_compaction = TRUE;
#else
boolean_t compressor_store_stop_compaction = FALSE;
#endif

boolean_t vm_swapfile_create_needed = FALSE;
boolean_t vm_swapfile_gc_needed = FALSE;

int vm_swapper_throttle = -1;
uint64_t vm_swapout_thread_id;

uint64_t vm_swap_put_failures = 0; /* Likely failed I/O. Data is still in memory. */
uint64_t vm_swap_get_failures = 0; /* Fatal */
uint64_t vm_swap_put_failures_no_swap_file = 0; /* Possibly not fatal because we might just need a new swapfile. */
int vm_num_swap_files_config = 0;
int vm_num_swap_files = 0;
int vm_num_pinned_swap_files = 0;
uint64_t vm_swap_volume_capacity = 0;
int vm_swapout_thread_processed_segments = 0;
int vm_swapout_thread_awakened = 0;
bool vm_swapout_thread_running = FALSE;
_Atomic bool vm_swapout_wake_pending = false;
int vm_swapfile_create_thread_awakened = 0;
int vm_swapfile_create_thread_running = 0;
int vm_swapfile_gc_thread_awakened = 0;
int vm_swapfile_gc_thread_running = 0;

int64_t vm_swappin_avail = 0;
boolean_t vm_swappin_enabled = FALSE;
unsigned int vm_swapfile_total_segs_alloced = 0;
unsigned int vm_swapfile_total_segs_alloced_max = 0;
unsigned int vm_swapfile_total_segs_used = 0;
unsigned int vm_swapfile_total_segs_used_max = 0;

char swapfilename[MAX_SWAPFILENAME_LEN + 1] = SWAP_FILE_NAME;

extern vm_map_t compressor_map;
extern uint32_t c_seg_bufsize, c_seg_allocsize, c_seg_off_limit;

#define SWAP_READY 0x1 /* Swap file is ready to be used */
#define SWAP_RECLAIM 0x2 /* Swap file is marked to be reclaimed */
#define SWAP_WANTED 0x4 /* Swap file has waiters */
#define SWAP_REUSE 0x8 /* Swap file is on the Q and has a name. Reuse after init-ing.*/
#define SWAP_PINNED 0x10 /* Swap file is pinned (FusionDrive) */


struct swapfile {
	queue_head_t swp_queue;     /* list of swap files */
	char *swp_path;             /* saved pathname of swap file */
	struct vnode *swp_vp;       /* backing vnode */
	uint64_t swp_size;          /* size of this swap file */
	uint8_t *swp_bitmap;        /* bitmap showing the alloced/freed slots in the swap file */
	unsigned int swp_pathlen;   /* length of pathname */
	unsigned int swp_nsegs;     /* #segments we can use */
	unsigned int swp_nseginuse; /* #segments in use */
	unsigned int swp_index;     /* index of this swap file */
	unsigned int swp_flags;     /* state of swap file */
	unsigned int swp_free_hint; /* offset of 1st free chunk */
	unsigned int swp_io_count;  /* count of outstanding I/Os */
	c_segment_t *swp_csegs;     /* back pointers to the c_segments. Used during swap reclaim. */

	struct trim_list *swp_delayed_trim_list_head;
	unsigned int swp_delayed_trim_count;
};

queue_head_t swf_global_queue;
boolean_t swp_trim_supported = FALSE;

extern uint64_t dont_trim_until_ts;
uint64_t vm_swapfile_last_failed_to_create_ts = 0;
uint64_t vm_swapfile_last_successful_create_ts = 0;
int vm_swapfile_can_be_created = FALSE;
boolean_t delayed_trim_handling_in_progress = FALSE;

boolean_t hibernate_in_progress_with_pinned_swap = FALSE;

static void vm_swapout_thread_throttle_adjust(void);
static void vm_swap_free_now(struct swapfile *swf, uint64_t f_offset);
static void vm_swapfile_create_thread(void);
static void vm_swapfile_gc_thread(void);
static void vm_swap_defragment(void);
static void vm_swap_handle_delayed_trims(boolean_t);
static void vm_swap_do_delayed_trim(struct swapfile *);
static void vm_swap_wait_on_trim_handling_in_progress(void);
static void vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_return_t kr);

extern int vnode_getwithref(struct vnode* vp);

boolean_t vm_swap_force_defrag = FALSE, vm_swap_force_reclaim = FALSE;

#if !XNU_TARGET_OS_OSX

/*
 * For CONFIG_FREEZE, we scale the c_segments_limit based on the
 * number of swapfiles allowed. That increases wired memory overhead.
 * So we want to keep the max swapfiles same on both DEV/RELEASE so
 * that the memory overhead is similar for performance comparisons.
 */
#define VM_MAX_SWAP_FILE_NUM 5
#if defined(__arm64__) && defined(ARM_LARGE_MEMORY)
#define VM_MAX_SWAP_FILE_SWAP_ENABLED_NUM (64ULL * (1ULL << 30) / MAX_SWAP_FILE_SIZE)
#define VM_MIN_SWAP_FILE_SWAP_ENABLED_NUM (16ULL * (1ULL << 30) / MAX_SWAP_FILE_SIZE)
#else /* defined(__arm64__) && defined(ARM_LARGE_MEMORY) */
/*
 * We reserve compressor pool VA at boot for the max # of swap files. If someone
 * has enabled app swap but we're not an arm large memory device we can't hog
 * all of the VA so we only go up to 4GB.
 */
#define VM_MAX_SWAP_FILE_SWAP_ENABLED_NUM (4ULL * (1ULL << 30) / MAX_SWAP_FILE_SIZE)
#define VM_MIN_SWAP_FILE_SWAP_ENABLED_NUM (4ULL * (1ULL << 30) / MAX_SWAP_FILE_SIZE)
#endif /* defined(__arm64__) && defined(ARM_LARGE_MEMORY) */
#define VM_SWAP_MIN_VOLUME_CAPACITY (128ULL * (1ULL << 30))

#define VM_SWAPFILE_DELAYED_TRIM_MAX 4

#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 16))) ? 1 : 0)
#define VM_SWAP_SHOULD_PIN(_size) FALSE
#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0)

#else /* !XNU_TARGET_OS_OSX */

#define VM_MAX_SWAP_FILE_NUM 100
#define VM_SWAPFILE_DELAYED_TRIM_MAX 128

#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 4))) ? 1 : 0)
#define VM_SWAP_SHOULD_PIN(_size) (vm_swappin_avail > 0 && vm_swappin_avail >= (int64_t)(_size))
#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0)

#endif /* !XNU_TARGET_OS_OSX */

#define VM_SWAP_SHOULD_RECLAIM() (((vm_swap_force_reclaim == TRUE) || ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) >= swapfile_reclaim_threshold_segs)) ? 1 : 0)
#define VM_SWAP_SHOULD_ABORT_RECLAIM() (((vm_swap_force_reclaim == FALSE) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) <= swapfile_reclam_minimum_segs)) ? 1 : 0)

#define VM_SWAP_BUSY() (((c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count) && (vm_swapper_throttle == THROTTLE_LEVEL_COMPRESSOR_TIER0)) ? 1 : 0)


#if CHECKSUM_THE_SWAP
extern unsigned int hash_string(char *cp, int len);
#endif

#if RECORD_THE_COMPRESSED_DATA
boolean_t c_compressed_record_init_done = FALSE; /* was the record file opened? */
int c_compressed_record_write_error = 0;
struct vnode *c_compressed_record_vp = NULL; /* the file opened for record write */
uint64_t c_compressed_record_file_offset = 0; /* next write offset */
void c_compressed_record_init(void);
void c_compressed_record_write(char *, int);
#endif

extern void vm_pageout_io_throttle(void);

static struct swapfile *vm_swapfile_for_handle(uint64_t);

/*
 * Called with the vm_swap_data_lock held.
 */

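/*
 * A swap "handle" (f_offset) packs the owning swapfile's index into its high
 * bits and the byte offset within that file into its low bits, mirroring the
 * encode in vm_swap_put(). A sketch of the decode performed below, with the
 * constants' exact widths defined in the backing-store headers:
 *
 *	file_offset    = handle & SWAP_SLOT_MASK;
 *	swapfile_index = handle >> SWAP_DEVICE_SHIFT;
 */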
static struct swapfile *
vm_swapfile_for_handle(uint64_t f_offset)
{
	uint64_t file_offset = 0;
	unsigned int swapfile_index = 0;
	struct swapfile* swf = NULL;

	file_offset = (f_offset & SWAP_SLOT_MASK);
	swapfile_index = (f_offset >> SWAP_DEVICE_SHIFT);

	swf = (struct swapfile*) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		if (swapfile_index == swf->swp_index) {
			break;
		}

		swf = (struct swapfile*) queue_next(&swf->swp_queue);
	}

	if (queue_end(&swf_global_queue, (queue_entry_t) swf)) {
		swf = NULL;
	}

	return swf;
}

#if ENCRYPTED_SWAP

#include <libkern/crypto/aesxts.h>

extern int cc_rand_generate(void *, size_t); /* from libkern/crypto/rand.h */

boolean_t swap_crypt_initialized;
void swap_crypt_initialize(void);

symmetric_xts xts_modectx;
uint32_t swap_crypt_key1[8]; /* big enough for a 256 bit random key */
uint32_t swap_crypt_key2[8]; /* big enough for a 256 bit random key */

#if DEVELOPMENT || DEBUG
boolean_t swap_crypt_xts_tested = FALSE;
unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
#endif /* DEVELOPMENT || DEBUG */

unsigned long vm_page_encrypt_counter;
unsigned long vm_page_decrypt_counter;


void
swap_crypt_initialize(void)
{
	uint8_t *enckey1, *enckey2;
	int keylen1, keylen2;
	int error;

	assert(swap_crypt_initialized == FALSE);

	keylen1 = sizeof(swap_crypt_key1);
	enckey1 = (uint8_t *)&swap_crypt_key1;
	keylen2 = sizeof(swap_crypt_key2);
	enckey2 = (uint8_t *)&swap_crypt_key2;

	error = cc_rand_generate((void *)enckey1, keylen1);
	assert(!error);

	error = cc_rand_generate((void *)enckey2, keylen2);
	assert(!error);

	error = xts_start(0, NULL, enckey1, keylen1, enckey2, keylen2, 0, 0, &xts_modectx);
	assert(!error);

	swap_crypt_initialized = TRUE;

#if DEVELOPMENT || DEBUG
	uint8_t *encptr;
	uint8_t *decptr;
	uint8_t *refptr;
	uint8_t *iv;
	uint64_t ivnum[2];
	int size = 0;
	int i = 0;
	int rc = 0;

	assert(swap_crypt_xts_tested == FALSE);

	/*
	 * Validate the encryption algorithms.
	 *
	 * First initialize the test data.
	 */
	for (i = 0; i < 4096; i++) {
		swap_crypt_test_page_ref[i] = (char) i;
	}
	ivnum[0] = (uint64_t)0xaa;
	ivnum[1] = 0;
	iv = (uint8_t *)ivnum;

	refptr = (uint8_t *)swap_crypt_test_page_ref;
	encptr = (uint8_t *)swap_crypt_test_page_encrypt;
	decptr = (uint8_t *)swap_crypt_test_page_decrypt;
	size = 4096;

	/* encrypt */
	rc = xts_encrypt(refptr, size, encptr, iv, &xts_modectx);
	assert(!rc);

	/* compare result with original - should NOT match */
	for (i = 0; i < 4096; i++) {
		if (swap_crypt_test_page_encrypt[i] !=
		    swap_crypt_test_page_ref[i]) {
			break;
		}
	}
	assert(i != 4096);

	/* decrypt */
	rc = xts_decrypt(encptr, size, decptr, iv, &xts_modectx);
	assert(!rc);

	/* compare result with original */
	for (i = 0; i < 4096; i++) {
		if (swap_crypt_test_page_decrypt[i] !=
		    swap_crypt_test_page_ref[i]) {
			panic("encryption test failed");
		}
	}
	/* encrypt in place */
	rc = xts_encrypt(decptr, size, decptr, iv, &xts_modectx);
	assert(!rc);

	/* decrypt in place */
	rc = xts_decrypt(decptr, size, decptr, iv, &xts_modectx);
	assert(!rc);

	for (i = 0; i < 4096; i++) {
		if (swap_crypt_test_page_decrypt[i] !=
		    swap_crypt_test_page_ref[i]) {
			panic("in place encryption test failed");
		}
	}
	swap_crypt_xts_tested = TRUE;
#endif /* DEVELOPMENT || DEBUG */
}


void
vm_swap_encrypt(c_segment_t c_seg)
{
	uint8_t *ptr;
	uint8_t *iv;
	uint64_t ivnum[2];
	int size = 0;
	int rc = 0;

	if (swap_crypt_initialized == FALSE) {
		swap_crypt_initialize();
	}

	/*
	 * Data stored in the compressor should never need to be faulted in.
	 * Make sure pages storing data that we're encrypting cannot
	 * be stolen out from under us in the off chance that the mapping
	 * gets disconnected while we're actively encrypting.
	 */
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
#if DEVELOPMENT || DEBUG
	C_SEG_MAKE_WRITEABLE(c_seg);
#endif
	ptr = (uint8_t *)c_seg->c_store.c_buffer;
	size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

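	/*
	 * Note on the scheme as written here (not a spec): the segment's
	 * kernel address doubles as the 128-bit XTS tweak (IV), with the
	 * pointer in the low 64 bits and zero in the high 64 bits, so
	 * decrypt must be handed the same c_seg address to recover the data.
	 */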
	ivnum[0] = (uint64_t)c_seg;
	ivnum[1] = 0;
	iv = (uint8_t *)ivnum;

	rc = xts_encrypt(ptr, size, ptr, iv, &xts_modectx);
	assert(!rc);

	vm_page_encrypt_counter += (size / PAGE_SIZE_64);

#if DEVELOPMENT || DEBUG
	C_SEG_WRITE_PROTECT(c_seg);
#endif
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}

void
vm_swap_decrypt(c_segment_t c_seg, bool disallow_page_replacement)
{
	uint8_t *ptr;
	uint8_t *iv;
	uint64_t ivnum[2];
	int size = 0;
	int rc = 0;

	assert(swap_crypt_initialized);

	/*
	 * See comment in vm_swap_encrypt().
	 * The master lock may already be held, though, which is why we don't do
	 * PAGE_REPLACEMENT_DISALLOWED(TRUE) and do a try_lock instead.
	 */
	if (disallow_page_replacement) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);
	}

#if DEVELOPMENT || DEBUG
	C_SEG_MAKE_WRITEABLE(c_seg);
#endif
	ptr = (uint8_t *)c_seg->c_store.c_buffer;
	size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

	ivnum[0] = (uint64_t)c_seg;
	ivnum[1] = 0;
	iv = (uint8_t *)ivnum;

	rc = xts_decrypt(ptr, size, ptr, iv, &xts_modectx);
	assert(!rc);

	vm_page_decrypt_counter += (size / PAGE_SIZE_64);

#if DEVELOPMENT || DEBUG
	C_SEG_WRITE_PROTECT(c_seg);
#endif
	if (disallow_page_replacement) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}
}
#endif /* ENCRYPTED_SWAP */

uint64_t compressed_swap_chunk_size, vm_swapfile_hiwater_segs, swapfile_reclaim_threshold_segs, swapfile_reclam_minimum_segs;
extern bool memorystatus_swap_all_apps;

void
vm_compressor_swap_init_swap_file_limit(void)
{
	vm_num_swap_files_config = VM_MAX_SWAP_FILE_NUM;
#if CONFIG_JETSAM
	if (memorystatus_swap_all_apps) {
		if (vm_swap_volume_capacity == 0) {
			/*
			 * Early in boot we don't know the swap volume capacity.
			 * That's fine. Reserve space for the maximum config
			 * and we'll lower this later in boot once we have the capacity.
			 */
			vm_num_swap_files_config = VM_MAX_SWAP_FILE_SWAP_ENABLED_NUM;
		} else {
			static uint64_t kFixedPointFactor = 100;
			/*
			 * Scale the max number of swap files linearly.
			 * But we can never go above VM_MAX_SWAP_FILE_SWAP_ENABLED_NUM.
			 */
			vm_num_swap_files_config = vm_swap_volume_capacity * kFixedPointFactor / VM_SWAP_MIN_VOLUME_CAPACITY
			    * VM_MIN_SWAP_FILE_SWAP_ENABLED_NUM / kFixedPointFactor;
			vm_num_swap_files_config = MAX(vm_num_swap_files_config, VM_MIN_SWAP_FILE_SWAP_ENABLED_NUM);
			vm_num_swap_files_config = MIN(vm_num_swap_files_config, VM_MAX_SWAP_FILE_SWAP_ENABLED_NUM);
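			/*
			 * Worked example with hypothetical numbers: on a
			 * 256GB volume, with VM_SWAP_MIN_VOLUME_CAPACITY at
			 * 128GB, the fixed-point math yields
			 * 256GB * 100 / 128GB = 200, then
			 * 200 * VM_MIN_SWAP_FILE_SWAP_ENABLED_NUM / 100 =
			 * twice the minimum file count, which is then clamped
			 * to the [MIN, MAX]_SWAP_FILE_SWAP_ENABLED_NUM range.
			 */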
		}
	}
#endif /* CONFIG_JETSAM */
#if DEVELOPMENT || DEBUG
	typeof(vm_num_swap_files_config) parsed_vm_max_num_swap_files = 0;
	if (PE_parse_boot_argn("vm_max_num_swap_files", &parsed_vm_max_num_swap_files, sizeof(parsed_vm_max_num_swap_files))) {
		if (parsed_vm_max_num_swap_files > 0) {
			vm_num_swap_files_config = parsed_vm_max_num_swap_files;
		} else {
			printf("WARNING: Ignoring vm_max_num_swap_files=%d boot-arg. Value must be > 0\n", parsed_vm_max_num_swap_files);
		}
	}
#endif
	printf("Maximum number of VM swap files: %d\n", vm_num_swap_files_config);
}

int vm_swap_enabled = 0;
void
vm_compressor_swap_init(void)
{
	thread_t thread = NULL;

	queue_init(&swf_global_queue);

#if !XNU_TARGET_OS_OSX
	/*
	 * dummy value until the swap file gets created
	 * when we drive the first c_segment_t to the
	 * swapout queue... at that time we will
	 * know the true size we have to work with
	 */
	c_overage_swapped_limit = 16;
#endif /* !XNU_TARGET_OS_OSX */

	compressed_swap_chunk_size = c_seg_bufsize;
	vm_swapfile_hiwater_segs = (MIN_SWAP_FILE_SIZE / compressed_swap_chunk_size);
	swapfile_reclaim_threshold_segs = ((17 * (MAX_SWAP_FILE_SIZE / compressed_swap_chunk_size)) / 10);
	swapfile_reclam_minimum_segs = ((13 * (MAX_SWAP_FILE_SIZE / compressed_swap_chunk_size)) / 10);
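	/*
	 * Rough intuition for the two reclaim thresholds above: reclaim kicks
	 * in once the pool of allocated-but-unused segments exceeds roughly
	 * 1.7 max-size swapfiles' worth (17/10), and an in-flight reclaim
	 * backs off once that surplus drops to roughly 1.3 swapfiles' worth
	 * (13/10), giving some hysteresis between creating and deleting
	 * swapfiles.
	 */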

	if (kernel_thread_start_priority((thread_continue_t)vm_swapout_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_swapout_thread: create failed");
	}
	thread_set_thread_name(thread, "VM_swapout");
	vm_swapout_thread_id = thread->thread_id;
	thread_deallocate(thread);

	if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_create_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_swapfile_create_thread: create failed");
	}
	thread_set_thread_name(thread, "VM_swapfile_create");
	thread_deallocate(thread);

	if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_gc_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_swapfile_gc_thread: create failed");
	}
	thread_set_thread_name(thread, "VM_swapfile_gc");
	/*
	 * Swapfile garbage collection will need to allocate memory
	 * to complete its swap reclaim and in-memory compaction.
	 * So allow it to dip into the reserved VM page pool.
	 */
	thread_lock(thread);
	thread->options |= TH_OPT_VMPRIV;
	thread_unlock(thread);
	thread_deallocate(thread);
	proc_set_thread_policy_with_tid(kernel_task, thread->thread_id,
	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
	proc_set_thread_policy_with_tid(kernel_task, thread->thread_id,
	    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);

	vm_swap_enabled = 1;
	printf("VM Swap Subsystem is ON\n");
}


#if RECORD_THE_COMPRESSED_DATA

void
c_compressed_record_init()
{
	if (c_compressed_record_init_done == FALSE) {
		vm_swapfile_open("/tmp/compressed_data", &c_compressed_record_vp);
		c_compressed_record_init_done = TRUE;
	}
}

void
c_compressed_record_write(char *buf, int size)
{
	if (c_compressed_record_write_error == 0) {
		c_compressed_record_write_error = vm_record_file_write(c_compressed_record_vp, c_compressed_record_file_offset, buf, size);
		c_compressed_record_file_offset += size;
	}
}
#endif


int compaction_swapper_inited = 0;

void
vm_compaction_swapper_do_init(void)
{
	struct vnode *vp;
	char *pathname;
	int namelen;

	if (compaction_swapper_inited) {
		return;
	}

	if (vm_compressor_mode != VM_PAGER_COMPRESSOR_WITH_SWAP) {
		compaction_swapper_inited = 1;
		return;
	}
	lck_mtx_lock(&vm_swap_data_lock);

	if (!compaction_swapper_inited) {
		namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1;
		pathname = kalloc_data(namelen, Z_WAITOK | Z_ZERO);
		snprintf(pathname, namelen, "%s%d", swapfilename, 0);

		vm_swapfile_open(pathname, &vp);

		if (vp) {
			if (vnode_pager_isSSD(vp) == FALSE) {
				/*
				 * swap files live on an HDD, so let's make sure to start swapping
				 * much earlier since we're not worried about SSD write-wear and
				 * we have so little write bandwidth to work with.
				 * These values were derived experimentally by running the performance
				 * team's stock test for evaluating HDD performance against various
				 * combinations and comparing the overall results.
				 * Note that the > relationship between these 4 values must be maintained
				 */
				if (vm_compressor_minorcompact_threshold_divisor_overridden == 0) {
					vm_compressor_minorcompact_threshold_divisor = 15;
				}
				if (vm_compressor_majorcompact_threshold_divisor_overridden == 0) {
					vm_compressor_majorcompact_threshold_divisor = 18;
				}
				if (vm_compressor_unthrottle_threshold_divisor_overridden == 0) {
					vm_compressor_unthrottle_threshold_divisor = 24;
				}
				if (vm_compressor_catchup_threshold_divisor_overridden == 0) {
					vm_compressor_catchup_threshold_divisor = 30;
				}
			}
#if XNU_TARGET_OS_OSX
			vnode_setswapmount(vp);
			vm_swappin_avail = vnode_getswappin_avail(vp);

			if (vm_swappin_avail) {
				vm_swappin_enabled = TRUE;
			}
#endif /* XNU_TARGET_OS_OSX */
			vm_swapfile_close((uint64_t)pathname, vp);
		}
		kfree_data(pathname, namelen);

		compaction_swapper_inited = 1;
	}
	lck_mtx_unlock(&vm_swap_data_lock);
}


void
vm_swap_consider_defragmenting(int flags)
{
	boolean_t force_defrag = (flags & VM_SWAP_FLAGS_FORCE_DEFRAG);
	boolean_t force_reclaim = (flags & VM_SWAP_FLAGS_FORCE_RECLAIM);

	if (compressor_store_stop_compaction == FALSE && !VM_SWAP_BUSY() &&
	    (force_defrag || force_reclaim || VM_SWAP_SHOULD_DEFRAGMENT() || VM_SWAP_SHOULD_RECLAIM())) {
		if (!vm_swapfile_gc_thread_running || force_defrag || force_reclaim) {
			lck_mtx_lock(&vm_swap_data_lock);

			if (force_defrag) {
				vm_swap_force_defrag = TRUE;
			}

			if (force_reclaim) {
				vm_swap_force_reclaim = TRUE;
			}

			if (!vm_swapfile_gc_thread_running) {
				thread_wakeup((event_t) &vm_swapfile_gc_needed);
			}

			lck_mtx_unlock(&vm_swap_data_lock);
		}
	}
}


int vm_swap_defragment_yielded = 0;
int vm_swap_defragment_swapin = 0;
int vm_swap_defragment_free = 0;
int vm_swap_defragment_busy = 0;

#if CONFIG_FREEZE
extern int32_t c_segment_pages_compressed_incore;
extern int32_t c_segment_pages_compressed_incore_late_swapout;
extern uint32_t c_segment_pages_compressed_nearing_limit;
extern uint32_t c_segment_count;
extern uint32_t c_segments_nearing_limit;

extern bool freezer_incore_cseg_acct;
#endif /* CONFIG_FREEZE */

static void
vm_swap_defragment()
{
	c_segment_t c_seg;

	/*
	 * have to grab the master lock w/o holding
	 * any locks in spin mode
	 */
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(&c_swappedout_sparse_list_head)) {
		if (compressor_store_stop_compaction == TRUE || VM_SWAP_BUSY()) {
			vm_swap_defragment_yielded++;
			break;
		}
		c_seg = (c_segment_t)queue_first(&c_swappedout_sparse_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_state == C_ON_SWAPPEDOUTSPARSE_Q);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			/*
			 * c_seg_wait_on_busy consumes c_seg->c_lock
			 */
			c_seg_wait_on_busy(c_seg);

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			lck_mtx_lock_spin_always(c_list_lock);

			vm_swap_defragment_busy++;
			continue;
		}
		if (c_seg->c_bytes_used == 0) {
			/*
			 * c_seg_free_locked consumes the c_list_lock
			 * and c_seg->c_lock
			 */
			C_SEG_BUSY(c_seg);
			c_seg_free_locked(c_seg);

			vm_swap_defragment_free++;
		} else {
			lck_mtx_unlock_always(c_list_lock);

#if CONFIG_FREEZE
			if (freezer_incore_cseg_acct) {
				/*
				 * TODO(jason): These two are tricky because they're pre-emptive jetsams.
				 * The system is not unhealthy, but we know that it's about to become unhealthy once
				 * we do this swapin.
				 * So we're waking up the memorystatus thread to make space
				 * (hopefully) before this segment comes in.
				 *
				 * I think the compressor_backing_store needs to keep track of
				 * two new globals that will track the number of segments
				 * being swapped in due to defrag and the number of slots used
				 * in those segments.
				 * Then the health check below can be called from the memorystatus
				 * thread.
				 */
				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
					memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
				}

				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
					memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
				}
			}
#endif /* CONFIG_FREEZE */
			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
				lck_mtx_unlock_always(&c_seg->c_lock);
				vmcs_stats.defrag_swapins += (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) >> PAGE_SHIFT;
			}

			vm_swap_defragment_swapin++;
		}
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		vm_pageout_io_throttle();

		/*
		 * because write waiters have privilege over readers,
		 * dropping and immediately retaking the master lock will
		 * still allow any thread waiting to acquire the
		 * master lock exclusively an opportunity to take it
		 */
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}

TUNABLE(uint64_t, vm_swapfile_creation_delay_ns, "vm_swapfile_creation_delay_ns", 15 * NSEC_PER_SEC);

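/*
 * Gate for growing the swapfile set, as implemented below: stay under the
 * configured file count, only grow while the pool of free
 * (allocated-but-unused) segments is below the high-water mark, and back off
 * if a create failed within the last vm_swapfile_creation_delay_ns
 * (tunable above).
 */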
static inline bool
vm_swapfile_should_create(uint64_t now)
{
	uint64_t delta_failed_creation_ns;
	absolutetime_to_nanoseconds(now - vm_swapfile_last_failed_to_create_ts, &delta_failed_creation_ns);

	return (vm_num_swap_files < vm_num_swap_files_config) &&
	       ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)vm_swapfile_hiwater_segs) &&
	       (delta_failed_creation_ns > vm_swapfile_creation_delay_ns);
}

bool vm_swapfile_create_thread_inited = false;

static void
vm_swapfile_create_thread(void)
{
	uint64_t now;

	if (!vm_swapfile_create_thread_inited) {
#if CONFIG_THREAD_GROUPS
		thread_group_vm_add();
#endif /* CONFIG_THREAD_GROUPS */
		current_thread()->options |= TH_OPT_VMPRIV;

		vm_swapfile_create_thread_inited = true;
	}

	vm_swapfile_create_thread_awakened++;
	vm_swapfile_create_thread_running = 1;

	while (TRUE) {
		/*
		 * walk through the list of swap files
		 * and do the delayed frees/trims for
		 * any swap file whose count of delayed
		 * frees is above the batch limit
		 */
		vm_swap_handle_delayed_trims(FALSE);

		lck_mtx_lock(&vm_swap_data_lock);

		if (hibernate_in_progress_with_pinned_swap == TRUE) {
			break;
		}

		if (compressor_store_stop_compaction == TRUE) {
			break;
		}

		now = mach_absolute_time();

		if (!vm_swapfile_should_create(now)) {
			break;
		}

		lck_mtx_unlock(&vm_swap_data_lock);

		if (vm_swap_create_file() == FALSE) {
			vm_swapfile_last_failed_to_create_ts = now;
			HIBLOG("low swap: failed to create swapfile\n");
		} else {
			vm_swapfile_last_successful_create_ts = now;
		}
	}
	vm_swapfile_create_thread_running = 0;

	if (hibernate_in_progress_with_pinned_swap == TRUE) {
		thread_wakeup((event_t)&hibernate_in_progress_with_pinned_swap);
	}

	if (compressor_store_stop_compaction == TRUE) {
		thread_wakeup((event_t)&compressor_store_stop_compaction);
	}

	assert_wait((event_t)&vm_swapfile_create_needed, THREAD_UNINT);

	lck_mtx_unlock(&vm_swap_data_lock);

	thread_block((thread_continue_t)vm_swapfile_create_thread);

	/* NOTREACHED */
}


#if HIBERNATION

kern_return_t
hibernate_pin_swap(boolean_t start)
{
	vm_compaction_swapper_do_init();

	if (start == FALSE) {
		lck_mtx_lock(&vm_swap_data_lock);
		hibernate_in_progress_with_pinned_swap = FALSE;
		lck_mtx_unlock(&vm_swap_data_lock);

		return KERN_SUCCESS;
	}
	if (vm_swappin_enabled == FALSE) {
		return KERN_SUCCESS;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	hibernate_in_progress_with_pinned_swap = TRUE;

	while (vm_swapfile_create_thread_running || vm_swapfile_gc_thread_running) {
		assert_wait((event_t)&hibernate_in_progress_with_pinned_swap, THREAD_UNINT);

		lck_mtx_unlock(&vm_swap_data_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock(&vm_swap_data_lock);
	}
	if (vm_num_swap_files > vm_num_pinned_swap_files) {
		hibernate_in_progress_with_pinned_swap = FALSE;
		lck_mtx_unlock(&vm_swap_data_lock);

		HIBLOG("hibernate_pin_swap failed - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d\n",
		    vm_num_swap_files, vm_num_pinned_swap_files);
		return KERN_FAILURE;
	}
	lck_mtx_unlock(&vm_swap_data_lock);

	while (VM_SWAP_SHOULD_PIN(MAX_SWAP_FILE_SIZE)) {
		if (vm_swap_create_file() == FALSE) {
			break;
		}
	}
	return KERN_SUCCESS;
}
#endif
bool vm_swapfile_gc_thread_inited = false;
static void
vm_swapfile_gc_thread(void)
{
	boolean_t need_defragment;
	boolean_t need_reclaim;

	if (!vm_swapfile_gc_thread_inited) {
#if CONFIG_THREAD_GROUPS
		thread_group_vm_add();
#endif /* CONFIG_THREAD_GROUPS */
		vm_swapfile_gc_thread_inited = true;
	}

	vm_swapfile_gc_thread_awakened++;
	vm_swapfile_gc_thread_running = 1;

	while (TRUE) {
		lck_mtx_lock(&vm_swap_data_lock);

		if (hibernate_in_progress_with_pinned_swap == TRUE) {
			break;
		}

		if (VM_SWAP_BUSY() || compressor_store_stop_compaction == TRUE) {
			break;
		}

		need_defragment = FALSE;
		need_reclaim = FALSE;

		if (VM_SWAP_SHOULD_DEFRAGMENT()) {
			need_defragment = TRUE;
		}

		if (VM_SWAP_SHOULD_RECLAIM()) {
			need_defragment = TRUE;
			need_reclaim = TRUE;
		}
		if (need_defragment == FALSE && need_reclaim == FALSE) {
			break;
		}

		vm_swap_force_defrag = FALSE;
		vm_swap_force_reclaim = FALSE;

		lck_mtx_unlock(&vm_swap_data_lock);

		if (need_defragment == TRUE) {
			vm_swap_defragment();
		}
		if (need_reclaim == TRUE) {
			vm_swap_reclaim();
		}
	}
	vm_swapfile_gc_thread_running = 0;

	if (hibernate_in_progress_with_pinned_swap == TRUE) {
		thread_wakeup((event_t)&hibernate_in_progress_with_pinned_swap);
	}

	if (compressor_store_stop_compaction == TRUE) {
		thread_wakeup((event_t)&compressor_store_stop_compaction);
	}

	assert_wait((event_t)&vm_swapfile_gc_needed, THREAD_UNINT);

	lck_mtx_unlock(&vm_swap_data_lock);

	thread_block((thread_continue_t)vm_swapfile_gc_thread);

	/* NOTREACHED */
}



#define VM_SWAPOUT_LIMIT_T2P 4
#define VM_SWAPOUT_LIMIT_T1P 4
#define VM_SWAPOUT_LIMIT_T0P 6
#define VM_SWAPOUT_LIMIT_T0 8
#define VM_SWAPOUT_LIMIT_MAX 8

#define VM_SWAPOUT_START 0
#define VM_SWAPOUT_T2_PASSIVE 1
#define VM_SWAPOUT_T1_PASSIVE 2
#define VM_SWAPOUT_T0_PASSIVE 3
#define VM_SWAPOUT_T0 4

int vm_swapout_state = VM_SWAPOUT_START;
int vm_swapout_limit = 1;

int vm_swapper_entered_T0 = 0;
int vm_swapper_entered_T0P = 0;
int vm_swapper_entered_T1P = 0;
int vm_swapper_entered_T2P = 0;

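/*
 * The swapout thread's I/O throttling is a small state machine; a sketch of
 * the transitions implemented below (the macro conditions are elided):
 *
 *	START ----------------------------> T2_PASSIVE
 *	T2_PASSIVE --(needs unthrottle)---> T0_PASSIVE
 *	T2_PASSIVE --(aging/hibernate)----> T1_PASSIVE
 *	T1_PASSIVE --(needs unthrottle)---> T0_PASSIVE
 *	T1_PASSIVE --(aging done)---------> T2_PASSIVE
 *	T0_PASSIVE --(needs rethrottle)---> T2_PASSIVE
 *	T0_PASSIVE --(needs to catch up)--> T0 (non-passive)
 *	T0 ----------(caught up)----------> T0_PASSIVE
 *
 * Each transition also updates vm_swapout_limit, the number of swapout I/Os
 * allowed in flight at that tier.
 */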
static void
vm_swapout_thread_throttle_adjust(void)
{
	switch (vm_swapout_state) {
	case VM_SWAPOUT_START:

		vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2;
		vm_swapper_entered_T2P++;

		proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
		    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
		proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
		    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
		vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P;
		vm_swapout_state = VM_SWAPOUT_T2_PASSIVE;

		break;

	case VM_SWAPOUT_T2_PASSIVE:

		if (SWAPPER_NEEDS_TO_UNTHROTTLE()) {
			vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER0;
			vm_swapper_entered_T0P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P;
			vm_swapout_state = VM_SWAPOUT_T0_PASSIVE;

			break;
		}
		if (swapout_target_age || hibernate_flushing == TRUE) {
			vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER1;
			vm_swapper_entered_T1P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T1P;
			vm_swapout_state = VM_SWAPOUT_T1_PASSIVE;
		}
		break;

	case VM_SWAPOUT_T1_PASSIVE:

		if (SWAPPER_NEEDS_TO_UNTHROTTLE()) {
			vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER0;
			vm_swapper_entered_T0P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P;
			vm_swapout_state = VM_SWAPOUT_T0_PASSIVE;

			break;
		}
		if (swapout_target_age == 0 && hibernate_flushing == FALSE) {
			vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2;
			vm_swapper_entered_T2P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P;
			vm_swapout_state = VM_SWAPOUT_T2_PASSIVE;
		}
		break;

	case VM_SWAPOUT_T0_PASSIVE:

		if (SWAPPER_NEEDS_TO_RETHROTTLE()) {
			vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2;
			vm_swapper_entered_T2P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle);
			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P;
			vm_swapout_state = VM_SWAPOUT_T2_PASSIVE;

			break;
		}
		if (SWAPPER_NEEDS_TO_CATCHUP()) {
			vm_swapper_entered_T0++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_DISABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T0;
			vm_swapout_state = VM_SWAPOUT_T0;
		}
		break;

	case VM_SWAPOUT_T0:

		if (SWAPPER_HAS_CAUGHTUP()) {
			vm_swapper_entered_T0P++;

			proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id,
			    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);
			vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P;
			vm_swapout_state = VM_SWAPOUT_T0_PASSIVE;
		}
		break;
	}
}

int vm_swapout_found_empty = 0;

struct swapout_io_completion vm_swapout_ctx[VM_SWAPOUT_LIMIT_MAX];

int vm_swapout_soc_busy = 0;
int vm_swapout_soc_done = 0;
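/*
 * Swapout I/Os complete asynchronously through a small fixed pool of
 * swapout_io_completion contexts (one per in-flight I/O, VM_SWAPOUT_LIMIT_MAX
 * total). swp_io_busy marks a context claimed by vm_swapout_thread();
 * swp_io_done is set from vm_swapout_iodone() when the write finishes, and
 * the thread later reaps done contexts via vm_swapout_complete_soc().
 */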


static struct swapout_io_completion *
vm_swapout_find_free_soc(void)
{
	int i;

	for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) {
		if (vm_swapout_ctx[i].swp_io_busy == 0) {
			return &vm_swapout_ctx[i];
		}
	}
	assert(vm_swapout_soc_busy == VM_SWAPOUT_LIMIT_MAX);

	return NULL;
}

static struct swapout_io_completion *
vm_swapout_find_done_soc(void)
{
	int i;

	if (vm_swapout_soc_done) {
		for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) {
			if (vm_swapout_ctx[i].swp_io_done) {
				return &vm_swapout_ctx[i];
			}
		}
	}
	return NULL;
}

static void
vm_swapout_complete_soc(struct swapout_io_completion *soc)
{
	kern_return_t kr;

	if (soc->swp_io_error) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	lck_mtx_unlock_always(c_list_lock);

	vm_swap_put_finish(soc->swp_swf, &soc->swp_f_offset, soc->swp_io_error, TRUE /*drop iocount*/);
	vm_swapout_finish(soc->swp_c_seg, soc->swp_f_offset, soc->swp_c_size, kr);

	lck_mtx_lock_spin_always(c_list_lock);

	soc->swp_io_done = 0;
	soc->swp_io_busy = 0;

	vm_swapout_soc_busy--;
	vm_swapout_soc_done--;
}

bool vm_swapout_thread_inited = false;
extern uint32_t c_donate_swapout_count;
#if CONFIG_JETSAM
bool memorystatus_swap_over_trigger(uint64_t adjustment_factor);
/*
 * swapout_sleep_threshold sets the percentage of the swapout threshold at which
 * the swap thread will stop processing the swapout queue.
 * By default this is 90, which means we will swap until the
 * swapout queue size is at 90% of the threshold to wake the swap thread.
 * By definition the queue length must be >= 100% of the threshold when the
 * swap thread is woken up. On development builds this can be adjusted with
 * the vm.swapout_sleep_threshold sysctl.
 */
uint32_t swapout_sleep_threshold = 90;
#endif /* CONFIG_JETSAM */
static bool
should_process_swapout_queue(const queue_head_t *swapout_list_head)
{
	bool process_queue = !queue_empty(swapout_list_head) &&
	    vm_swapout_soc_busy < vm_swapout_limit &&
	    !compressor_store_stop_compaction;
#if CONFIG_JETSAM
	if (memorystatus_swap_all_apps && swapout_list_head == &c_late_swapout_list_head) {
		process_queue = process_queue && memorystatus_swap_over_trigger(swapout_sleep_threshold);
	}
#endif /* CONFIG_JETSAM */
	return process_queue;
}

void
vm_swapout_thread(void)
{
	uint32_t size = 0;
	c_segment_t c_seg = NULL;
	kern_return_t kr = KERN_SUCCESS;
	struct swapout_io_completion *soc;
	queue_head_t *swapout_list_head;
	bool queues_empty = false;

	if (!vm_swapout_thread_inited) {
#if CONFIG_THREAD_GROUPS
		thread_group_vm_add();
#endif /* CONFIG_THREAD_GROUPS */
		current_thread()->options |= TH_OPT_VMPRIV;
		vm_swapout_thread_inited = true;
	}

	vm_swapout_thread_awakened++;

	lck_mtx_lock_spin_always(c_list_lock);

	swapout_list_head = &c_early_swapout_list_head;
	vm_swapout_thread_running = TRUE;
	os_atomic_store(&vm_swapout_wake_pending, false, relaxed);
again:
	while (should_process_swapout_queue(swapout_list_head)) {
		c_seg = (c_segment_t)queue_first(swapout_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_state == C_ON_SWAPOUT_Q);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);

			c_seg_wait_on_busy(c_seg);

			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		vm_swapout_thread_processed_segments++;

		size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

		if (size == 0) {
			assert(c_seg->c_bytes_used == 0);

			/*
			 * c_seg_free_locked will drop the c_list_lock and
			 * the c_seg->c_lock.
			 */
			C_SEG_BUSY(c_seg);
			c_seg_free_locked(c_seg);
			c_seg = NULL;

			vm_swapout_found_empty++;
			goto c_seg_is_empty;
		}
		C_SEG_BUSY(c_seg);
		c_seg->c_busy_swapping = 1;

		c_seg_switch_state(c_seg, C_ON_SWAPIO_Q, FALSE);

		lck_mtx_unlock_always(c_list_lock);
		lck_mtx_unlock_always(&c_seg->c_lock);

#if CHECKSUM_THE_SWAP
		c_seg->cseg_hash = hash_string((char *)c_seg->c_store.c_buffer, (int)size);
		c_seg->cseg_swap_size = size;
#endif /* CHECKSUM_THE_SWAP */

#if ENCRYPTED_SWAP
		vm_swap_encrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

		soc = vm_swapout_find_free_soc();
		assert(soc);

		soc->swp_upl_ctx.io_context = (void *)soc;
		soc->swp_upl_ctx.io_done = (void *)vm_swapout_iodone;
		soc->swp_upl_ctx.io_error = 0;

		kr = vm_swap_put((vm_offset_t)c_seg->c_store.c_buffer, &soc->swp_f_offset, size, c_seg, soc);

		if (kr != KERN_SUCCESS) {
			if (soc->swp_io_done) {
				lck_mtx_lock_spin_always(c_list_lock);

				soc->swp_io_done = 0;
				vm_swapout_soc_done--;

				lck_mtx_unlock_always(c_list_lock);
			}
			vm_swapout_finish(c_seg, soc->swp_f_offset, size, kr);
		} else {
			soc->swp_io_busy = 1;
			vm_swapout_soc_busy++;
		}

c_seg_is_empty:
		if (!(c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count)) {
			vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
		}

		lck_mtx_lock_spin_always(c_list_lock);

		while ((soc = vm_swapout_find_done_soc())) {
			vm_swapout_complete_soc(soc);
		}
		lck_mtx_unlock_always(c_list_lock);

		vm_swapout_thread_throttle_adjust();

		lck_mtx_lock_spin_always(c_list_lock);
	}
	while ((soc = vm_swapout_find_done_soc())) {
		vm_swapout_complete_soc(soc);
	}
	lck_mtx_unlock_always(c_list_lock);

	vm_pageout_io_throttle();

	lck_mtx_lock_spin_always(c_list_lock);

	/*
	 * Recheck if we have some c_segs to wakeup
	 * post throttle. And, check to see if we
	 * have any more swapouts needed.
	 */
	if (vm_swapout_soc_done) {
		goto again;
	}

#if XNU_TARGET_OS_OSX
	queues_empty = queue_empty(&c_early_swapout_list_head) && queue_empty(&c_regular_swapout_list_head) && queue_empty(&c_late_swapout_list_head);
#else /* XNU_TARGET_OS_OSX */
	queues_empty = queue_empty(&c_early_swapout_list_head) && queue_empty(&c_late_swapout_list_head);
#endif /* XNU_TARGET_OS_OSX */

	if (!queues_empty) {
		swapout_list_head = NULL;
		if (!queue_empty(&c_early_swapout_list_head)) {
			swapout_list_head = &c_early_swapout_list_head;
		} else {
#if XNU_TARGET_OS_OSX
			/*
			 * On macOS we _always_ process all swapout queues.
			 */
			if (!queue_empty(&c_regular_swapout_list_head)) {
				swapout_list_head = &c_regular_swapout_list_head;
			} else {
				swapout_list_head = &c_late_swapout_list_head;
			}
#else /* XNU_TARGET_OS_OSX */
			/*
			 * On non-macOS swap-capable platforms, we might want to
			 * process just the early queue (Freezer) or process both
			 * early and late queues (app swap). We processed the early
			 * queue up above. The late Q will only be processed if the
			 * checks in should_process_swapout_queue give the go-ahead.
			 */
			swapout_list_head = &c_late_swapout_list_head;
#endif /* XNU_TARGET_OS_OSX */
		}
		if (swapout_list_head && should_process_swapout_queue(swapout_list_head)) {
			goto again;
		}
	}

	assert_wait((event_t)&vm_swapout_thread, THREAD_UNINT);

	vm_swapout_thread_running = FALSE;

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_swapout_thread);

	/* NOTREACHED */
}


void
vm_swapout_iodone(void *io_context, int error)
{
	struct swapout_io_completion *soc;

	soc = (struct swapout_io_completion *)io_context;

	lck_mtx_lock_spin_always(c_list_lock);

	soc->swp_io_done = 1;
	soc->swp_io_error = error;
	vm_swapout_soc_done++;

	if (!vm_swapout_thread_running) {
		thread_wakeup((event_t)&vm_swapout_thread);
	}

	lck_mtx_unlock_always(c_list_lock);
}


static void
vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_return_t kr)
{
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	if (kr == KERN_SUCCESS) {
		kernel_memory_depopulate((vm_offset_t)c_seg->c_store.c_buffer, size,
		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
	}
#if ENCRYPTED_SWAP
	else {
		vm_swap_decrypt(c_seg, false);
	}
#endif /* ENCRYPTED_SWAP */
	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	if (kr == KERN_SUCCESS) {
		int new_state = C_ON_SWAPPEDOUT_Q;
		boolean_t insert_head = FALSE;

		if (hibernate_flushing == TRUE) {
			if (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id &&
			    c_seg->c_generation_id <= last_c_segment_to_warm_generation_id) {
				insert_head = TRUE;
			}
		} else if (C_SEG_ONDISK_IS_SPARSE(c_seg)) {
			new_state = C_ON_SWAPPEDOUTSPARSE_Q;
		}

		c_seg_switch_state(c_seg, new_state, insert_head);

		c_seg->c_store.c_swap_handle = f_offset;

		counter_add(&vm_statistics_swapouts, size >> PAGE_SHIFT);

		c_seg->c_swappedin = false;

		if (c_seg->c_bytes_used) {
			OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used);
		}

#if CONFIG_FREEZE
		/*
		 * Successful swapout. Decrement the in-core compressed pages count.
		 */
		OSAddAtomic(-(c_seg->c_slots_used), &c_segment_pages_compressed_incore);
		assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
		if (c_seg->c_has_donated_pages) {
			OSAddAtomic(-(c_seg->c_slots_used), &c_segment_pages_compressed_incore_late_swapout);
		}
#endif /* CONFIG_FREEZE */
	} else {
		if (c_seg->c_overage_swap == TRUE) {
			c_seg->c_overage_swap = FALSE;
			c_overage_swapped_count--;
		}

#if CONFIG_FREEZE
		if (c_seg->c_has_freezer_pages) {
			if (c_seg->c_task_owner) {
				c_seg_update_task_owner(c_seg, NULL);
			}
			/*
			 * We failed to swapout a frozen cseg. We need
			 * to put it back in the queues, specifically the
			 * AGE_Q. So clear the donated bit otherwise it'll
			 * land on the swapped_in Q.
			 */
			c_seg->c_has_donated_pages = 0;
			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
		} else
#endif /* CONFIG_FREEZE */
		{
			if (c_seg->c_has_donated_pages) {
				c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
			} else {
				c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
			}
		}

		if (!c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
			c_seg_need_delayed_compaction(c_seg, TRUE);
		}
	}
	assert(c_seg->c_busy_swapping);
	assert(c_seg->c_busy);

	c_seg->c_busy_swapping = 0;
	lck_mtx_unlock_always(c_list_lock);

	C_SEG_WAKEUP_DONE(c_seg);
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}


boolean_t
vm_swap_create_file()
{
	uint64_t size = 0;
	int namelen = 0;
	boolean_t swap_file_created = FALSE;
	boolean_t swap_file_reuse = FALSE;
	boolean_t swap_file_pin = FALSE;
	struct swapfile *swf = NULL;

	/*
	 * make sure we've got all the info we need
	 * to potentially pin a swap file... we could
	 * be swapping out due to hibernation w/o ever
	 * having run vm_pageout_scan, which is normally
	 * the trigger to do the init
	 */
	vm_compaction_swapper_do_init();

	/*
	 * Any swapfile structure ready for re-use?
	 */

	lck_mtx_lock(&vm_swap_data_lock);

	swf = (struct swapfile*) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		if (swf->swp_flags == SWAP_REUSE) {
			swap_file_reuse = TRUE;
			break;
		}
		swf = (struct swapfile*) queue_next(&swf->swp_queue);
	}

	lck_mtx_unlock(&vm_swap_data_lock);

	if (swap_file_reuse == FALSE) {
		namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1;

		swf = kalloc_type(struct swapfile, Z_WAITOK | Z_ZERO);
		swf->swp_index = vm_num_swap_files + 1;
		swf->swp_pathlen = namelen;
		swf->swp_path = kalloc_data(swf->swp_pathlen, Z_WAITOK | Z_ZERO);

		snprintf(swf->swp_path, namelen, "%s%d", swapfilename, vm_num_swap_files);
	}

	vm_swapfile_open(swf->swp_path, &swf->swp_vp);

	if (swf->swp_vp == NULL) {
		if (swap_file_reuse == FALSE) {
			kfree_data(swf->swp_path, swf->swp_pathlen);
			kfree_type(struct swapfile, swf);
		}
		return FALSE;
	}
	vm_swapfile_can_be_created = TRUE;

	size = MAX_SWAP_FILE_SIZE;

	while (size >= MIN_SWAP_FILE_SIZE) {
		swap_file_pin = VM_SWAP_SHOULD_PIN(size);

		if (vm_swapfile_preallocate(swf->swp_vp, &size, &swap_file_pin) == 0) {
			int num_bytes_for_bitmap = 0;

			swap_file_created = TRUE;

			swf->swp_size = size;
			swf->swp_nsegs = (unsigned int) (size / compressed_swap_chunk_size);
			swf->swp_nseginuse = 0;
			swf->swp_free_hint = 0;

			num_bytes_for_bitmap = MAX((swf->swp_nsegs >> 3), 1);
			/*
			 * Allocate a bitmap that describes the
			 * number of segments held by this swapfile.
			 */
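			/*
			 * One bit per segment; with illustrative numbers
			 * only, a swapfile holding 256 segments needs
			 * 256 >> 3 = 32 bitmap bytes. The MAX(..., 1) above
			 * covers files with fewer than 8 segments.
			 */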
			swf->swp_bitmap = kalloc_data(num_bytes_for_bitmap,
			    Z_WAITOK | Z_ZERO);

			swf->swp_csegs = kalloc_type(c_segment_t, swf->swp_nsegs,
			    Z_WAITOK | Z_ZERO);

			/*
			 * passing a NULL trim_list into vnode_trim_list
			 * will return ENOTSUP if trim isn't supported
			 * and 0 if it is
			 */
			if (vnode_trim_list(swf->swp_vp, NULL, FALSE) == 0) {
				swp_trim_supported = TRUE;
			}

			lck_mtx_lock(&vm_swap_data_lock);

			swf->swp_flags = SWAP_READY;

			if (swap_file_reuse == FALSE) {
				queue_enter(&swf_global_queue, swf, struct swapfile*, swp_queue);
			}

			vm_num_swap_files++;

			vm_swapfile_total_segs_alloced += swf->swp_nsegs;
			if (vm_swapfile_total_segs_alloced > vm_swapfile_total_segs_alloced_max) {
				vm_swapfile_total_segs_alloced_max = vm_swapfile_total_segs_alloced;
			}

			if (swap_file_pin == TRUE) {
				vm_num_pinned_swap_files++;
				swf->swp_flags |= SWAP_PINNED;
				vm_swappin_avail -= swf->swp_size;
			}

			lck_mtx_unlock(&vm_swap_data_lock);

			thread_wakeup((event_t) &vm_num_swap_files);
#if !XNU_TARGET_OS_OSX
			if (vm_num_swap_files == 1) {
				c_overage_swapped_limit = (uint32_t)size / c_seg_bufsize;

				if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
					c_overage_swapped_limit /= 2;
				}
			}
#endif /* !XNU_TARGET_OS_OSX */
			break;
		} else {
			size = size / 2;
		}
	}
	if (swap_file_created == FALSE) {
		vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp);

		swf->swp_vp = NULL;

		if (swap_file_reuse == FALSE) {
			kfree_data(swf->swp_path, swf->swp_pathlen);
			kfree_type(struct swapfile, swf);
		}
	}
	return swap_file_created;
}

extern void vnode_put(struct vnode* vp);
kern_return_t
vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size)
{
	struct swapfile *swf = NULL;
	uint64_t file_offset = 0;
	int retval = 0;

	assert(c_seg->c_store.c_buffer);

	lck_mtx_lock(&vm_swap_data_lock);

	swf = vm_swapfile_for_handle(f_offset);

	if (swf == NULL || (!(swf->swp_flags & SWAP_READY) && !(swf->swp_flags & SWAP_RECLAIM))) {
		vm_swap_get_failures++;
		retval = 1;
		goto done;
	}
	swf->swp_io_count++;

	lck_mtx_unlock(&vm_swap_data_lock);

#if DEVELOPMENT || DEBUG
	C_SEG_MAKE_WRITEABLE(c_seg);
#endif
	file_offset = (f_offset & SWAP_SLOT_MASK);

	if ((retval = vnode_getwithref(swf->swp_vp)) != 0) {
		printf("vm_swap_get: vnode_getwithref on swapfile failed with %d\n", retval);
	} else {
		retval = vm_swapfile_io(swf->swp_vp, file_offset, (uint64_t)c_seg->c_store.c_buffer, (int)(size / PAGE_SIZE_64), SWAP_READ, NULL);
		vnode_put(swf->swp_vp);
	}

#if DEVELOPMENT || DEBUG
	C_SEG_WRITE_PROTECT(c_seg);
#endif
	if (retval == 0) {
		counter_add(&vm_statistics_swapins, size >> PAGE_SHIFT);
	} else {
		vm_swap_get_failures++;
	}

	/*
	 * Free this slot in the swap structure.
	 */
	vm_swap_free(f_offset);

	lck_mtx_lock(&vm_swap_data_lock);
	swf->swp_io_count--;

	if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) {
		swf->swp_flags &= ~SWAP_WANTED;
		thread_wakeup((event_t) &swf->swp_flags);
	}
done:
	lck_mtx_unlock(&vm_swap_data_lock);

	if (retval == 0) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

kern_return_t
vm_swap_put(vm_offset_t addr, uint64_t *f_offset, uint32_t size, c_segment_t c_seg, struct swapout_io_completion *soc)
{
	unsigned int segidx = 0;
	struct swapfile *swf = NULL;
	uint64_t file_offset = 0;
	uint64_t swapfile_index = 0;
	unsigned int byte_for_segidx = 0;
	unsigned int offset_within_byte = 0;
	boolean_t swf_eligible = FALSE;
	boolean_t waiting = FALSE;
	boolean_t retried = FALSE;
	int error = 0;
	uint64_t now;
	void *upl_ctx = NULL;
	boolean_t drop_iocount = FALSE;

	if (addr == 0 || f_offset == NULL || compressor_store_stop_compaction) {
		return KERN_FAILURE;
	}
retry:
	lck_mtx_lock(&vm_swap_data_lock);

	swf = (struct swapfile*) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		segidx = swf->swp_free_hint;

		swf_eligible = (swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse < swf->swp_nsegs);

		if (swf_eligible) {
			while (segidx < swf->swp_nsegs) {
				byte_for_segidx = segidx >> 3;
				offset_within_byte = segidx % 8;
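				/*
				 * segidx maps into the bitmap as bit
				 * (segidx % 8) of byte (segidx >> 3); e.g.
				 * segment 11 is bit 3 of byte 1. A set bit
				 * means the segment is already in use.
				 */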

				if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {
					segidx++;
					continue;
				}

				(swf->swp_bitmap)[byte_for_segidx] |= (uint8_t)(1 << offset_within_byte);

				file_offset = segidx * compressed_swap_chunk_size;
				swf->swp_nseginuse++;
				swf->swp_io_count++;
				swf->swp_csegs[segidx] = c_seg;

				swapfile_index = swf->swp_index;
				vm_swapfile_total_segs_used++;
				if (vm_swapfile_total_segs_used > vm_swapfile_total_segs_used_max) {
					vm_swapfile_total_segs_used_max = vm_swapfile_total_segs_used;
				}

				now = mach_absolute_time();

				if (vm_swapfile_should_create(now) && !vm_swapfile_create_thread_running) {
					thread_wakeup((event_t) &vm_swapfile_create_needed);
				}

				lck_mtx_unlock(&vm_swap_data_lock);

				goto issue_io;
			}
		}
		swf = (struct swapfile*) queue_next(&swf->swp_queue);
	}
	assert(queue_end(&swf_global_queue, (queue_entry_t) swf));

	/*
	 * we've run out of swap segments, but may not
	 * be in a position to immediately create a new swap
	 * file if we've recently failed to create due to a lack
	 * of free space in the root filesystem... we'll try
	 * to kick that create off, but in any event we're going
	 * to take a breather (up to 1 second) so that we're not caught in a tight
	 * loop back in "vm_compressor_compact_and_swap" trying to stuff
	 * segments into swap files only to have them immediately put back
	 * on the c_age queue due to vm_swap_put failing.
	 *
	 * if we're doing these puts due to a hibernation flush,
	 * no need to block... setting hibernate_no_swapspace to TRUE,
	 * will cause "vm_compressor_compact_and_swap" to immediately abort
	 */
	now = mach_absolute_time();

	if (vm_swapfile_should_create(now)) {
		if (!vm_swapfile_create_thread_running) {
			thread_wakeup((event_t) &vm_swapfile_create_needed);
		}
		waiting = TRUE;
		assert_wait_timeout((event_t) &vm_num_swap_files, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
	} else {
		if (hibernate_flushing) {
			hibernate_no_swapspace = TRUE;
		}
	}

	lck_mtx_unlock(&vm_swap_data_lock);

	if (waiting == TRUE) {
		thread_block(THREAD_CONTINUE_NULL);

		if (retried == FALSE && hibernate_flushing == TRUE) {
			retried = TRUE;
			goto retry;
		}
	}
	vm_swap_put_failures_no_swap_file++;

	return KERN_FAILURE;

issue_io:
	assert(c_seg->c_busy_swapping);
	assert(c_seg->c_busy);
	assert(!c_seg->c_on_minorcompact_q);

1847 *f_offset = (swapfile_index << SWAP_DEVICE_SHIFT) | file_offset;
1848
1849 if (soc) {
1850 soc->swp_c_seg = c_seg;
1851 soc->swp_c_size = size;
1852
1853 soc->swp_swf = swf;
1854
1855 soc->swp_io_error = 0;
1856 soc->swp_io_done = 0;
1857
1858 upl_ctx = (void *)&soc->swp_upl_ctx;
1859 }
1860
1861 if ((error = vnode_getwithref(swf->swp_vp)) != 0) {
1862 printf("vm_swap_put: vnode_getwithref on swapfile failed with %d\n", error);
1863 } else {
1864 error = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int) (size / PAGE_SIZE_64), SWAP_WRITE, upl_ctx);
1865 drop_iocount = TRUE;
1866 }
1867
1868 if (error || upl_ctx == NULL) {
1869 return vm_swap_put_finish(swf, f_offset, error, drop_iocount);
1870 }
1871
1872 return KERN_SUCCESS;
1873 }
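
/*
 * The swap handle written to *f_offset above packs the owning swapfile's
 * index into the bits above SWAP_DEVICE_SHIFT and the byte offset of the
 * slot within that file into the bits covered by SWAP_SLOT_MASK, as the
 * encode in vm_swap_put and the mask in vm_swap_get show. A minimal
 * sketch of that round trip (the helper names are illustrative, not part
 * of this file, and the index decode assumes the index occupies exactly
 * the bits above SWAP_DEVICE_SHIFT):
 *
 *	static inline uint64_t
 *	swap_handle_encode(uint64_t swapfile_index, uint64_t file_offset)
 *	{
 *		return (swapfile_index << SWAP_DEVICE_SHIFT) | file_offset;
 *	}
 *
 *	static inline uint64_t
 *	swap_handle_to_file_index(uint64_t f_offset)
 *	{
 *		return f_offset >> SWAP_DEVICE_SHIFT;
 *	}
 *
 *	static inline uint64_t
 *	swap_handle_to_file_offset(uint64_t f_offset)
 *	{
 *		return f_offset & SWAP_SLOT_MASK;
 *	}
 */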

kern_return_t
vm_swap_put_finish(struct swapfile *swf, uint64_t *f_offset, int error, boolean_t drop_iocount)
{
	if (drop_iocount) {
		vnode_put(swf->swp_vp);
	}

	lck_mtx_lock(&vm_swap_data_lock);

	swf->swp_io_count--;

	if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) {
		swf->swp_flags &= ~SWAP_WANTED;
		thread_wakeup((event_t) &swf->swp_flags);
	}
	lck_mtx_unlock(&vm_swap_data_lock);

	if (error) {
		vm_swap_free(*f_offset);
		vm_swap_put_failures++;

		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}


static void
vm_swap_free_now(struct swapfile *swf, uint64_t f_offset)
{
	uint64_t file_offset = 0;
	unsigned int segidx = 0;


	if ((swf->swp_flags & SWAP_READY) || (swf->swp_flags & SWAP_RECLAIM)) {
		unsigned int byte_for_segidx = 0;
		unsigned int offset_within_byte = 0;

		file_offset = (f_offset & SWAP_SLOT_MASK);
		segidx = (unsigned int) (file_offset / compressed_swap_chunk_size);

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {
			(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

			swf->swp_csegs[segidx] = NULL;

			swf->swp_nseginuse--;
			vm_swapfile_total_segs_used--;

			if (segidx < swf->swp_free_hint) {
				swf->swp_free_hint = segidx;
			}
		}
		if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) {
			thread_wakeup((event_t) &vm_swapfile_gc_needed);
		}
	}
}
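
/*
 * Slot occupancy is tracked with one bit per segment in swp_bitmap:
 * vm_swap_put sets a bit when it claims a slot, and vm_swap_free_now /
 * vm_swap_do_delayed_trim clear it. A minimal sketch of the byte/bit
 * arithmetic repeated throughout this file (the helper names are
 * illustrative, not part of this file):
 *
 *	static inline bool
 *	swp_bitmap_test(uint8_t *bitmap, unsigned int segidx)
 *	{
 *		return (bitmap[segidx >> 3] & (1 << (segidx % 8))) != 0;
 *	}
 *
 *	static inline void
 *	swp_bitmap_set(uint8_t *bitmap, unsigned int segidx)
 *	{
 *		bitmap[segidx >> 3] |= (uint8_t)(1 << (segidx % 8));
 *	}
 *
 *	static inline void
 *	swp_bitmap_clear(uint8_t *bitmap, unsigned int segidx)
 *	{
 *		bitmap[segidx >> 3] &= (uint8_t)~(1 << (segidx % 8));
 *	}
 */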


uint32_t vm_swap_free_now_count = 0;
uint32_t vm_swap_free_delayed_count = 0;


void
vm_swap_free(uint64_t f_offset)
{
	struct swapfile *swf = NULL;
	struct trim_list *tl = NULL;
	uint64_t now;

	if (swp_trim_supported == TRUE) {
		tl = kalloc_type(struct trim_list, Z_WAITOK);
	}

	lck_mtx_lock(&vm_swap_data_lock);

	swf = vm_swapfile_for_handle(f_offset);

	if (swf && (swf->swp_flags & (SWAP_READY | SWAP_RECLAIM))) {
		if (swp_trim_supported == FALSE || (swf->swp_flags & SWAP_RECLAIM)) {
			/*
			 * don't delay the free if the underlying disk doesn't support
			 * trim, or if we're in the midst of reclaiming this swap file,
			 * since we don't want to move segments that are technically
			 * free but not yet handled by the delayed free mechanism
			 */
			vm_swap_free_now(swf, f_offset);

			vm_swap_free_now_count++;
			goto done;
		}
		tl->tl_offset = f_offset & SWAP_SLOT_MASK;
		tl->tl_length = compressed_swap_chunk_size;

		tl->tl_next = swf->swp_delayed_trim_list_head;
		swf->swp_delayed_trim_list_head = tl;
		swf->swp_delayed_trim_count++;
		tl = NULL;

		if (VM_SWAP_SHOULD_TRIM(swf) && !vm_swapfile_create_thread_running) {
			now = mach_absolute_time();

			if (now > dont_trim_until_ts) {
				thread_wakeup((event_t) &vm_swapfile_create_needed);
			}
		}
		vm_swap_free_delayed_count++;
	}
done:
	lck_mtx_unlock(&vm_swap_data_lock);

	if (tl != NULL) {
		kfree_type(struct trim_list, tl);
	}
}


static void
vm_swap_wait_on_trim_handling_in_progress()
{
	while (delayed_trim_handling_in_progress == TRUE) {
		assert_wait((event_t) &delayed_trim_handling_in_progress, THREAD_UNINT);
		lck_mtx_unlock(&vm_swap_data_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock(&vm_swap_data_lock);
	}
}
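
/*
 * vm_swap_wait_on_trim_handling_in_progress above is an instance of the
 * usual Mach sleep/wakeup idiom: assert a wait on an event while holding
 * the lock that protects the condition, drop the lock, block, then retake
 * the lock and re-check. A minimal sketch of the pattern, assuming a
 * hypothetical condition/lock pair (only assert_wait, thread_block and
 * thread_wakeup come from this file's actual usage):
 *
 *	while (condition_is_true) {
 *		assert_wait((event_t) &condition_is_true, THREAD_UNINT);
 *		lck_mtx_unlock(&protecting_lock);
 *		thread_block(THREAD_CONTINUE_NULL);
 *		lck_mtx_lock(&protecting_lock);
 *	}
 *
 * The waker flips the condition under the same lock and then calls
 * thread_wakeup((event_t) &condition_is_true), as the tail of
 * vm_swap_handle_delayed_trims below does.
 */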


static void
vm_swap_handle_delayed_trims(boolean_t force_now)
{
	struct swapfile *swf = NULL;

	/*
	 * serialize the race between us and vm_swap_reclaim...
	 * if vm_swap_reclaim wins it will turn off SWAP_READY
	 * on the victim it has chosen... we can just skip over
	 * that file since vm_swap_reclaim will first process
	 * all of the delayed trims associated with it
	 */

	if (compressor_store_stop_compaction == TRUE) {
		return;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	delayed_trim_handling_in_progress = TRUE;

	lck_mtx_unlock(&vm_swap_data_lock);

	/*
	 * no need to hold the lock to walk the swf list since
	 * vm_swap_create (the only place where we add to this list)
	 * is run on the same thread as this function,
	 * and vm_swap_reclaim doesn't remove items from this list;
	 * instead it marks them with SWAP_REUSE for future re-use
	 */
	swf = (struct swapfile*) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		if ((swf->swp_flags & SWAP_READY) && (force_now == TRUE || VM_SWAP_SHOULD_TRIM(swf))) {
			assert(!(swf->swp_flags & SWAP_RECLAIM));
			vm_swap_do_delayed_trim(swf);
		}
		swf = (struct swapfile*) queue_next(&swf->swp_queue);
	}
	lck_mtx_lock(&vm_swap_data_lock);

	delayed_trim_handling_in_progress = FALSE;
	thread_wakeup((event_t) &delayed_trim_handling_in_progress);

	if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) {
		thread_wakeup((event_t) &vm_swapfile_gc_needed);
	}

	lck_mtx_unlock(&vm_swap_data_lock);
}

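/*
 * Drain a swapfile's delayed trim list: detach the entire list under the
 * data lock, issue one batched trim against the vnode, then walk the
 * detached entries and release each slot back into the bitmap. Batching
 * amortizes the cost of vnode_trim_list over many freed slots.
 */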
static void
vm_swap_do_delayed_trim(struct swapfile *swf)
{
	struct trim_list *tl, *tl_head;
	int error;

	if (compressor_store_stop_compaction == TRUE) {
		return;
	}

	if ((error = vnode_getwithref(swf->swp_vp)) != 0) {
		printf("vm_swap_do_delayed_trim: vnode_getwithref on swapfile failed with %d\n", error);
		return;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	tl_head = swf->swp_delayed_trim_list_head;
	swf->swp_delayed_trim_list_head = NULL;
	swf->swp_delayed_trim_count = 0;

	lck_mtx_unlock(&vm_swap_data_lock);

	vnode_trim_list(swf->swp_vp, tl_head, TRUE);

	(void) vnode_put(swf->swp_vp);

	while ((tl = tl_head) != NULL) {
		unsigned int segidx = 0;
		unsigned int byte_for_segidx = 0;
		unsigned int offset_within_byte = 0;

		lck_mtx_lock(&vm_swap_data_lock);

		segidx = (unsigned int) (tl->tl_offset / compressed_swap_chunk_size);

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {
			(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

			swf->swp_csegs[segidx] = NULL;

			swf->swp_nseginuse--;
			vm_swapfile_total_segs_used--;

			if (segidx < swf->swp_free_hint) {
				swf->swp_free_hint = segidx;
			}
		}
		lck_mtx_unlock(&vm_swap_data_lock);

		tl_head = tl->tl_next;

		kfree_type(struct trim_list, tl);
	}
}


void
vm_swap_flush()
{
	/*
	 * currently a no-op
	 */
	return;
}

int vm_swap_reclaim_yielded = 0;

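/*
 * Reclaim one swapfile: pick the READY file with the fewest segments in
 * use, mark it SWAP_RECLAIM, flush its delayed trims, then migrate each
 * live segment by reading it into a scratch buffer and re-issuing it via
 * vm_swap_put (which lands it in a different file). If every segment
 * drains, the file is closed and its entry is left on the queue flagged
 * SWAP_REUSE so the name and queue slot can be recycled.
 */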
void
vm_swap_reclaim(void)
{
	vm_offset_t addr = 0;
	unsigned int segidx = 0;
	uint64_t f_offset = 0;
	struct swapfile *swf = NULL;
	struct swapfile *smallest_swf = NULL;
	unsigned int min_nsegs = 0;
	unsigned int byte_for_segidx = 0;
	unsigned int offset_within_byte = 0;
	uint32_t c_size = 0;

	c_segment_t c_seg = NULL;

	kmem_alloc(compressor_map, (vm_offset_t *)&addr, c_seg_bufsize,
	    KMA_NOFAIL | KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_COMPRESSOR);

	lck_mtx_lock(&vm_swap_data_lock);

	/*
	 * if we're running the swapfile list looking for
	 * candidates with delayed trims, we need to
	 * wait before making our decision concerning
	 * the swapfile we want to reclaim
	 */
	vm_swap_wait_on_trim_handling_in_progress();

	/*
	 * from here until we knock down the SWAP_READY bit,
	 * we need to remain behind the vm_swap_data_lock...
	 * once that bit has been turned off, "vm_swap_handle_delayed_trims"
	 * will not consider this swapfile for processing
	 */
	swf = (struct swapfile*) queue_first(&swf_global_queue);
	min_nsegs = MAX_SWAP_FILE_SIZE / compressed_swap_chunk_size;
	smallest_swf = NULL;

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		if ((swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse <= min_nsegs)) {
			smallest_swf = swf;
			min_nsegs = swf->swp_nseginuse;
		}
		swf = (struct swapfile*) queue_next(&swf->swp_queue);
	}

	if (smallest_swf == NULL) {
		goto done;
	}

	swf = smallest_swf;


	swf->swp_flags &= ~SWAP_READY;
	swf->swp_flags |= SWAP_RECLAIM;

	if (swf->swp_delayed_trim_count) {
		lck_mtx_unlock(&vm_swap_data_lock);

		vm_swap_do_delayed_trim(swf);

		lck_mtx_lock(&vm_swap_data_lock);
	}
	segidx = 0;

	while (segidx < swf->swp_nsegs) {
ReTry_for_cseg:
		/*
		 * Wait for outgoing I/Os.
		 */
		while (swf->swp_io_count) {
			swf->swp_flags |= SWAP_WANTED;

			assert_wait((event_t) &swf->swp_flags, THREAD_UNINT);
			lck_mtx_unlock(&vm_swap_data_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock(&vm_swap_data_lock);
		}
		if (compressor_store_stop_compaction == TRUE || VM_SWAP_SHOULD_ABORT_RECLAIM() || VM_SWAP_BUSY()) {
			vm_swap_reclaim_yielded++;
			break;
		}

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if (((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) == 0) {
			segidx++;
			continue;
		}

		c_seg = swf->swp_csegs[segidx];
		assert(c_seg);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {
			/*
			 * a swapped out c_segment in the process of being freed will remain in the
			 * busy state until after the vm_swap_free is called on it... vm_swap_free
			 * takes the vm_swap_data_lock, so it can't change the swap state until after
			 * we drop the vm_swap_data_lock... once we do, vm_swap_free will complete
			 * which will allow c_seg_free_locked to clear busy and wake up this thread...
			 * at that point, we re-look up the swap state which will now indicate that
			 * this c_segment no longer exists.
			 */
			c_seg->c_wanted = 1;

			assert_wait((event_t) (c_seg), THREAD_UNINT);
			lck_mtx_unlock_always(&c_seg->c_lock);

			lck_mtx_unlock(&vm_swap_data_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock(&vm_swap_data_lock);

			goto ReTry_for_cseg;
		}
		(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

		f_offset = segidx * compressed_swap_chunk_size;

		assert(c_seg == swf->swp_csegs[segidx]);
		swf->swp_csegs[segidx] = NULL;
		swf->swp_nseginuse--;

		vm_swapfile_total_segs_used--;

		lck_mtx_unlock(&vm_swap_data_lock);

		assert(C_SEG_IS_ONDISK(c_seg));

		C_SEG_BUSY(c_seg);
		c_seg->c_busy_swapping = 1;
#if !CHECKSUM_THE_SWAP
		c_seg_trim_tail(c_seg);
#endif
		c_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

		assert(c_size <= c_seg_bufsize && c_size);

		lck_mtx_unlock_always(&c_seg->c_lock);

		if (vnode_getwithref(swf->swp_vp)) {
			printf("vm_swap_reclaim: vnode_getwithref on swapfile failed.\n");
			vm_swap_get_failures++;
			goto swap_io_failed;
		} else {
			if (vm_swapfile_io(swf->swp_vp, f_offset, addr, (int)(c_size / PAGE_SIZE_64), SWAP_READ, NULL)) {
				/*
				 * reading the data back in failed, so convert c_seg
				 * to a swapped in c_segment that contains no data
				 */
				c_seg_swapin_requeue(c_seg, FALSE, TRUE, FALSE);
				/*
				 * returns with c_busy_swapping cleared
				 */
				vnode_put(swf->swp_vp);
				vm_swap_get_failures++;
				goto swap_io_failed;
			}
			vnode_put(swf->swp_vp);
		}

		counter_add(&vm_statistics_swapins, c_size >> PAGE_SHIFT);
		vmcs_stats.reclaim_swapins += c_size >> PAGE_SHIFT;

		if (vm_swap_put(addr, &f_offset, c_size, c_seg, NULL)) {
			vm_offset_t c_buffer;

			/*
			 * the put failed, so convert c_seg to a fully swapped in c_segment
			 * with valid data
			 */
			c_buffer = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);

			kernel_memory_populate(c_buffer, c_size,
			    KMA_NOFAIL | KMA_COMPRESSOR,
			    VM_KERN_MEMORY_COMPRESSOR);

			memcpy((char *)c_buffer, (char *)addr, c_size);

			c_seg->c_store.c_buffer = (int32_t *)c_buffer;
#if ENCRYPTED_SWAP
			vm_swap_decrypt(c_seg, true);
#endif /* ENCRYPTED_SWAP */
			c_seg_swapin_requeue(c_seg, TRUE, TRUE, FALSE);
			/*
			 * returns with c_busy_swapping cleared
			 */
			OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);

			goto swap_io_failed;
		}
		counter_add(&vm_statistics_swapouts, c_size >> PAGE_SHIFT);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg->c_swappedin = false;

		assert(C_SEG_IS_ONDISK(c_seg));
		/*
		 * The c_seg will now know about the new location on disk.
		 */
		c_seg->c_store.c_swap_handle = f_offset;

		assert(c_seg->c_busy_swapping);
		c_seg->c_busy_swapping = 0;
swap_io_failed:
		assert(c_seg->c_busy);
		C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock(&vm_swap_data_lock);
	}

	if (swf->swp_nseginuse) {
		swf->swp_flags &= ~SWAP_RECLAIM;
		swf->swp_flags |= SWAP_READY;

		goto done;
	}
	/*
	 * We don't remove this inactive swf from the queue.
	 * That way, we can re-use it when needed again and
	 * preserve the namespace. The delayed_trim processing
	 * is also dependent on us not removing swfs from the queue.
	 */
	//queue_remove(&swf_global_queue, swf, struct swapfile*, swp_queue);

	vm_swapfile_total_segs_alloced -= swf->swp_nsegs;

	lck_mtx_unlock(&vm_swap_data_lock);

	vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp);

	kfree_type(c_segment_t, swf->swp_nsegs, swf->swp_csegs);
	kfree_data(swf->swp_bitmap, MAX((swf->swp_nsegs >> 3), 1));

	lck_mtx_lock(&vm_swap_data_lock);

	if (swf->swp_flags & SWAP_PINNED) {
		vm_num_pinned_swap_files--;
		vm_swappin_avail += swf->swp_size;
	}

	swf->swp_vp = NULL;
	swf->swp_size = 0;
	swf->swp_free_hint = 0;
	swf->swp_nsegs = 0;
	swf->swp_flags = SWAP_REUSE;

	vm_num_swap_files--;

done:
	thread_wakeup((event_t) &swf->swp_flags);
	lck_mtx_unlock(&vm_swap_data_lock);

	kmem_free(compressor_map, (vm_offset_t) addr, c_seg_bufsize);
}


uint64_t
vm_swap_get_total_space(void)
{
	uint64_t total_space = 0;

	total_space = (uint64_t)vm_swapfile_total_segs_alloced * compressed_swap_chunk_size;

	return total_space;
}

uint64_t
vm_swap_get_used_space(void)
{
	uint64_t used_space = 0;

	used_space = (uint64_t)vm_swapfile_total_segs_used * compressed_swap_chunk_size;

	return used_space;
}

uint64_t
vm_swap_get_free_space(void)
{
	return vm_swap_get_total_space() - vm_swap_get_used_space();
}
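
/*
 * The three accessors above derive everything from the two global segment
 * counters, so free space is simply total minus used. A minimal usage
 * sketch (the percentage helper is illustrative, not part of this file):
 *
 *	static inline uint32_t
 *	vm_swap_used_percent(void)
 *	{
 *		uint64_t total = vm_swap_get_total_space();
 *
 *		if (total == 0) {
 *			return 0;
 *		}
 *		return (uint32_t)((vm_swap_get_used_space() * 100) / total);
 *	}
 */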

uint64_t
vm_swap_get_max_configured_space(void)
{
	int num_swap_files = (vm_num_swap_files_config ? vm_num_swap_files_config : VM_MAX_SWAP_FILE_NUM);
	return num_swap_files * MAX_SWAP_FILE_SIZE;
}

bool
vm_swap_low_on_space(void)
{
	if (vm_num_swap_files == 0 && vm_swapfile_can_be_created == FALSE) {
		return false;
	}

	if (((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < ((unsigned int)vm_swapfile_hiwater_segs) / 8)) {
		if (vm_num_swap_files == 0 && !SWAPPER_NEEDS_TO_UNTHROTTLE()) {
			return false;
		}

		if (vm_swapfile_last_failed_to_create_ts >= vm_swapfile_last_successful_create_ts) {
			return true;
		}
	}
	return false;
}

bool
vm_swap_out_of_space(void)
{
	if ((vm_num_swap_files == vm_num_swap_files_config) &&
	    ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < VM_SWAPOUT_LIMIT_MAX)) {
		/*
		 * Last swapfile and we have only space for the
		 * last few swapouts.
		 */
		return true;
	}

	return false;
}

boolean_t
vm_swap_files_pinned(void)
{
	boolean_t result;

	if (vm_swappin_enabled == FALSE) {
		return TRUE;
	}

	result = (vm_num_pinned_swap_files == vm_num_swap_files);

	return result;
}

#if CONFIG_FREEZE
boolean_t
vm_swap_max_budget(uint64_t *freeze_daily_budget)
{
	boolean_t use_device_value = FALSE;
	struct swapfile *swf = NULL;

	if (vm_num_swap_files) {
		lck_mtx_lock(&vm_swap_data_lock);

		swf = (struct swapfile*) queue_first(&swf_global_queue);

		if (swf) {
			while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
				if (swf->swp_flags == SWAP_READY) {
					assert(swf->swp_vp);

					if (vm_swap_vol_get_budget(swf->swp_vp, freeze_daily_budget) == 0) {
						use_device_value = TRUE;
					}
					break;
				}
				swf = (struct swapfile*) queue_next(&swf->swp_queue);
			}
		}

		lck_mtx_unlock(&vm_swap_data_lock);
	} else {
		/*
		 * This block is used for the initial budget value before any swap files
		 * are created. We create a temp swap file to get the budget.
		 */

		struct vnode *temp_vp = NULL;

		vm_swapfile_open(swapfilename, &temp_vp);

		if (temp_vp) {
			if (vm_swap_vol_get_budget(temp_vp, freeze_daily_budget) == 0) {
				use_device_value = TRUE;
			}

			vm_swapfile_close((uint64_t)&swapfilename, temp_vp);
			temp_vp = NULL;
		} else {
			*freeze_daily_budget = 0;
		}
	}

	return use_device_value;
}
#endif /* CONFIG_FREEZE */

void
vm_swap_reset_max_segs_tracking(uint64_t *alloced_max, uint64_t *used_max)
{
	lck_mtx_lock(&vm_swap_data_lock);

	*alloced_max = (uint64_t) vm_swapfile_total_segs_alloced_max * compressed_swap_chunk_size;
	*used_max = (uint64_t) vm_swapfile_total_segs_used_max * compressed_swap_chunk_size;

	vm_swapfile_total_segs_alloced_max = vm_swapfile_total_segs_alloced;
	vm_swapfile_total_segs_used_max = vm_swapfile_total_segs_used;

	lck_mtx_unlock(&vm_swap_data_lock);
}