/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if defined(__LP64__)
/*
 * Userspace functions for manipulating the reclaim buffer.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <mach/vm_reclaim.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#undef _mach_vm_user_
#include <mach/mach_vm_internal.h>
#include <mach/vm_map.h>
#include <os/atomic_private.h>
#include <mach/vm_page_size.h>


#pragma mark Utilities
#define _assert(__op, __condition, __cause) \
	do { \
	        if (!(__condition)) { \
	                __builtin_trap(); \
	        } \
	} while (0)

static uint64_t kAccountingThreshold;

static bool
update_accounting(mach_vm_reclaim_ringbuffer_v1_t ring_buffer, int64_t size)
{
	ring_buffer->va_in_buffer += size;
	if ((ring_buffer->va_in_buffer > ring_buffer->last_accounting_given_to_kernel &&
	    ring_buffer->va_in_buffer - ring_buffer->last_accounting_given_to_kernel > kAccountingThreshold) ||
	    (ring_buffer->last_accounting_given_to_kernel > ring_buffer->va_in_buffer &&
	    ring_buffer->last_accounting_given_to_kernel - ring_buffer->va_in_buffer > kAccountingThreshold)) {
		/*
		 * The caller should call mach_vm_reclaim_update_kernel_accounting.
		 * We store the value that they will give to the kernel here while we hold the lock.
		 * Technically it's out of sync with what the kernel has seen, but
		 * that will be rectified once the caller makes the mach_vm_reclaim_update_kernel_accounting call.
		 * If we forced this value to be in sync with the kernel's value,
		 * all callers would start calling mach_vm_reclaim_update_kernel_accounting until one of them
		 * finishes, and we'd have to take the ringbuffer lock again in
		 * mach_vm_reclaim_update_kernel_accounting.
		 */
		ring_buffer->last_accounting_given_to_kernel = ring_buffer->va_in_buffer;
		return true;
	}
	return false;
}
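
/*
 * Illustrative sketch (not part of this file's build; the caller-side names
 * are hypothetical): the accounting handshake described above, as seen from a
 * caller. The entry points used here are the public functions defined later
 * in this file.
 */
#if 0
static void
example_free_and_account(mach_vm_reclaim_ringbuffer_v1_t rb,
    mach_vm_address_t addr, uint32_t size)
{
	bool should_update = false;
	/* Publish the range as reclaimable; va_in_buffer is updated under the caller's lock.
	 * A real caller would keep the returned id for a later mark_used(). */
	(void)mach_vm_reclaim_mark_free(rb, addr, size, &should_update);
	if (should_update) {
		/* Drift from the kernel's view exceeded kAccountingThreshold; sync it. */
		(void)mach_vm_reclaim_update_kernel_accounting(rb);
	}
}
#endif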

static inline
mach_vm_reclaim_entry_v1_t
construct_entry(mach_vm_address_t start_addr, uint32_t size)
{
	mach_vm_reclaim_entry_v1_t entry = {0ULL};
	entry.address = start_addr;
	entry.size = size;
	return entry;
}

kern_return_t
mach_vm_reclaim_ringbuffer_init(mach_vm_reclaim_ringbuffer_v1_t ring_buffer)
{
	kAccountingThreshold = vm_page_size;
	kern_return_t kr;
	mach_vm_size_t buffer_size = vm_page_size;
	bzero(ring_buffer, sizeof(struct mach_vm_reclaim_ringbuffer_v1_s));
	ring_buffer->buffer_len = buffer_size / sizeof(mach_vm_reclaim_entry_v1_t);
	kr = mach_vm_map(mach_task_self(), (mach_vm_address_t *)&ring_buffer->buffer,
	    buffer_size, 0, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = mach_vm_deferred_reclamation_buffer_init(mach_task_self(),
	    (mach_vm_address_t) ring_buffer->buffer, buffer_size, &ring_buffer->indices);

	if (kr != KERN_SUCCESS) {
		mach_vm_deallocate(current_task(), (mach_vm_address_t) ring_buffer->buffer,
		    buffer_size);
		return kr;
	}

	return KERN_SUCCESS;
}
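
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a process
 * that opts into deferred reclamation allocates one ringbuffer structure,
 * initializes it once, and then uses it for all mark_free/mark_used calls.
 */
#if 0
static struct mach_vm_reclaim_ringbuffer_v1_s example_ringbuffer;

static kern_return_t
example_reclaim_setup(void)
{
	/* Maps one page of entries and registers it with the kernel. */
	return mach_vm_reclaim_ringbuffer_init(&example_ringbuffer);
}
#endif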

uint64_t
mach_vm_reclaim_mark_free(
	mach_vm_reclaim_ringbuffer_v1_t ring_buffer, mach_vm_address_t start_addr, uint32_t size,
	bool *should_update_kernel_accounting)
{
	uint64_t idx = 0, head = 0;
	mach_vm_reclaim_entry_v1_t entry = construct_entry(start_addr, size);
	mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->indices;
	mach_vm_reclaim_entry_v1_t *buffer = ring_buffer->buffer;
	mach_vm_size_t buffer_len = ring_buffer->buffer_len;
	*should_update_kernel_accounting = false;

	idx = os_atomic_load_wide(&indices->tail, relaxed);
	head = os_atomic_load_wide(&indices->head, relaxed);

	// This leaves one entry empty at the end of the buffer to differentiate an empty buffer from a full one
	while ((idx + 1) % buffer_len == head % buffer_len) {
		/*
		 * Buffer is full. Ask the kernel to reap it.
		 */
		mach_vm_deferred_reclamation_buffer_synchronize(mach_task_self(), buffer_len - 1);
		head = os_atomic_load_wide(&indices->head, relaxed);
		/* The kernel had to march head forward by at least the number of entries we asked it to reclaim. We hold the buffer lock, so tail couldn't have changed. */
		_assert("mach_vm_reclaim_mark_free", (os_atomic_load_wide(&indices->tail, relaxed) + 1) % buffer_len != head % buffer_len, head);
	}

	/*
	 * idx must be >= head, and the buffer is not full, so it's not possible for the kernel to be acting on the entry at tail % buffer_len.
	 * Thus we don't need to check the busy pointer here.
	 */
	buffer[idx % buffer_len] = entry;
	os_atomic_thread_fence(seq_cst); // the tail increment can not be seen before the entry is written to the buffer
	os_atomic_inc(&indices->tail, relaxed);
	*should_update_kernel_accounting = update_accounting(ring_buffer, size);

	return idx;
}
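
/*
 * Illustrative sketch (hypothetical helpers, not used by this file): the ring
 * deliberately keeps one slot empty, so with buffer_len slots it holds at
 * most buffer_len - 1 entries. These helpers restate the two index tests the
 * code above relies on.
 */
#if 0
static inline bool
example_ring_empty(uint64_t head, uint64_t tail, mach_vm_size_t buffer_len)
{
	/* head has caught up with tail: nothing left to reclaim. */
	return tail % buffer_len == head % buffer_len;
}

static inline bool
example_ring_full(uint64_t head, uint64_t tail, mach_vm_size_t buffer_len)
{
	/* Writing one more entry would make the ring indistinguishable from empty. */
	return (tail + 1) % buffer_len == head % buffer_len;
}
#endif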

bool
mach_vm_reclaim_mark_used(
	mach_vm_reclaim_ringbuffer_v1_t ring_buffer, uint64_t id, mach_vm_address_t start_addr, uint32_t size)
{
	mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->indices;
	mach_vm_reclaim_entry_v1_t *buffer = ring_buffer->buffer;
	mach_vm_size_t buffer_len = ring_buffer->buffer_len;
	uint64_t head = 0, busy = 0, original_tail = 0;
	if (id == VM_RECLAIM_INDEX_NULL) {
		// entry was never put in the reclaim ring buffer, so it's safe to re-use.
		return true;
	}

	head = os_atomic_load_wide(&indices->head, relaxed);
	if (id < head) {
		/*
		 * This is just a fast path for the case where the buffer has wrapped.
		 * It's not strictly necessary because idx must also be < busy.
		 * That's why we can use a relaxed load for the head ptr.
		 */
		return false;
	}

	/* Attempt to move tail to idx */
	original_tail = os_atomic_load_wide(&indices->tail, relaxed);
	_assert("mach_vm_reclaim_mark_used", id < original_tail, original_tail);

	os_atomic_store_wide(&indices->tail, id, relaxed);
	os_atomic_thread_fence(seq_cst); // Our write to tail must happen before our read of busy
	busy = os_atomic_load_wide(&indices->busy, relaxed);
	if (id < busy) {
		/* Kernel is acting on this entry. Undo. */
		os_atomic_store_wide(&indices->tail, original_tail, relaxed);
		return false;
	}
	mach_vm_reclaim_entry_v1_t *entry = &buffer[id % buffer_len];
	_assert("mach_vm_reclaim_mark_used", entry->size == size && entry->address == start_addr, entry->address);

	/* Successfully moved tail back. Can now overwrite the entry */
	memset(entry, 0, sizeof(mach_vm_reclaim_entry_v1_t));
	os_atomic_thread_fence(seq_cst); // the tail reset can not be seen before the entry is cleared in the buffer
	/* Reset tail. */
	os_atomic_store_wide(&indices->tail, original_tail, relaxed);

	update_accounting(ring_buffer, -(int64_t) size);

	return true;
}
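
/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * allocator that deferred a free and now wants the range back. mark_used()
 * either pulls the entry out of the ring (the mapping is untouched) or
 * reports that the kernel already reclaimed it, in which case the memory must
 * be re-allocated instead of reused in place.
 */
#if 0
static bool
example_try_reuse(mach_vm_reclaim_ringbuffer_v1_t rb, uint64_t id,
    mach_vm_address_t addr, uint32_t size)
{
	if (mach_vm_reclaim_mark_used(rb, id, addr, size)) {
		return true;    /* still mapped; safe to hand back out */
	}
	return false;           /* reclaimed (or being reclaimed); must re-allocate */
}
#endif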

kern_return_t
mach_vm_reclaim_update_kernel_accounting(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer)
{
	return mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes(current_task(),
	           ring_buffer->va_in_buffer);
}

bool
mach_vm_reclaim_is_available(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer, uint64_t id)
{
	const mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->indices;
	if (id == VM_RECLAIM_INDEX_NULL) {
		// entry was never put in the reclaim ring buffer, so it's safe to re-use.
		return true;
	}

	/*
	 * If the kernel has marched its busy pointer past this entry, consider it reclaimed.
	 * It's possible that the kernel will not reclaim this entry yet b/c we're racing with it on
	 * another thread via mach_vm_reclaim_mark_used.
	 */
	uint64_t busy = os_atomic_load_wide(&indices->busy, relaxed);

	return id >= busy;
}
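
/*
 * Illustrative sketch (hypothetical caller): a cheap, read-only pre-check
 * before attempting mark_used(). A false result is definitive (the kernel's
 * busy index has passed the entry); a true result must still be confirmed
 * with mach_vm_reclaim_mark_used() because of the race described above.
 */
#if 0
static bool
example_fast_reuse_check(const mach_vm_reclaim_ringbuffer_v1_t rb, uint64_t id)
{
	return mach_vm_reclaim_is_available(rb, id);
}
#endif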

kern_return_t
mach_vm_reclaim_synchronize(mach_vm_reclaim_ringbuffer_v1_t ringbuffer, mach_vm_size_t num_entries_to_reclaim)
{
	if (ringbuffer == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_deferred_reclamation_buffer_synchronize(mach_task_self(), num_entries_to_reclaim);
}

#endif /* defined(__LP64__) */