1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if defined(__LP64__)
30 /*
31 * Userspace functions for manipulating the reclaim buffer.
32 */
33 #include <inttypes.h>
34 #include <stdbool.h>
35 #include <stdlib.h>
36 #include <mach/vm_reclaim.h>
37 #include <mach/mach.h>
38 #include <mach/mach_vm.h>
39 #undef _mach_vm_user_
40 #include <mach/mach_vm_internal.h>
41 #include <mach/vm_map.h>
42 #include <mach/vm_statistics.h>
43 #include <os/atomic_private.h>
44 #include <mach/vm_page_size.h>
45
46
47 #pragma mark Utilities
/*
 * Internal invariant check: trap the process immediately if __condition is
 * false. __op (name of the failing operation) and __cause (a diagnostic
 * value) are accepted for call-site documentation but deliberately unused
 * in this build — the __builtin_trap() itself is the only signal.
 */
#define _assert(__op, __condition, __cause) \
	do { \
		if (!(__condition)) { \
			__builtin_trap(); \
		} \
	} while (0)
54
55 static uint64_t kAccountingThreshold;
56
57 static bool
update_accounting(mach_vm_reclaim_ringbuffer_v1_t ring_buffer,int64_t size)58 update_accounting(mach_vm_reclaim_ringbuffer_v1_t ring_buffer, int64_t size)
59 {
60 ring_buffer->va_in_buffer += size;
61 if ((ring_buffer->va_in_buffer > ring_buffer->last_accounting_given_to_kernel &&
62 ring_buffer->va_in_buffer - ring_buffer->last_accounting_given_to_kernel > kAccountingThreshold) ||
63 (ring_buffer->last_accounting_given_to_kernel > ring_buffer->va_in_buffer &&
64 ring_buffer->last_accounting_given_to_kernel - ring_buffer->va_in_buffer > kAccountingThreshold)) {
65 /*
66 * The caller should call mach_vm_reclaim_update_kernel_accounting.
67 * We store the value that they will give to the kernel here while we hold the lock.
68 * Technically it's out of sync with what the kernel has seen, but
69 * that will be rectified once the caller makes the mach_vm_reclaim_update_kernel_accounting call.
70 * If we forced this value to be in sync with the kernel's value
71 * all callers would start calling mach_vm_reclaim_update_kernel_accounting until one of them
72 * finishes & we'd have to take the ringbuffer lock again in
73 * mach_vm_reclaim_update_kernel_accounting.
74 */
75 ring_buffer->last_accounting_given_to_kernel = ring_buffer->va_in_buffer;
76 return true;
77 }
78 return false;
79 }
80
81 static inline
82 mach_vm_reclaim_entry_v1_t
construct_entry(mach_vm_address_t start_addr,uint32_t size,mach_vm_reclaim_behavior_v1_t behavior)83 construct_entry(mach_vm_address_t start_addr, uint32_t size, mach_vm_reclaim_behavior_v1_t behavior)
84 {
85 mach_vm_reclaim_entry_v1_t entry = {0ULL};
86 entry.address = start_addr;
87 entry.size = size;
88 entry.behavior = behavior;
89 return entry;
90 }
91
92 kern_return_t
mach_vm_reclaim_ringbuffer_init(mach_vm_reclaim_ringbuffer_v1_t ring_buffer)93 mach_vm_reclaim_ringbuffer_init(mach_vm_reclaim_ringbuffer_v1_t ring_buffer)
94 {
95 kAccountingThreshold = vm_page_size;
96 kern_return_t kr;
97 mach_vm_size_t buffer_size = vm_page_size;
98 bzero(ring_buffer, sizeof(struct mach_vm_reclaim_ringbuffer_v1_s));
99 size_t entries_size = buffer_size - \
100 offsetof(struct mach_vm_reclaim_buffer_v1_s, entries);
101 ring_buffer->buffer_len = entries_size / sizeof(mach_vm_reclaim_entry_v1_t);
102
103 int flags = VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(VM_MEMORY_MALLOC);
104 kr = mach_vm_map(mach_task_self(), (mach_vm_address_t *)&ring_buffer->buffer,
105 buffer_size, 0, flags, MEMORY_OBJECT_NULL, 0, FALSE,
106 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
107 if (kr != KERN_SUCCESS) {
108 return kr;
109 }
110
111 kr = mach_vm_deferred_reclamation_buffer_init(mach_task_self(),
112 (mach_vm_address_t)ring_buffer->buffer, buffer_size);
113
114 if (kr != KERN_SUCCESS) {
115 mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)ring_buffer->buffer,
116 buffer_size);
117 return kr;
118 }
119
120 return KERN_SUCCESS;
121 }
122
/*
 * Queue the region [start_addr, start_addr + size) into the reclaim ring
 * buffer with the given behavior, making it eligible for deferred
 * reclamation by the kernel.
 *
 * If the buffer is full, the kernel is asked to drain it synchronously
 * before the entry is inserted. On return, *should_update_kernel_accounting
 * is true when the caller should follow up with
 * mach_vm_reclaim_update_kernel_accounting().
 *
 * Returns the index (id) at which the entry was placed; that id may later
 * be passed to mach_vm_reclaim_mark_used() to attempt to take the region
 * back, or to the mach_vm_reclaim_is_* predicates.
 *
 * NOTE(review): the comments below reference "the buffer lock" — callers
 * appear to be expected to serialize tail-side access externally; confirm
 * against the public header's contract.
 */
uint64_t
mach_vm_reclaim_mark_free(
	mach_vm_reclaim_ringbuffer_v1_t ring_buffer, mach_vm_address_t start_addr, uint32_t size,
	mach_vm_reclaim_behavior_v1_t behavior, bool *should_update_kernel_accounting)
{
	uint64_t idx = 0, head = 0;
	kern_return_t kr;
	mach_vm_reclaim_entry_v1_t entry = construct_entry(start_addr, size, behavior);
	mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->buffer->indices;
	mach_vm_reclaim_entry_v1_t *buffer = ring_buffer->buffer->entries;
	mach_vm_size_t buffer_len = ring_buffer->buffer_len;
	*should_update_kernel_accounting = false;

	idx = os_atomic_load_wide(&indices->tail, relaxed);
	head = os_atomic_load_wide(&indices->head, relaxed);

	// This leaves one entry empty at the end of the buffer to differentiate an empty buffer from a full one
	while ((idx + 1) % buffer_len == head % buffer_len) {
		/*
		 * Buffer is full. Ask the kernel to reap it.
		 */
		kr = mach_vm_deferred_reclamation_buffer_synchronize(mach_task_self(), buffer_len - 1);
		_assert("mach_vm_reclaim_mark_free", kr == KERN_SUCCESS, kr);
		/* Re-read head: the drain above is the only thing that moves it forward for us. */
		head = os_atomic_load_wide(&indices->head, relaxed);
		/* kernel had to march head forward at least kNumEntriesToReclaim. We hold the buffer lock so tail couldn't have changed */
		_assert("mach_vm_reclaim_mark_free", (idx + 1) % buffer_len != head % buffer_len, head);
	}

	/*
	 * idx must be >= head & the buffer is not full so it's not possible for the kernel to be acting on the entry at (tail + 1) % size.
	 * Thus we don't need to check the busy pointer here.
	 */
	buffer[idx % buffer_len] = entry;
	os_atomic_thread_fence(seq_cst); // tail increment can not be seen before the entry is cleared in the buffer
	os_atomic_inc(&indices->tail, relaxed);
	/* May tell the caller to push the new accounting total to the kernel. */
	*should_update_kernel_accounting = update_accounting(ring_buffer, size);

	return idx;
}
162
/*
 * Attempt to take back (re-use) the region previously queued under `id` by
 * mach_vm_reclaim_mark_free().
 *
 * Returns true when the entry was successfully removed from the buffer (or
 * was never in it, id == VM_RECLAIM_INDEX_NULL) and the memory is safe to
 * re-use. Returns false when the kernel has already begun acting on the
 * entry — the caller must treat the region as reclaimed.
 *
 * The protocol: temporarily publish `id` as the tail, fence, then read the
 * kernel's busy cursor. If the kernel's busy cursor is already past `id`,
 * restore the tail and report failure; otherwise the kernel is guaranteed
 * not to touch the entry, so it can be cleared and the tail restored.
 */
bool
mach_vm_reclaim_mark_used(
	mach_vm_reclaim_ringbuffer_v1_t ring_buffer, uint64_t id,
	mach_vm_address_t start_addr, uint32_t size)
{
	mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->buffer->indices;
	mach_vm_reclaim_entry_v1_t *buffer = ring_buffer->buffer->entries;
	mach_vm_size_t buffer_len = ring_buffer->buffer_len;
	uint64_t head = 0, busy = 0, original_tail = 0;
	if (id == VM_RECLAIM_INDEX_NULL) {
		// entry was never put in the reclaim ring buffer, so it's safe to re-use.
		return true;
	}

	head = os_atomic_load_wide(&indices->head, relaxed);
	if (id < head) {
		/*
		 * This is just a fast path for the case where the buffer has wrapped.
		 * It's not strictly necessary because idx must also be < busy.
		 * That's why we can use a relaxed load for the head ptr.
		 */
		return false;
	}

	/* Attempt to move tail to idx */
	original_tail = os_atomic_load_wide(&indices->tail, relaxed);
	/* An id at or past the tail was never handed out by mark_free. */
	_assert("mach_vm_reclaim_mark_used", id < original_tail, original_tail);

	os_atomic_store_wide(&indices->tail, id, relaxed);
	os_atomic_thread_fence(seq_cst); // Our write to tail must happen before our read of busy
	busy = os_atomic_load_wide(&indices->busy, relaxed);
	if (id < busy) {
		/* Kernel is acting on this entry. Undo. */
		os_atomic_store_wide(&indices->tail, original_tail, relaxed);
		return false;
	}
	mach_vm_reclaim_entry_v1_t *entry = &buffer[id % buffer_len];
	/* The slot must still describe the region the caller thinks it queued. */
	_assert("mach_vm_reclaim_mark_used", entry->size == size && entry->address == start_addr, entry->address);

	/* Successfully moved tail back. Can now overwrite the entry */
	memset(entry, 0, sizeof(mach_vm_reclaim_entry_v1_t));
	os_atomic_thread_fence(seq_cst); // tail increment can not be seen before the entry is cleared in the buffer
	/* Reset tail. */
	os_atomic_store_wide(&indices->tail, original_tail, relaxed);

	/* The region is back in use: subtract it from the reclaimable total. */
	update_accounting(ring_buffer, -(int64_t) size);

	return true;
}
212
213 kern_return_t
mach_vm_reclaim_update_kernel_accounting(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer)214 mach_vm_reclaim_update_kernel_accounting(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer)
215 {
216 return mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes(current_task(),
217 ring_buffer->va_in_buffer);
218 }
219
220 bool
mach_vm_reclaim_is_available(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer,uint64_t id)221 mach_vm_reclaim_is_available(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer,
222 uint64_t id)
223 {
224 const mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->buffer->indices;
225 if (id == VM_RECLAIM_INDEX_NULL) {
226 // entry was never put in the reclaim ring buffer, so it's safe to re-use.
227 return true;
228 }
229
230 /*
231 * If the kernel has marched its busy pointer past this entry, consider it reclaimed.
232 * It's possible that the kernel will not reclaim this entry yet b/c we're racing with it on
233 * another thread via mach_vm_reclaim_mark_used.
234 */
235 uint64_t busy = os_atomic_load_wide(&indices->busy, relaxed);
236
237 return id >= busy;
238 }
239
240 bool
mach_vm_reclaim_is_reclaimed(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer,uint64_t id)241 mach_vm_reclaim_is_reclaimed(const mach_vm_reclaim_ringbuffer_v1_t ring_buffer,
242 uint64_t id)
243 {
244 const mach_vm_reclaim_indices_v1_t *indices = &ring_buffer->buffer->indices;
245 if (id == VM_RECLAIM_INDEX_NULL) {
246 // entry was never put in reclaim ring buffer, consider it un-reclaimed
247 return false;
248 }
249
250 /*
251 * If the kernel has marched its head pointer past this entry, consider it
252 * reclaimed.
253 */
254 uint64_t head = os_atomic_load_wide(&indices->head, relaxed);
255
256 return id < head;
257 }
258
259 kern_return_t
mach_vm_reclaim_synchronize(mach_vm_reclaim_ringbuffer_v1_t ringbuffer,mach_vm_size_t num_entries_to_reclaim)260 mach_vm_reclaim_synchronize(mach_vm_reclaim_ringbuffer_v1_t ringbuffer, mach_vm_size_t num_entries_to_reclaim)
261 {
262 if (ringbuffer == NULL) {
263 return KERN_INVALID_ARGUMENT;
264 }
265
266 return mach_vm_deferred_reclamation_buffer_synchronize(mach_task_self(), num_entries_to_reclaim);
267 }
268
269 #endif /* defined(__LP64__) */
270