/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * This file is meant to contain all of the PPL entry points and PPL-specific
 * functionality.
 *
 * Every function in the pmap that chooses between running a "*_ppl()" or
 * "*_internal()" variant of an operation is placed into this file. This file
 * also contains the ppl_handler_table, as well as a few PPL-only entry/exit
 * helper functions.
 *
 * See doc/ppl.md for more information about how these PPL entry points work.
 */
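/**
 * A minimal sketch of that dispatch pattern, using a hypothetical entry point
 * named pmap_foo() (real instances, such as pmap_cpu_data_init() below, follow
 * the same shape):
 *
 *   void
 *   pmap_foo(void)
 *   {
 *   #if XNU_MONITOR
 *       pmap_foo_ppl();       // Trap into the PPL to perform the operation.
 *   #else
 *       pmap_foo_internal();  // No PPL on this configuration; run directly.
 *   #endif
 *   }
 */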
#include <kern/ledger.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <arm/pmap/pmap_internal.h>

/**
 * Keeps track of the total number of pages taken from the PPL page free lists
 * and returned back to the kernel. This value isn't used in any logic; it's
 * strictly available for debugging purposes.
 */
#if XNU_MONITOR
static uint64_t pmap_ppl_pages_returned_to_kernel_count_total = 0;
#endif /* XNU_MONITOR */

/**
 * PMAP_SUPPORT_PROTOTYPES() will automatically create prototypes for the
 * _internal() and _ppl() variants of a PPL entry point. It also automatically
 * generates the code for the _ppl() variant, which is what is used to jump
 * into the PPL.
 *
 * See doc/ppl.md for more information about how these PPL entry points work.
 */
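/**
 * As an illustrative sketch (the exact expansion lives in the pmap headers,
 * so treat this as approximate rather than authoritative), an invocation such
 * as the mapping_free_prime one below:
 *
 *   PMAP_SUPPORT_PROTOTYPES(
 *       kern_return_t,
 *       mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX);
 *
 * effectively declares a mapping_free_prime_internal() prototype for the
 * actual implementation and emits a mapping_free_prime_ppl() wrapper that is
 * used to enter the PPL through the given handler-table index.
 */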

#if XNU_MONITOR

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_mark_page_as_ppl_page, (pmap_paddr_t pa, bool initially_free), PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_cpu_data_init, (unsigned int cpu_number), PMAP_CPU_DATA_INIT_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	uint64_t,
	pmap_release_ppl_pages_to_kernel, (void), PMAP_RELEASE_PAGES_TO_KERNEL_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_ledger_verify_size, (size_t),
	PMAP_LEDGER_VERIFY_SIZE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	ledger_t,
	pmap_ledger_alloc, (void),
	PMAP_LEDGER_ALLOC_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_ledger_free, (ledger_t),
	PMAP_LEDGER_FREE_INDEX);

#endif /* XNU_MONITOR */

PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX);

/* TODO: Move the ppl_handler_table into this file. */

#if XNU_MONITOR

/**
 * Claim a page on behalf of the PPL by marking it as PPL-owned and only
 * allowing the PPL to write to it. The page is also added to the PPL page
 * free list so it can be allocated later.
 *
 * @param pa The physical address of the page to mark as PPL-owned.
 */
void
pmap_mark_page_as_ppl_page(pmap_paddr_t pa)
{
	pmap_mark_page_as_ppl_page_ppl(pa, true);
}

/**
 * Quickly release pages living on the PPL page free list back to the VM. The
 * VM will call this when the system is under memory pressure.
 *
 * @note A minimum number of pages (set by PMAP_MIN_FREE_PPL_PAGES) will always
 *       be kept on the PPL page free list to ensure that core operations can
 *       occur without having to refill the free list.
 */
uint64_t
pmap_release_ppl_pages_to_kernel(void)
{
	pmap_paddr_t pa = 0;
	vm_page_t mem = VM_PAGE_NULL;
	vm_page_t local_freeq = VM_PAGE_NULL;
	uint64_t pmap_ppl_pages_returned_to_kernel_count = 0;

	while (pmap_ppl_free_page_count > PMAP_MIN_FREE_PPL_PAGES) {
		/* Convert a single PPL page back into a kernel-usable page. */
		pa = pmap_release_ppl_pages_to_kernel_ppl();

		if (!pa) {
			break;
		}

		/**
		 * If we retrieved a page, add it to the queue of pages that will be
		 * given back to the VM.
		 */
		vm_object_lock(pmap_object);

		mem = vm_page_lookup(pmap_object, (pa - gPhysBase));
		assert(mem != VM_PAGE_NULL);
		assert(VM_PAGE_WIRED(mem));

		mem->vmp_busy = TRUE;
		mem->vmp_snext = local_freeq;
		local_freeq = mem;
		pmap_ppl_pages_returned_to_kernel_count++;
		pmap_ppl_pages_returned_to_kernel_count_total++;

		/* Pages are considered "in use" until given back to the VM. */
		OSAddAtomic(-1, &inuse_pmap_pages_count);

		vm_object_unlock(pmap_object);
	}

	/**
	 * Give the pages that we've converted into kernel-usable pages back to
	 * the VM.
	 */
	if (local_freeq) {
		/* We need to hold the object lock for freeing pages. */
		vm_object_lock(pmap_object);
		vm_page_free_list(local_freeq, TRUE);
		vm_object_unlock(pmap_object);
	}

	/**
	 * If any pages were returned to the VM, take the page queues lock and
	 * decrement the wire count accordingly.
	 */
	if (pmap_ppl_pages_returned_to_kernel_count) {
		vm_page_lockspin_queues();
		vm_page_wire_count -= pmap_ppl_pages_returned_to_kernel_count;
		vm_page_unlock_queues();
	}

	return pmap_ppl_pages_returned_to_kernel_count;
}

#endif /* XNU_MONITOR */

/**
 * See pmap_cpu_data_init_internal()'s function header for more information.
 */
void
pmap_cpu_data_init(void)
{
#if XNU_MONITOR
	pmap_cpu_data_init_ppl(cpu_number());
#else
	pmap_cpu_data_init_internal(cpu_number());
#endif
}

/**
 * Prime the pv_entry_t free lists with a healthy number of objects first thing
 * during boot. These objects will be used to keep track of physical-to-virtual
 * mappings.
 */
void
mapping_free_prime(void)
{
	kern_return_t kr = KERN_FAILURE;

#if XNU_MONITOR
	unsigned int i = 0;

	/**
	 * Allocate the needed PPL pages up front, to minimize the chance that we
	 * will need to call into the PPL multiple times.
	 */
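	/*
	 * Each allocated page can back (PAGE_SIZE / sizeof(pv_entry_t)) pv_entry_t
	 * objects, which is why the loops below advance by that amount.
	 */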
	for (i = 0; i < pv_alloc_initial_target; i += (PAGE_SIZE / sizeof(pv_entry_t))) {
		pmap_alloc_page_for_ppl(0);
	}

	for (i = 0; i < pv_kern_alloc_initial_target; i += (PAGE_SIZE / sizeof(pv_entry_t))) {
		pmap_alloc_page_for_ppl(0);
	}

	while ((kr = mapping_free_prime_ppl()) == KERN_RESOURCE_SHORTAGE) {
		pmap_alloc_page_for_ppl(0);
	}
#else /* XNU_MONITOR */
	kr = mapping_free_prime_internal();
#endif /* XNU_MONITOR */

	if (kr != KERN_SUCCESS) {
		panic("%s: failed, no pages available? kr=%d", __func__, kr);
	}
}

/**
 * See pmap_ledger_verify_size_internal()'s function header for more information.
 */
#if !XNU_MONITOR
__attribute__((noreturn))
#endif /* !XNU_MONITOR */
void
pmap_ledger_verify_size(size_t size)
{
#if XNU_MONITOR
	pmap_ledger_verify_size_ppl(size);
#else /* XNU_MONITOR */
	/**
	 * Ledger objects are only managed by the pmap on PPL-enabled systems. Other
	 * systems will allocate them using a zone allocator.
	 */
	panic("%s: unsupported on non-PPL systems, size=%lu", __func__, size);
	__builtin_unreachable();
#endif /* XNU_MONITOR */
}

/**
 * See pmap_ledger_alloc_internal()'s function header for more information.
 */
ledger_t
pmap_ledger_alloc(void)
{
#if XNU_MONITOR
	ledger_t ledger = NULL;

	while ((ledger = pmap_ledger_alloc_ppl()) == NULL) {
		pmap_alloc_page_for_ppl(0);
	}

	return ledger;
#else /* XNU_MONITOR */
	/**
	 * Ledger objects are only managed by the pmap on PPL-enabled systems. Other
	 * systems will allocate them using a zone allocator.
	 */
	panic("%s: unsupported on non-PPL systems", __func__);
	__builtin_unreachable();
#endif /* XNU_MONITOR */
}

/**
 * See pmap_ledger_free_internal()'s function header for more information.
 */
#if !XNU_MONITOR
__attribute__((noreturn))
#endif /* !XNU_MONITOR */
void
pmap_ledger_free(ledger_t ledger)
{
#if XNU_MONITOR
	pmap_ledger_free_ppl(ledger);
#else /* XNU_MONITOR */
	/**
	 * Ledger objects are only managed by the pmap on PPL-enabled systems. Other
	 * systems will allocate them using a zone allocator.
	 */
	panic("%s: unsupported on non-PPL systems, ledger=%p", __func__, ledger);
	__builtin_unreachable();
#endif /* XNU_MONITOR */
}