1 /*
2 * Copyright (c) 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /**
29 * This header file stores the types, and prototypes used strictly by the pmap
30 * itself. The public pmap API exported to the rest of the kernel should be
31 * located in osfmk/arm/pmap.h.
32 *
33 * This file will automatically include all of the other internal arm/pmap/
34 * headers so .c files will only need to include this one header.
35 */
36 #ifndef _ARM_PMAP_PMAP_INTERNAL_H_
37 #define _ARM_PMAP_PMAP_INTERNAL_H_
38
39 #include <stdint.h>
40
41 #include <kern/debug.h>
42 #include <kern/locks.h>
43 #include <mach/vm_types.h>
44 #include <mach_assert.h>
45
46 #include <arm/cpu_data.h>
47 #include <arm64/proc_reg.h>
48
49 /**
50 * arm/pmap.h and the other /arm/pmap/ internal header files are safe to be
51 * included in this file since they shouldn't rely on any of the internal pmap
52 * header files (so no circular dependencies). Implementation files will only
53 * need to include this one header to get all of the relevant pmap types.
54 */
55 #include <arm/pmap.h>
56 #include <arm/pmap/pmap_data.h>
57 #include <arm/pmap/pmap_pt_geometry.h>
58
59 #if XNU_MONITOR
60 /**
61 * Temporary macros and prototypes needed to implement the ppl_handler_table.
62 *
63 * Eventually all calls to these macros will be in pmap_ppl_interface.c and
64 * these macros can be moved into that .c file.
65 *
66 * The <function>_internal() externs in here are also only included to be used
67 * by the ppl_handler_table. Once the ppl_handler_table moves into
68 * pmap_ppl_interface.c, then these prototypes can be removed (the
69 * PMAP_SUPPORT_PROTOTYPES() macro creates these prototypes automatically).
70 *
71 * TODO: Move PMAP_SUPPORT_PROTOTYPES_*() macros into pmap_ppl_interface.c and
72 * remove these _internal() prototypes.
73 */
74
75 extern pmap_paddr_t pmap_release_ppl_pages_to_kernel_internal(void);
76 extern kern_return_t mapping_free_prime_internal(void);
77
78 extern void pmap_ledger_verify_size_internal(size_t);
79 extern ledger_t pmap_ledger_alloc_internal(void);
80 extern void pmap_ledger_free_internal(ledger_t);
81
82 /**
83 * This macro generates prototypes for the *_internal functions, which represent
84 * the PPL interface. When the PPL is enabled, this will also generate
85 * prototypes for the PPL entrypoints (*_ppl), as well as generating the
86 * entrypoints themselves.
87 *
88 * Since these macros generate code, they should only be called from a single
89 * implementation file for each PPL entry point.
90 */
91 #define GEN_ASM_NAME(__function_name) _##__function_name##_ppl
92
93 #define PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
94 extern __return_type __function_name##_internal __function_args; \
95 extern __return_type __function_name##_ppl __function_args; \
96 __asm__ (".text \n" \
97 ".align 2 \n" \
98 ".globl " #__assembly_function_name "\n" \
99 #__assembly_function_name ":\n" \
100 "mov x15, " #__function_index "\n" \
101 "b _aprr_ppl_enter\n")
102
103 #define PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
104 PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name)
105
106 #define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
107 PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, GEN_ASM_NAME(__function_name))
108 #else /* XNU_MONITOR */
109 #define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
110 extern __return_type __function_name##_internal __function_args
111 #endif /* XNU_MONITOR */
112
113 /**
114 * Global variables exported to the rest of the internal pmap implementation.
115 */
116 extern lck_grp_t pmap_lck_grp;
117 extern bool hib_entry_pmap_lockdown;
118 extern pmap_paddr_t avail_start;
119 extern pmap_paddr_t avail_end;
120 extern uint32_t pmap_max_asids;
121
122 /**
123 * Functions exported to the rest of the internal pmap implementation.
124 */
125
126 #if XNU_MONITOR
127 extern void pmap_set_xprr_perm(unsigned int, unsigned int, unsigned int);
128 extern void pa_set_range_xprr_perm(pmap_paddr_t, pmap_paddr_t, unsigned int, unsigned int);
129 #endif /* XNU_MONITOR */
130
131 extern int pmap_remove_range_options(
132 pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *, vm_map_address_t *, bool *, int);
133
134 extern void pmap_tte_deallocate(
135 pmap_t, vm_offset_t, vm_offset_t, bool, tt_entry_t *, unsigned int);
136
137 #if defined(PVH_FLAG_EXEC)
138 extern void pmap_set_ptov_ap(unsigned int, unsigned int, boolean_t);
139 #endif /* defined(PVH_FLAG_EXEC) */
140
141
142 extern pmap_t current_pmap(void);
143 extern void pmap_tt_ledger_credit(pmap_t, vm_size_t);
144 extern void pmap_tt_ledger_debit(pmap_t, vm_size_t);
145
146 extern void write_pte(pt_entry_t *, pt_entry_t);
147
148 /**
149 * The qsort function is used by various parts of the pmap but doesn't contain
150 * its own header file with prototype so it must be manually extern'd.
151 *
152 * The `cmpfunc_t` type is a pointer to a function that should return the
153 * following:
154 *
155 * return < 0 for a < b
156 * 0 for a == b
157 * > 0 for a > b
158 */
159 typedef int (*cmpfunc_t)(const void *a, const void *b);
160 extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
161
162 /**
163 * Inline and macro functions exported for usage by other pmap modules.
164 *
165 * In an effort to not cause any performance regressions while breaking up the
166 * pmap, I'm keeping all functions originally marked as "static inline", as
167 * inline and moving them into header files to be shared across the pmap
168 * modules. In reality, many of these functions probably don't need to be inline
169 * and can be moved back into a .c file.
170 *
171 * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
172 */
173
174 /**
175 * Macro used to ensure that pmap data structures aren't modified during
176 * hibernation image copying.
177 */
178 #if HIBERNATION
179 #define ASSERT_NOT_HIBERNATING() (assertf(!hib_entry_pmap_lockdown, \
180 "Attempted to modify PMAP data structures after hibernation image copying has begun."))
181 #else
182 #define ASSERT_NOT_HIBERNATING()
183 #endif /* HIBERNATION */
184
185 /* Helper macro for rounding an address up to a correctly aligned value. */
186 #define PMAP_ALIGN(addr, align) ((addr) + ((align) - 1) & ~((align) - 1))
187
188 /**
189 * pmap_data.h must be included before this point so that pmap_lock_mode_t is
190 * defined before the rest of the locking code.
191 */
192
193 /**
194 * Initialize a pmap object's reader/writer lock.
195 *
196 * @param pmap The pmap whose lock to initialize.
197 */
198 static inline void
pmap_lock_init(pmap_t pmap)199 pmap_lock_init(pmap_t pmap)
200 {
201 lck_rw_init(&pmap->rwlock, &pmap_lck_grp, 0);
202 pmap->rwlock.lck_rw_can_sleep = FALSE;
203 }
204
205 /**
206 * Destroy a pmap object's reader/writer lock.
207 *
208 * @param pmap The pmap whose lock to destroy.
209 */
210 static inline void
pmap_lock_destroy(pmap_t pmap)211 pmap_lock_destroy(pmap_t pmap)
212 {
213 lck_rw_destroy(&pmap->rwlock, &pmap_lck_grp);
214 }
215
216 /**
217 * Assert that the pmap lock is held in the given mode.
218 *
219 * @param pmap The pmap whose lock to assert is being held.
220 * @param mode The mode the lock should be held in.
221 */
222 static inline void
pmap_assert_locked(__unused pmap_t pmap,__unused pmap_lock_mode_t mode)223 pmap_assert_locked(__unused pmap_t pmap, __unused pmap_lock_mode_t mode)
224 {
225 #if MACH_ASSERT
226 switch (mode) {
227 case PMAP_LOCK_SHARED:
228 lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_SHARED);
229 break;
230 case PMAP_LOCK_EXCLUSIVE:
231 lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_EXCLUSIVE);
232 break;
233 default:
234 panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __FUNCTION__, pmap, mode);
235 }
236 #endif
237 }
238
239 /**
240 * Assert that the pmap lock is held in any mode.
241 *
242 * @param pmap The pmap whose lock should be held.
243 */
244 __unused static inline void
pmap_assert_locked_any(__unused pmap_t pmap)245 pmap_assert_locked_any(__unused pmap_t pmap)
246 {
247 #if MACH_ASSERT
248 lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_HELD);
249 #endif
250 }
251
252 /**
253 * Acquire a pmap object's reader/writer lock as either shared (read-only) or
254 * exclusive (read/write).
255 *
256 * @note Failed attempts to grab the lock will NOT go to sleep, they'll spin
257 * until the lock can be acquired.
258 *
259 * @param pmap The pmap whose lock to acquire.
260 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
261 */
262 static inline void
pmap_lock(pmap_t pmap,pmap_lock_mode_t mode)263 pmap_lock(pmap_t pmap, pmap_lock_mode_t mode)
264 {
265 #if !XNU_MONITOR
266 mp_disable_preemption();
267 #endif
268
269 switch (mode) {
270 case PMAP_LOCK_SHARED:
271 lck_rw_lock_shared(&pmap->rwlock);
272 break;
273 case PMAP_LOCK_EXCLUSIVE:
274 lck_rw_lock_exclusive(&pmap->rwlock);
275 break;
276 default:
277 panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
278 }
279 }
280
281 /**
282 * Attempt to acquire the pmap lock in the specified mode. If the lock couldn't
283 * be acquired, then spin until it can be or a preemption is pending.
284 *
285 * @param pmap The pmap whose lock to attempt to acquire.
286 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
287 *
288 * @return true if the lock was acquired, false otherwise.
289 */
290 static inline bool
pmap_lock_preempt(pmap_t pmap,pmap_lock_mode_t mode)291 pmap_lock_preempt(pmap_t pmap, pmap_lock_mode_t mode)
292 {
293 bool ret = false;
294
295 #if !XNU_MONITOR
296 mp_disable_preemption();
297 #endif
298
299 bool (^check_preemption)(void) = ^{
300 return pmap_pending_preemption();
301 };
302
303 switch (mode) {
304 case PMAP_LOCK_SHARED:
305 ret = lck_rw_lock_shared_b(&pmap->rwlock, check_preemption);
306 break;
307 case PMAP_LOCK_EXCLUSIVE:
308 ret = lck_rw_lock_exclusive_b(&pmap->rwlock, check_preemption);
309 break;
310 default:
311 panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
312 }
313
314 if (!ret) {
315 #if !XNU_MONITOR
316 mp_enable_preemption();
317 #endif
318 }
319
320 return ret;
321 }
322
323 /**
324 * Attempt to acquire the pmap lock in the specified mode. If the lock couldn't
325 * be acquired, then return immediately instead of spinning.
326 *
327 * @param pmap The pmap whose lock to attempt to acquire.
328 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
329 *
330 * @return True if the lock was acquired, false otherwise.
331 */
332 static inline bool
pmap_try_lock(pmap_t pmap,pmap_lock_mode_t mode)333 pmap_try_lock(pmap_t pmap, pmap_lock_mode_t mode)
334 {
335 bool ret = false;
336
337 #if !XNU_MONITOR
338 mp_disable_preemption();
339 #endif
340
341 switch (mode) {
342 case PMAP_LOCK_SHARED:
343 ret = lck_rw_try_lock_shared(&pmap->rwlock);
344 break;
345 case PMAP_LOCK_EXCLUSIVE:
346 ret = lck_rw_try_lock_exclusive(&pmap->rwlock);
347 break;
348 default:
349 panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
350 }
351
352 if (!ret) {
353 #if !XNU_MONITOR
354 mp_enable_preemption();
355 #endif
356 }
357
358 return ret;
359 }
360
361 /**
362 * Attempts to promote an already acquired pmap lock from shared to exclusive.
363 *
364 * @param pmap The pmap whose lock should be promoted from shared to exclusive.
365 *
366 * @return True if successfully promoted, otherwise false upon failure in
367 * which case the shared lock is dropped.
368 */
369 static inline bool
pmap_lock_shared_to_exclusive(pmap_t pmap)370 pmap_lock_shared_to_exclusive(pmap_t pmap)
371 {
372 pmap_assert_locked(pmap, PMAP_LOCK_SHARED);
373
374 bool locked = lck_rw_lock_shared_to_exclusive(&pmap->rwlock);
375
376 #if !XNU_MONITOR
377 if (!locked) {
378 mp_enable_preemption();
379 }
380 #endif
381
382 return locked;
383 }
384
385 /**
386 * Release a pmap object's reader/writer lock.
387 *
388 * @param pmap The pmap whose lock to release.
389 * @param mode Which mode the lock should be in at time of release.
390 */
391 static inline void
pmap_unlock(pmap_t pmap,pmap_lock_mode_t mode)392 pmap_unlock(pmap_t pmap, pmap_lock_mode_t mode)
393 {
394 switch (mode) {
395 case PMAP_LOCK_SHARED:
396 lck_rw_unlock_shared(&pmap->rwlock);
397 break;
398 case PMAP_LOCK_EXCLUSIVE:
399 lck_rw_unlock_exclusive(&pmap->rwlock);
400 break;
401 default:
402 panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
403 }
404
405 #if !XNU_MONITOR
406 mp_enable_preemption();
407 #endif
408 }
409
410 #if __arm64__
411 /*
412 * Disable interrupts and return previous state.
413 *
 * The PPL has its own interrupt state facility separate from
 * ml_set_interrupts_enable(), since that function is not part of the
 * PPL and so may do things like manipulate untrusted data and
 * take ASTs.
418 *
419 * @return The previous interrupt state, to be restored with
420 * pmap_interrupts_restore().
421 */
422 static inline uint64_t __attribute__((warn_unused_result)) __used
pmap_interrupts_disable(void)423 pmap_interrupts_disable(void)
424 {
425 uint64_t state = __builtin_arm_rsr64("DAIF");
426
427 if ((state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE) {
428 __builtin_arm_wsr64("DAIFSet", DAIFSC_STANDARD_DISABLE);
429 }
430
431 return state;
432 }
433
434 /*
435 * Restore previous interrupt state.
436 *
437 * @param state The previous interrupt state to restore.
438 */
439 static inline void __used
pmap_interrupts_restore(uint64_t state)440 pmap_interrupts_restore(uint64_t state)
441 {
442 // no unknown bits?
443 assert((state & ~DAIF_ALL) == 0);
444
445 if (state != DAIF_STANDARD_DISABLE) {
446 __builtin_arm_wsr64("DAIF", state);
447 }
448 }
449
450 /*
451 * Query interrupt state.
452 *
 * ml_get_interrupts_enabled() is safe enough at the time of writing
 * this comment, but it is not considered part of the PPL and so
 * could change without notice; additionally, it presently only checks
 * DAIF_IRQ, so we have our own version.
457 *
 * @return true if interrupts are enabled (not fully disabled).
459 */
460
461 static inline bool __attribute__((warn_unused_result)) __used
pmap_interrupts_enabled(void)462 pmap_interrupts_enabled(void)
463 {
464 return (__builtin_arm_rsr64("DAIF") & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE;
465 }
466 #endif /* __arm64__ */
467
468 #endif /* _ARM_PMAP_PMAP_INTERNAL_H_ */
469