/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * This header file stores the types and prototypes used strictly by the pmap
 * itself. The public pmap API exported to the rest of the kernel should be
 * located in osfmk/arm/pmap.h.
 *
 * This file will automatically include all of the other internal arm/pmap/
 * headers, so .c files only need to include this one header.
 */
#ifndef _ARM_PMAP_PMAP_INTERNAL_H_
#define _ARM_PMAP_PMAP_INTERNAL_H_

#include <stdint.h>

#include <kern/debug.h>
#include <kern/locks.h>
#include <mach/vm_types.h>
#include <mach_assert.h>

#include <arm/cpu_data.h>
#include <arm64/proc_reg.h>

/**
 * arm/pmap.h and the other /arm/pmap/ internal header files are safe to be
 * included in this file since they shouldn't rely on this header themselves
 * (so no circular dependencies). Implementation files will only need to
 * include this one header to get all of the relevant pmap types.
 */
#include <arm/pmap.h>
#include <arm/pmap/pmap_data.h>
#include <arm/pmap/pmap_pt_geometry.h>

#if XNU_MONITOR
/**
 * Temporary macros and prototypes needed to implement the ppl_handler_table.
 *
 * Eventually all calls to these macros will be in pmap_ppl_interface.c and
 * these macros can be moved into that .c file.
 *
 * The <function>_internal() externs in here are also only included to be used
 * by the ppl_handler_table. Once the ppl_handler_table moves into
 * pmap_ppl_interface.c, then these prototypes can be removed (the
 * PMAP_SUPPORT_PROTOTYPES() macro creates these prototypes automatically).
 *
 * TODO: Move PMAP_SUPPORT_PROTOTYPES_*() macros into pmap_ppl_interface.c and
 *       remove these _internal() prototypes.
 */

extern pmap_paddr_t pmap_release_ppl_pages_to_kernel_internal(void);
extern kern_return_t mapping_free_prime_internal(void);

extern void pmap_ledger_verify_size_internal(size_t);
extern ledger_t pmap_ledger_alloc_internal(void);
extern void pmap_ledger_free_internal(ledger_t);

/**
 * This macro generates prototypes for the *_internal functions, which represent
 * the PPL interface. When the PPL is enabled, this will also generate
 * prototypes for the PPL entrypoints (*_ppl), as well as generating the
 * entrypoints themselves.
 *
 * Since these macros generate code, they should only be called from a single
 * implementation file for each PPL entry point.
 */
#define GEN_ASM_NAME(__function_name) _##__function_name##_ppl

#if BTI_ENFORCED
#define PMAP_SUPPORT_PROTOTYPES_BTI_LANDING_PAD "bti c\n"
#else
#define PMAP_SUPPORT_PROTOTYPES_BTI_LANDING_PAD ""
#endif /* BTI_ENFORCED */

#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
	extern __return_type __function_name##_internal __function_args; \
	extern __return_type __function_name##_ppl __function_args; \
	__asm__ (".text \n" \
	         ".align 2 \n" \
	         ".globl " #__assembly_function_name "\n" \
	         #__assembly_function_name ":\n" \
	         PMAP_SUPPORT_PROTOTYPES_BTI_LANDING_PAD \
	         "mov x15, " #__function_index "\n" \
	         "b _aprr_ppl_enter\n")

#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
	PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name)

#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
	PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, GEN_ASM_NAME(__function_name))
#else /* XNU_MONITOR */
#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
	extern __return_type __function_name##_internal __function_args
#endif /* XNU_MONITOR */
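
/*
 * Usage sketch (illustrative only, not a definition made by this header): an
 * implementation file declares a PPL entry point by invoking the macro with
 * the return type, the base function name, the parenthesized argument list,
 * and the function's dispatch-table index. The index constant below is an
 * assumed placeholder for whatever the real dispatch enum provides.
 *
 *	PMAP_SUPPORT_PROTOTYPES(
 *		kern_return_t,
 *		mapping_free_prime, (void),
 *		MAPPING_FREE_PRIME_INDEX);
 *
 * With XNU_MONITOR enabled this expands to prototypes for
 * mapping_free_prime_internal()/mapping_free_prime_ppl() plus the
 * _mapping_free_prime_ppl assembly trampoline that loads the index into x15
 * and branches to _aprr_ppl_enter; without it, only the _internal prototype
 * is emitted.
 */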

/**
 * Global variables exported to the rest of the internal pmap implementation.
 */
extern lck_grp_t pmap_lck_grp;
extern bool hib_entry_pmap_lockdown;
extern pmap_paddr_t avail_start;
extern pmap_paddr_t avail_end;
extern uint32_t pmap_max_asids;

/**
 * Functions exported to the rest of the internal pmap implementation.
 */

#if XNU_MONITOR
extern void pmap_set_xprr_perm(unsigned int, unsigned int, unsigned int);
extern void pa_set_range_xprr_perm(pmap_paddr_t, pmap_paddr_t, unsigned int, unsigned int);
#endif /* XNU_MONITOR */

extern int pmap_remove_range_options(
	pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *, vm_map_address_t *, bool *, int);

extern void pmap_tte_deallocate(
	pmap_t, vm_offset_t, vm_offset_t, bool, tt_entry_t *, unsigned int);

#if defined(PVH_FLAG_EXEC)
extern void pmap_set_ptov_ap(unsigned int, unsigned int, boolean_t);
#endif /* defined(PVH_FLAG_EXEC) */


extern pmap_t current_pmap(void);
extern void pmap_tt_ledger_credit(pmap_t, vm_size_t);
extern void pmap_tt_ledger_debit(pmap_t, vm_size_t);

extern void write_pte(pt_entry_t *, pt_entry_t);

/**
 * The qsort function is used by various parts of the pmap but doesn't have
 * its own header file with a prototype, so it must be manually extern'd.
 *
 * The `cmpfunc_t` type is a pointer to a function that should return the
 * following:
 *
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);
extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
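
/*
 * Usage sketch (illustrative only, not part of this header): a comparator
 * matching cmpfunc_t that sorts an array of physical addresses in ascending
 * order. The helper name and the array variables are assumptions for
 * illustration.
 *
 *	static int
 *	pa_compare(const void *a, const void *b)
 *	{
 *		const pmap_paddr_t pa_a = *(const pmap_paddr_t *)a;
 *		const pmap_paddr_t pa_b = *(const pmap_paddr_t *)b;
 *		return (pa_a < pa_b) ? -1 : ((pa_a > pa_b) ? 1 : 0);
 *	}
 *
 *	qsort(pa_array, pa_count, sizeof(pmap_paddr_t), pa_compare);
 */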

/**
 * Inline and macro functions exported for usage by other pmap modules.
 *
 * In an effort to not cause any performance regressions while breaking up the
 * pmap, I'm keeping all functions originally marked as "static inline" as
 * inline, and moving them into header files to be shared across the pmap
 * modules. In reality, many of these functions probably don't need to be
 * inline and can be moved back into a .c file.
 *
 * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
 */

/**
 * Macro used to ensure that pmap data structures aren't modified during
 * hibernation image copying.
 */
#if HIBERNATION
#define ASSERT_NOT_HIBERNATING() (assertf(!hib_entry_pmap_lockdown, \
	"Attempted to modify PMAP data structures after hibernation image copying has begun."))
#else
#define ASSERT_NOT_HIBERNATING()
#endif /* HIBERNATION */
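
/*
 * Usage sketch (illustrative only): a pmap routine that is about to mutate
 * mapping state typically asserts this first, so that a modification after
 * hibernation image copying has begun is caught on kernels with assertions
 * enabled.
 *
 *	ASSERT_NOT_HIBERNATING();
 *	write_pte(pte_p, new_pte);
 */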

/* Helper macro for rounding an address up to a correctly aligned value. */
#define PMAP_ALIGN(addr, align) ((addr) + ((align) - 1) & ~((align) - 1))
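
/*
 * Worked example (illustrative only): with 16KB alignment, an address just
 * past a boundary rounds up to the next boundary, while an already-aligned
 * address is unchanged. Note that `+` binds tighter than `&`, so the macro
 * computes ((addr) + ((align) - 1)) & ~((align) - 1).
 *
 *	PMAP_ALIGN(0x4001, 0x4000); // yields 0x8000
 *	PMAP_ALIGN(0x8000, 0x4000); // yields 0x8000
 */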

/**
 * pmap_data.h must be included before this point so that pmap_lock_mode_t is
 * defined before the rest of the locking code.
 */

/**
 * Initialize a pmap object's reader/writer lock.
 *
 * @param pmap The pmap whose lock to initialize.
 */
static inline void
pmap_lock_init(pmap_t pmap)
{
	lck_rw_init(&pmap->rwlock, &pmap_lck_grp, 0);
	pmap->rwlock.lck_rw_can_sleep = FALSE;
}

/**
 * Destroy a pmap object's reader/writer lock.
 *
 * @param pmap The pmap whose lock to destroy.
 */
static inline void
pmap_lock_destroy(pmap_t pmap)
{
	lck_rw_destroy(&pmap->rwlock, &pmap_lck_grp);
}

/**
 * Assert that the pmap lock is held in the given mode.
 *
 * @param pmap The pmap whose lock to assert is being held.
 * @param mode The mode the lock should be held in.
 */
static inline void
pmap_assert_locked(__unused pmap_t pmap, __unused pmap_lock_mode_t mode)
{
#if MACH_ASSERT
	switch (mode) {
	case PMAP_LOCK_SHARED:
		LCK_RW_ASSERT(&pmap->rwlock, LCK_RW_ASSERT_SHARED);
		break;
	case PMAP_LOCK_EXCLUSIVE:
		LCK_RW_ASSERT(&pmap->rwlock, LCK_RW_ASSERT_EXCLUSIVE);
		break;
	default:
		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __FUNCTION__, pmap, mode);
	}
#endif
}

/**
 * Assert that the pmap lock is held in any mode.
 *
 * @param pmap The pmap whose lock should be held.
 */
__unused static inline void
pmap_assert_locked_any(__unused pmap_t pmap)
{
	LCK_RW_ASSERT(&pmap->rwlock, LCK_RW_ASSERT_HELD);
}

/**
 * Acquire a pmap object's reader/writer lock as either shared (read-only) or
 * exclusive (read/write).
 *
 * @note Failed attempts to grab the lock will NOT go to sleep, they'll spin
 *       until the lock can be acquired.
 *
 * @param pmap The pmap whose lock to acquire.
 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
 */
static inline void
pmap_lock(pmap_t pmap, pmap_lock_mode_t mode)
{
#if !XNU_MONITOR
	mp_disable_preemption();
#endif

	switch (mode) {
	case PMAP_LOCK_SHARED:
		lck_rw_lock_shared(&pmap->rwlock);
		break;
	case PMAP_LOCK_EXCLUSIVE:
		lck_rw_lock_exclusive(&pmap->rwlock);
		break;
	default:
		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
	}
}

/**
 * Attempt to acquire the pmap lock in the specified mode. If the lock couldn't
 * be acquired, then spin until it can be or a preemption is pending.
 *
 * @param pmap The pmap whose lock to attempt to acquire.
 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
 *
 * @return true if the lock was acquired, false if it was not and the caller should
 *         abort to some preemptible state to allow the preemption.
 */
static inline bool
pmap_lock_preempt(pmap_t pmap, pmap_lock_mode_t mode)
{
	bool ret = false;

	/**
	 * When the lock cannot be acquired, we check if we are preemptible.
	 *
	 * If we are already preemptible, there's no point in exiting this function and aborting.
	 *
	 * Also, if we are very early in boot, we should just spin. This is similar to how
	 * pmap_verify_preemptible() is used in pmap.
	 */
	do {
#if !XNU_MONITOR
		mp_disable_preemption();
#endif

		bool (^check_preemption)(void) = ^{
			return pmap_pending_preemption();
		};

		switch (mode) {
		case PMAP_LOCK_SHARED:
			ret = lck_rw_lock_shared_b(&pmap->rwlock, check_preemption);
			break;
		case PMAP_LOCK_EXCLUSIVE:
			ret = lck_rw_lock_exclusive_b(&pmap->rwlock, check_preemption);
			break;
		default:
			panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
		}

		if (!ret) {
#if !XNU_MONITOR
			mp_enable_preemption();
#endif
		}
	} while (!ret && (preemption_enabled() || (startup_phase < STARTUP_SUB_EARLY_BOOT)));

	return ret;
}
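
/*
 * Usage sketch (illustrative only): a long-running operation that must stay
 * preemption-friendly takes the lock with pmap_lock_preempt() and bails out
 * to its caller when a preemption is pending, so the caller can retry from a
 * preemptible state. The return-code convention below is an assumption for
 * illustration.
 *
 *	if (!pmap_lock_preempt(pmap, PMAP_LOCK_EXCLUSIVE)) {
 *		return KERN_ABORTED; // caller retries once it has been preemptible
 *	}
 *	// ... modify mappings ...
 *	pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
 */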

/**
 * Attempt to acquire the pmap lock in the specified mode. If the lock couldn't
 * be acquired, then return immediately instead of spinning.
 *
 * @param pmap The pmap whose lock to attempt to acquire.
 * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
 *
 * @return True if the lock was acquired, false otherwise.
 */
static inline bool
pmap_try_lock(pmap_t pmap, pmap_lock_mode_t mode)
{
	bool ret = false;

#if !XNU_MONITOR
	mp_disable_preemption();
#endif

	switch (mode) {
	case PMAP_LOCK_SHARED:
		ret = lck_rw_try_lock_shared(&pmap->rwlock);
		break;
	case PMAP_LOCK_EXCLUSIVE:
		ret = lck_rw_try_lock_exclusive(&pmap->rwlock);
		break;
	default:
		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
	}

	if (!ret) {
#if !XNU_MONITOR
		mp_enable_preemption();
#endif
	}

	return ret;
}
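
/*
 * Usage sketch (illustrative only): callers that cannot afford to spin (for
 * example because they already hold another lock) can use the try variant and
 * fall back to an ordinary acquisition after releasing whatever made spinning
 * unsafe.
 *
 *	if (!pmap_try_lock(pmap, PMAP_LOCK_SHARED)) {
 *		// drop the conflicting lock(s) here, then block/spin for real
 *		pmap_lock(pmap, PMAP_LOCK_SHARED);
 *	}
 */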

/**
 * Attempts to promote an already acquired pmap lock from shared to exclusive.
 *
 * @param pmap The pmap whose lock should be promoted from shared to exclusive.
 *
 * @return True if successfully promoted, otherwise false upon failure in
 *         which case the shared lock is dropped.
 */
static inline bool
pmap_lock_shared_to_exclusive(pmap_t pmap)
{
	pmap_assert_locked(pmap, PMAP_LOCK_SHARED);

	bool locked = lck_rw_lock_shared_to_exclusive(&pmap->rwlock);

#if !XNU_MONITOR
	if (!locked) {
		mp_enable_preemption();
	}
#endif

	return locked;
}
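
/*
 * Usage sketch (illustrative only): because a failed promotion drops the
 * shared lock entirely, the caller must re-acquire the lock exclusively and
 * re-validate anything it observed under the shared lock before proceeding.
 *
 *	if (!pmap_lock_shared_to_exclusive(pmap)) {
 *		pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
 *		// re-check state read while the lock was held shared; it may
 *		// have changed while the lock was dropped
 *	}
 */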

/**
 * Release a pmap object's reader/writer lock.
 *
 * @param pmap The pmap whose lock to release.
 * @param mode Which mode the lock should be in at time of release.
 */
static inline void
pmap_unlock(pmap_t pmap, pmap_lock_mode_t mode)
{
	switch (mode) {
	case PMAP_LOCK_SHARED:
		lck_rw_unlock_shared(&pmap->rwlock);
		break;
	case PMAP_LOCK_EXCLUSIVE:
		lck_rw_unlock_exclusive(&pmap->rwlock);
		break;
	default:
		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
	}

#if !XNU_MONITOR
	mp_enable_preemption();
#endif
}
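
/*
 * Usage sketch (illustrative only): pmap_lock() and pmap_unlock() must be
 * paired with the same mode, since the unlock path both releases the rwlock
 * in that mode and re-enables preemption on non-PPL configurations.
 *
 *	pmap_lock(pmap, PMAP_LOCK_SHARED);
 *	pmap_assert_locked(pmap, PMAP_LOCK_SHARED);
 *	// ... read-only walk of the page tables ...
 *	pmap_unlock(pmap, PMAP_LOCK_SHARED);
 */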

#if __arm64__
/*
 * Disable interrupts and return previous state.
 *
 * The PPL has its own interrupt state facility separate from
 * ml_set_interrupts_enable(), since that function is not part of the
 * PPL and does things like manipulating untrusted data and
 * taking ASTs.
 *
 * @return The previous interrupt state, to be restored with
 *         pmap_interrupts_restore().
 */
static inline uint64_t __attribute__((warn_unused_result)) __used
pmap_interrupts_disable(void)
{
	uint64_t state = __builtin_arm_rsr64("DAIF");

	/* Ensure that debug exceptions are masked. */
	assert((state & DAIF_DEBUGF) == DAIF_DEBUGF);

	if ((state & DAIF_ALL) != DAIF_ALL) {
		__builtin_arm_wsr64("DAIFSet", DAIFSC_ALL);
	}

	return state;
}

/*
 * Restore previous interrupt state.
 *
 * @param state The previous interrupt state to restore.
 */
static inline void __used
pmap_interrupts_restore(uint64_t state)
{
	// no unknown bits?
	assert((state & ~DAIF_ALL) == 0);

	/* Assert that previous state had debug exceptions masked. */
	assert((state & DAIF_DEBUGF) == DAIF_DEBUGF);

	if (state != DAIF_ALL) {
		__builtin_arm_wsr64("DAIF", state);
	}
}
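
/*
 * Usage sketch (illustrative only): the two calls are paired, with the saved
 * DAIF state flowing from the disable call to the restore call around a
 * critical section that must not be interrupted.
 *
 *	const uint64_t daif_state = pmap_interrupts_disable();
 *	// ... work that must not be interrupted ...
 *	pmap_interrupts_restore(daif_state);
 */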

/*
 * Query interrupt state.
 *
 * ml_get_interrupts_enabled() is safe enough at the time of writing
 * this comment, but because it is not considered part of the PPL (and
 * so could change without notice), and because it presently only checks
 * DAIF_IRQ, we have our own version.
 *
 * @return true if interrupts are enabled (not fully disabled).
 */

static inline bool __attribute__((warn_unused_result)) __used
pmap_interrupts_enabled(void)
{
	return (__builtin_arm_rsr64("DAIF") & DAIF_ALL) != DAIF_ALL;
}
#endif /* __arm64__ */

#endif /* _ARM_PMAP_PMAP_INTERNAL_H_ */