xref: /xnu-8020.121.3/osfmk/arm/pmap/pmap_internal.h (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /**
29  * This header file stores the types, and prototypes used strictly by the pmap
30  * itself. The public pmap API exported to the rest of the kernel should be
31  * located in osfmk/arm/pmap.h.
32  *
33  * This file will automatically include all of the other internal arm/pmap/
34  * headers so .c files will only need to include this one header.
35  */
36 #ifndef _ARM_PMAP_PMAP_INTERNAL_H_
37 #define _ARM_PMAP_PMAP_INTERNAL_H_
38 
39 #include <stdint.h>
40 
41 #include <kern/debug.h>
42 #include <kern/locks.h>
43 #include <mach/vm_types.h>
44 #include <mach_assert.h>
45 
46 #include <arm/cpu_data.h>
47 #include <arm/proc_reg.h>
48 #if defined(__arm64__)
49 #include <arm64/proc_reg.h>
50 #endif /* defined(__arm64__) */
51 
52 /**
53  * arm/pmap.h and the other /arm/pmap/ internal header files are safe to be
54  * included in this file since they shouldn't rely on any of the internal pmap
55  * header files (so no circular dependencies). Implementation files will only
56  * need to include this one header to get all of the relevant pmap types.
57  */
58 #include <arm/pmap.h>
59 #include <arm/pmap/pmap_data.h>
60 #include <arm/pmap/pmap_pt_geometry.h>
61 
#if XNU_MONITOR
/**
 * Temporary macros and prototypes needed to implement the ppl_handler_table.
 *
 * Eventually all calls to these macros will be in pmap_ppl_interface.c and
 * these macros can be moved into that .c file.
 *
 * The <function>_internal() externs in here are also only included to be used
 * by the ppl_handler_table. Once the ppl_handler_table moves into
 * pmap_ppl_interface.c, then these prototypes can be removed (the
 * PMAP_SUPPORT_PROTOTYPES() macro creates these prototypes automatically).
 *
 * TODO: Move PMAP_SUPPORT_PROTOTYPES_*() macros into pmap_ppl_interface.c and
 *       remove these _internal() prototypes.
 */

extern pmap_paddr_t pmap_release_ppl_pages_to_kernel_internal(void);
extern kern_return_t mapping_free_prime_internal(void);

extern void pmap_ledger_verify_size_internal(size_t);
extern ledger_t pmap_ledger_alloc_internal(void);
extern void pmap_ledger_free_internal(ledger_t);

/**
 * This macro generates prototypes for the *_internal functions, which represent
 * the PPL interface. When the PPL is enabled, this will also generate
 * prototypes for the PPL entrypoints (*_ppl), as well as generating the
 * entrypoints themselves.
 *
 * Since these macros generate code, they should only be called from a single
 * implementation file for each PPL entry point.
 */

/* Pastes together the assembly symbol name for a PPL entry point: _<name>_ppl. */
#define GEN_ASM_NAME(__function_name) _##__function_name##_ppl

/*
 * Declares both the <name>_internal() and <name>_ppl() prototypes, and emits
 * the <name>_ppl assembly trampoline, which places the PPL function index in
 * x15 and then branches to _aprr_ppl_enter.
 */
#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
	extern __return_type __function_name##_internal __function_args; \
	extern __return_type __function_name##_ppl __function_args; \
	__asm__ (".text \n" \
	         ".align 2 \n" \
	         ".globl " #__assembly_function_name "\n" \
	         #__assembly_function_name ":\n" \
	         "mov x15, " #__function_index "\n" \
	         "b _aprr_ppl_enter\n")

/*
 * Extra level of indirection so that the GEN_ASM_NAME() argument is fully
 * expanded before token pasting/stringization occurs (standard two-level
 * macro expansion idiom).
 */
#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
	PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name)

#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
	PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, GEN_ASM_NAME(__function_name))
#else /* XNU_MONITOR */
/* Without the PPL, only the <name>_internal() prototype is needed. */
#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
	extern __return_type __function_name##_internal __function_args
#endif /* XNU_MONITOR */
115 
116 /**
117  * Global variables exported to the rest of the internal pmap implementation.
118  */
119 extern lck_grp_t pmap_lck_grp;
120 extern bool hib_entry_pmap_lockdown;
121 extern pmap_paddr_t avail_start;
122 extern pmap_paddr_t avail_end;
123 extern uint32_t pmap_max_asids;
124 
125 /**
126  * Functions exported to the rest of the internal pmap implementation.
127  */
128 
129 #if XNU_MONITOR
130 extern void pmap_set_xprr_perm(unsigned int, unsigned int, unsigned int);
131 extern void pa_set_range_xprr_perm(pmap_paddr_t, pmap_paddr_t, unsigned int, unsigned int);
132 #endif /* XNU_MONITOR */
133 
134 extern int pmap_remove_range_options(
135 	pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *, vm_map_address_t *, bool *, int);
136 
137 extern void pmap_tte_deallocate(
138 	pmap_t, vm_offset_t, vm_offset_t, bool, tt_entry_t *, unsigned int);
139 
140 #if defined(PVH_FLAG_EXEC)
141 extern void pmap_set_ptov_ap(unsigned int, unsigned int, boolean_t);
142 #endif /* defined(PVH_FLAG_EXEC) */
143 
144 extern pmap_t current_pmap(void);
145 extern void pmap_tt_ledger_credit(pmap_t, vm_size_t);
146 extern void pmap_tt_ledger_debit(pmap_t, vm_size_t);
147 
148 extern void write_pte(pt_entry_t *, pt_entry_t);
149 
/**
 * The qsort function is used by various parts of the pmap but doesn't contain
 * its own header file with prototype so it must be manually extern'd.
 *
 * The `cmpfunc_t` type is a pointer to a function that should return the
 * following:
 *
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);
/* a = base of array, n = element count, es = element size, cmp = comparator. */
extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
163 
164 /**
165  * Inline and macro functions exported for usage by other pmap modules.
166  *
167  * In an effort to not cause any performance regressions while breaking up the
168  * pmap, I'm keeping all functions originally marked as "static inline", as
169  * inline and moving them into header files to be shared across the pmap
170  * modules. In reality, many of these functions probably don't need to be inline
171  * and can be moved back into a .c file.
172  *
173  * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
174  */
175 
/**
 * Macro used to ensure that pmap data structures aren't modified during
 * hibernation image copying (hib_entry_pmap_lockdown is set once copying
 * has begun).
 */
#if HIBERNATION
#define ASSERT_NOT_HIBERNATING() (assertf(!hib_entry_pmap_lockdown, \
	"Attempted to modify PMAP data structures after hibernation image copying has begun."))
#else
/* Hibernation support is compiled out, so the check is a no-op. */
#define ASSERT_NOT_HIBERNATING()
#endif /* HIBERNATION */
186 
/**
 * Helper macro for rounding an address up to a correctly aligned value.
 *
 * @note `align` must be a power of two for the mask trick to work.
 *
 * The inner parentheses around the addition are added for macro hygiene
 * (CERT PRE02-C): the previous form relied on `+` binding tighter than `&`,
 * which produced the same result but obscured the intended grouping.
 */
#define PMAP_ALIGN(addr, align) (((addr) + ((align) - 1)) & ~((align) - 1))
189 
190 /**
191  * pmap_data.h must be included before this point so that pmap_lock_mode_t is
192  * defined before the rest of the locking code.
193  */
194 
195 /**
196  * Initialize a pmap object's reader/writer lock.
197  *
198  * @param pmap The pmap whose lock to initialize.
199  */
200 static inline void
pmap_lock_init(pmap_t pmap)201 pmap_lock_init(pmap_t pmap)
202 {
203 	lck_rw_init(&pmap->rwlock, &pmap_lck_grp, 0);
204 	pmap->rwlock.lck_rw_can_sleep = FALSE;
205 }
206 
/**
 * Destroy a pmap object's reader/writer lock.
 *
 * Must only be called once the lock is no longer in use (counterpart to
 * pmap_lock_init()).
 *
 * @param pmap The pmap whose lock to destroy.
 */
static inline void
pmap_lock_destroy(pmap_t pmap)
{
	lck_rw_destroy(&pmap->rwlock, &pmap_lck_grp);
}
217 
218 /**
219  * Assert that the pmap lock is held in the given mode.
220  *
221  * @param pmap The pmap whose lock to assert is being held.
222  * @param mode The mode the lock should be held in.
223  */
224 static inline void
pmap_assert_locked(__unused pmap_t pmap,__unused pmap_lock_mode_t mode)225 pmap_assert_locked(__unused pmap_t pmap, __unused pmap_lock_mode_t mode)
226 {
227 #if MACH_ASSERT
228 	switch (mode) {
229 	case PMAP_LOCK_SHARED:
230 		lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_SHARED);
231 		break;
232 	case PMAP_LOCK_EXCLUSIVE:
233 		lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_EXCLUSIVE);
234 		break;
235 	default:
236 		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __FUNCTION__, pmap, mode);
237 	}
238 #endif
239 }
240 
/**
 * Assert that the pmap lock is held in any mode (shared or exclusive).
 *
 * This check is compiled out on non-MACH_ASSERT (release) builds.
 *
 * @param pmap The pmap whose lock should be held.
 */
__unused static inline void
pmap_assert_locked_any(__unused pmap_t pmap)
{
#if MACH_ASSERT
	lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_HELD);
#endif /* MACH_ASSERT */
}
253 
254 /**
255  * Acquire a pmap object's reader/writer lock as either shared (read-only) or
256  * exclusive (read/write).
257  *
258  * @note Failed attempts to grab the lock will NOT go to sleep, they'll spin
259  *       until the lock can be acquired.
260  *
261  * @param pmap The pmap whose lock to acquire.
262  * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
263  */
264 static inline void
pmap_lock(pmap_t pmap,pmap_lock_mode_t mode)265 pmap_lock(pmap_t pmap, pmap_lock_mode_t mode)
266 {
267 #if !XNU_MONITOR
268 	mp_disable_preemption();
269 #endif
270 
271 	switch (mode) {
272 	case PMAP_LOCK_SHARED:
273 		lck_rw_lock_shared(&pmap->rwlock);
274 		break;
275 	case PMAP_LOCK_EXCLUSIVE:
276 		lck_rw_lock_exclusive(&pmap->rwlock);
277 		break;
278 	default:
279 		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
280 	}
281 }
282 
283 /**
284  * Attempt to acquire the pmap lock in the specified mode. If the lock couldn't
285  * be acquired, then return immediately instead of spinning.
286  *
287  * @param pmap The pmap whose lock to attempt to acquire.
288  * @param mode Whether to grab the lock as shared (read-only) or exclusive (read/write).
289  *
290  * @return True if the lock was acquired, false otherwise.
291  */
292 static inline bool
pmap_try_lock(pmap_t pmap,pmap_lock_mode_t mode)293 pmap_try_lock(pmap_t pmap, pmap_lock_mode_t mode)
294 {
295 	bool ret = false;
296 
297 #if !XNU_MONITOR
298 	mp_disable_preemption();
299 #endif
300 
301 	switch (mode) {
302 	case PMAP_LOCK_SHARED:
303 		ret = lck_rw_try_lock_shared(&pmap->rwlock);
304 		break;
305 	case PMAP_LOCK_EXCLUSIVE:
306 		ret = lck_rw_try_lock_exclusive(&pmap->rwlock);
307 		break;
308 	default:
309 		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
310 	}
311 
312 	if (!ret) {
313 #if !XNU_MONITOR
314 		mp_enable_preemption();
315 #endif
316 	}
317 
318 	return ret;
319 }
320 
321 /**
322  * Attempts to promote an already acquired pmap lock from shared to exclusive.
323  *
324  * @param pmap The pmap whose lock should be promoted from shared to exclusive.
325  *
326  * @return True if successfully promoted, otherwise false upon failure in
327  *         which case the shared lock is dropped.
328  */
329 static inline bool
pmap_lock_shared_to_exclusive(pmap_t pmap)330 pmap_lock_shared_to_exclusive(pmap_t pmap)
331 {
332 	pmap_assert_locked(pmap, PMAP_LOCK_SHARED);
333 
334 	bool locked = lck_rw_lock_shared_to_exclusive(&pmap->rwlock);
335 
336 #if !XNU_MONITOR
337 	if (!locked) {
338 		mp_enable_preemption();
339 	}
340 #endif
341 
342 	return locked;
343 }
344 
345 /**
346  * Release a pmap object's reader/writer lock.
347  *
348  * @param pmap The pmap whose lock to release.
349  * @param mode Which mode the lock should be in at time of release.
350  */
351 static inline void
pmap_unlock(pmap_t pmap,pmap_lock_mode_t mode)352 pmap_unlock(pmap_t pmap, pmap_lock_mode_t mode)
353 {
354 	switch (mode) {
355 	case PMAP_LOCK_SHARED:
356 		lck_rw_unlock_shared(&pmap->rwlock);
357 		break;
358 	case PMAP_LOCK_EXCLUSIVE:
359 		lck_rw_unlock_exclusive(&pmap->rwlock);
360 		break;
361 	default:
362 		panic("%s: Unknown pmap_lock_mode. pmap=%p, mode=%d", __func__, pmap, mode);
363 	}
364 
365 #if !XNU_MONITOR
366 	mp_enable_preemption();
367 #endif
368 }
369 
370 #if __arm64__
371 /*
372  * Disable interrupts and return previous state.
373  *
374  * The PPL has its own interrupt state facility separately from
375  * ml_set_interrupts_enable(), since that function is not part of the
376  * PPL, and so doing things like manipulating untrusted data and
377  * taking ASTs.
378  *
379  * @return The previous interrupt state, to be restored with
380  *         pmap_interrupts_restore().
381  */
382 static inline uint64_t __attribute__((warn_unused_result)) __used
pmap_interrupts_disable(void)383 pmap_interrupts_disable(void)
384 {
385 	uint64_t state = __builtin_arm_rsr64("DAIF");
386 
387 	if ((state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE) {
388 		__builtin_arm_wsr64("DAIFSet", DAIFSC_STANDARD_DISABLE);
389 	}
390 
391 	return state;
392 }
393 
394 /*
395  * Restore previous interrupt state.
396  *
397  * @param state The previous interrupt state to restore.
398  */
399 static inline void __used
pmap_interrupts_restore(uint64_t state)400 pmap_interrupts_restore(uint64_t state)
401 {
402 	// no unknown bits?
403 	assert((state & ~DAIF_ALL) == 0);
404 
405 	if (state != DAIF_STANDARD_DISABLE) {
406 		__builtin_arm_wsr64("DAIF", state);
407 	}
408 }
409 
410 /*
411  * Query interrupt state.
412  *
413  * ml_get_interrupts_enabled() is safe enough at the time of writing
414  * this comment, but because it is not considered part of the PPL, so
415  * could change without notice, and because it presently only checks
416  * DAIF_IRQ, we have our own version.
417  *
418  * @return true if interrupts are enable (not fully disabled).
419  */
420 
421 static inline bool __attribute__((warn_unused_result)) __used
pmap_interrupts_enabled(void)422 pmap_interrupts_enabled(void)
423 {
424 	return (__builtin_arm_rsr64("DAIF") & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE;
425 }
426 #endif /* __arm64__ */
427 
428 #endif /* _ARM_PMAP_PMAP_INTERNAL_H_ */
429