xref: /xnu-11215.81.4/osfmk/arm64/sptm/pmap/pmap_data.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <arm/cpu_data_internal.h>
29 #include <kern/queue.h>
30 #include <libkern/OSAtomic.h>
31 #include <libkern/section_keywords.h>
32 #include <pexpert/device_tree.h>
33 #include <os/atomic_private.h>
34 #include <vm/cpm_internal.h>
35 #include <vm/vm_kern.h>
36 #include <vm/vm_protos.h>
37 #include <vm/vm_object_xnu.h>
38 #include <vm/vm_page_internal.h>
39 #include <vm/vm_pageout.h>
40 
41 #include <arm64/sptm/pmap/pmap_internal.h>
42 
43 /**
44  * Physical Page Attribute Table.
45  *
46  * Array that contains a set of flags for each kernel-managed physical VM page.
47  *
48  * @note There can be a disparity between the VM page size and the underlying
49  *       hardware page size for a specific address space. In those cases, it's
50  *       possible that multiple hardware pages will share the same set of
51  *       attributes. The VM operates on regions of memory by the VM page size
52  *       and is aware that all hardware pages within each VM page share
53  *       attributes.
54  */
55 SECURITY_READ_ONLY_LATE(volatile pp_attr_t*) pp_attr_table = (volatile pp_attr_t*)NULL;
56 
57 /**
58  * Physical to Virtual Table.
59  *
60  * Data structure that contains a list of virtual mappings for each kernel-
61  * managed physical page. Other flags and metadata are also stored in this
62  * structure on a per-physical-page basis.
63  *
64  * This structure is arranged as an array of pointers, where each pointer can
65  * point to one of three different types of data (single mapping, multiple
66  * mappings, or page table descriptor). Metadata about each page (including the
67  * type of pointer) is located in the lower and upper bits of the pointer.
68  * These bits need to be set/masked out to be able to dereference the pointer,
69  * so it's recommended to use the provided API in pmap_data.h to access the
70  * pv_head_table since it handles these details for you.
71  */
72 SECURITY_READ_ONLY_LATE(uintptr_t*) pv_head_table = NULL;
73 
74 /* Simple linked-list structure used in various page free lists. */
75 typedef struct page_free_entry {
76 	/**
77 	 * The first word in an empty page on a free list is used as a pointer to
78 	 * the next free page in the list.
79 	 */
80 	struct page_free_entry *next;
81 } page_free_entry_t;
82 
83 /* Represents a NULL entry in various page free lists. */
84 #define PAGE_FREE_ENTRY_NULL ((page_free_entry_t *) 0)
85 
86 /**
87  * This VM object will contain every VM page being used by the pmap. This acts
88  * as a convenient place to put pmap pages to keep the VM from reusing them, as
89  * well as providing a way for looping over every page being used by the pmap.
90  */
91 struct vm_object pmap_object_store VM_PAGE_PACKED_ALIGNED;
92 
93 /* Pointer to the pmap's VM object that can't be modified after machine_lockdown(). */
94 SECURITY_READ_ONLY_LATE(vm_object_t) pmap_object = &pmap_object_store;
95 
96 /**
97  * This variable, used for debugging purposes only, keeps track of how many pages
98  * are currently in use by the pmap layer. Once a page is given back to the VM,
99  * then inuse_pmap_pages_count will be decremented.
100  *
101  * Even if a page is sitting in one of the pmap's various free lists and hasn't
102  * been allocated for usage, it is still considered "used" by the pmap, from
103  * the perspective of the VM.
104  */
105 unsigned int inuse_pmap_pages_count = 0;
106 
107 /**
108  * Default watermark values used to keep a healthy supply of physical-to-virtual
109  * entries (PVEs) always available. These values can be overridden by the device
110  * tree (see pmap_compute_pv_targets() for more info).
111  */
112 #define PV_KERN_LOW_WATER_MARK_DEFAULT (0x400)
113 #define PV_ALLOC_CHUNK_INITIAL         (0x400)
114 #define PV_KERN_ALLOC_CHUNK_INITIAL    (0x400)
115 
116 /**
117  * The pv_free array acts as a ring buffer where each entry points to a linked
118  * list of PVEs whose length is set by this define.
119  */
120 #define PV_BATCH_SIZE (PAGE_SIZE / sizeof(pv_entry_t))
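/*
 * Illustrative arithmetic only (the exact sizeof(pv_entry_t) is defined in
 * pmap_data.h and may differ): if a pv_entry_t were 16 bytes, a 16K page
 * would yield a batch of 1024 PVEs, while a 4K page would yield 256.
 */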
121 
122 /* The batch allocation code assumes that a batch can fit within a single page. */
123 #if __ARM_16K_PG__
124 /**
125  * PAGE_SIZE is a variable on arm64 systems with 4K VM pages, so no static
126  * assert on those systems.
127  */
128 static_assert((PV_BATCH_SIZE * sizeof(pv_entry_t)) <= PAGE_SIZE);
129 #endif /* __ARM_16K_PG__ */
130 
131 /**
132  * The number of PVEs to attempt to keep in the kernel-dedicated free list. If
133  * the number of entries is below this value, then allocate more.
134  */
135 static uint32_t pv_kern_low_water_mark MARK_AS_PMAP_DATA = PV_KERN_LOW_WATER_MARK_DEFAULT;
136 
137 /**
138  * The initial number of PVEs to allocate during bootstrap (can be overridden in
139  * the device tree, see pmap_compute_pv_targets() for more info).
140  */
141 uint32_t pv_alloc_initial_target MARK_AS_PMAP_DATA = PV_ALLOC_CHUNK_INITIAL * MAX_CPUS;
142 uint32_t pv_kern_alloc_initial_target MARK_AS_PMAP_DATA = PV_KERN_ALLOC_CHUNK_INITIAL;
143 
144 /**
145  * Global variables strictly used for debugging purposes. These variables keep
146  * track of the number of pages being used for PVE objects, PTD objects, and the
147  * total number of PVEs that have been added to the global or kernel-dedicated
148  * free lists respectively.
149  */
150 static _Atomic unsigned int pv_page_count MARK_AS_PMAP_DATA = 0;
151 static unsigned int ptd_page_count MARK_AS_PMAP_DATA = 0;
152 static unsigned pmap_reserve_replenish_stat MARK_AS_PMAP_DATA = 0;
153 static unsigned pmap_kern_reserve_alloc_stat MARK_AS_PMAP_DATA = 0;
154 
155 /**
156  * Number of linked lists of PVEs ("batches") in the global PV free ring buffer.
157  * This must be a power of two for the pv_free_array_n_elems() logic to work.
158  */
159 #define PV_FREE_ARRAY_SIZE (256U)
160 
161 /**
162  * A ring buffer where each entry in the buffer is a linked list of PV entries
163  * (called "batches"). Allocations out of this array will always operate on
164  * a PV_BATCH_SIZE amount of entries at a time.
165  */
166 static pv_free_list_t pv_free_ring[PV_FREE_ARRAY_SIZE] MARK_AS_PMAP_DATA = {0};
167 
168 /* Read and write indices for the pv_free ring buffer. */
169 static uint16_t pv_free_read_idx MARK_AS_PMAP_DATA = 0;
170 static uint16_t pv_free_write_idx MARK_AS_PMAP_DATA = 0;
171 
172 /**
173  * Make sure the PV free array is small enough so that all elements can be
174  * properly indexed by pv_free_[read/write]_idx.
175  */
176 static_assert(PV_FREE_ARRAY_SIZE <= (1 << (sizeof(pv_free_read_idx) * 8)));
177 
178 /**
179  * Return the number of free batches available for allocation out of the PV free
180  * ring buffer. Each batch is a linked list of PVEs with length PV_BATCH_SIZE.
181  *
182  * @note This function requires that PV_FREE_ARRAY_SIZE is a power of two.
183  */
184 static inline uint16_t
185 pv_free_array_n_elems(void)
186 {
187 	return (pv_free_write_idx - pv_free_read_idx) & (PV_FREE_ARRAY_SIZE - 1);
188 }
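/*
 * Worked example (illustrative): with PV_FREE_ARRAY_SIZE == 256, the uint16_t
 * indices are free to wrap; e.g. pv_free_read_idx == 0xFFFE and
 * pv_free_write_idx == 0x0003 gives (0x0003 - 0xFFFE) & 0xFF == 5 batches
 * available, so the masked subtraction remains correct across index wraparound.
 */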
189 
190 /* Free list of PV entries dedicated for usage by the kernel. */
191 static pv_free_list_t pv_kern_free MARK_AS_PMAP_DATA = {0};
192 
193 /* Locks for the global and kernel-dedicated PV free lists. */
194 static MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pv_free_array_lock, 0);
195 static MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pv_kern_free_list_lock, 0);
196 
197 /* Represents a null page table descriptor (PTD). */
198 #define PTD_ENTRY_NULL ((pt_desc_t *) 0)
199 
200 /* Running free list of PTD nodes. */
201 static pt_desc_t *ptd_free_list MARK_AS_PMAP_DATA = PTD_ENTRY_NULL;
202 
203 /* The number of free PTD nodes available in the free list. */
204 static unsigned int ptd_free_count MARK_AS_PMAP_DATA = 0;
205 
206 /**
207  * The number of PTD objects located in each page being used by the PTD
208  * allocator. The PTD objects share each page with their associated ptd_info_t
209  * objects (with cache-line alignment padding between them). The maximum number
210  * of PTDs that can be placed into a single page is calculated once at boot.
211  */
212 static SECURITY_READ_ONLY_LATE(unsigned) ptd_per_page = 0;
213 
214 /**
215  * The offset in bytes from the beginning of a page of PTD objects at which
216  * the associated ptd_info_t objects begin. This is calculated once during
217  * boot to maximize the number of PTD and ptd_info_t objects that can reside
218  * within a page without sharing a cache line.
219  */
220 static SECURITY_READ_ONLY_LATE(unsigned) ptd_info_offset = 0;
221 
222 /* Lock to protect accesses to the PTD free list. */
223 static decl_simple_lock_data(, ptd_free_list_lock MARK_AS_PMAP_DATA);
224 
225 /**
226  * Dummy _internal() prototypes so Clang doesn't complain about missing
227  * prototypes on a non-static function. These functions can't be marked as
228  * static because they need to be called from pmap_ppl_interface.c where the
229  * PMAP_SUPPORT_PROTOYPES() macro will auto-generate the prototype implicitly.
230  */
231 kern_return_t mapping_free_prime_internal(void);
232 
233 /**
234  * Flag indicating whether any I/O regions that require strong DSB are present.
235  * If not, certain TLB maintenance operations can be streamlined.
236  */
237 SECURITY_READ_ONLY_LATE(bool) sdsb_io_rgns_present = false;
238 
239 /**
240  * Sorted representation of the pmap-io-ranges nodes in the device tree. These
241  * nodes describe all of the SPTM/PPL-owned I/O ranges.
242  */
243 SECURITY_READ_ONLY_LATE(pmap_io_range_t*) io_attr_table = (pmap_io_range_t*)0;
244 
245 /* The number of ranges described by io_attr_table. */
246 SECURITY_READ_ONLY_LATE(unsigned int) num_io_rgns = 0;
247 
248 /**
249  * Sorted representation of the pmap-io-filter entries in the device tree.
250  * The entries are sorted and queried by {signature, range}.
251  */
252 SECURITY_READ_ONLY_LATE(pmap_io_filter_entry_t*) io_filter_table = (pmap_io_filter_entry_t*)0;
253 
254 /* Number of total pmap-io-filter entries. */
255 SECURITY_READ_ONLY_LATE(unsigned int) num_io_filter_entries = 0;
256 
257 /**
258  * A list of pages that define the per-cpu scratch areas used by IOMMU drivers
259  * when preparing data to be passed into the SPTM. The size allocated per-cpu is
260  * defined by PMAP_IOMMU_SCRATCH_SIZE.
261  *
262  * SPTM TODO: Only have these variables on systems with IOMMU drivers (H11+).
263  */
264 #define PMAP_IOMMU_SCRATCH_SIZE (PMAP_IOMMU_NUM_SCRATCH_PAGES * PAGE_SIZE)
265 SECURITY_READ_ONLY_LATE(pmap_paddr_t) sptm_cpu_iommu_scratch_start = 0;
266 SECURITY_READ_ONLY_LATE(pmap_paddr_t) sptm_cpu_iommu_scratch_end = 0;
267 
268 /* Prototypes used by pmap_data_bootstrap(). */
269 void pmap_cpu_data_array_init(void);
270 
271 /**
272  * This function is called once during pmap_bootstrap() to allocate and
273  * initialize many of the core data structures that are implemented in this
274  * file.
275  *
276  * Memory for these data structures is carved out of `avail_start` which is a
277  * global set up by arm_vm_init() that points to a physically contiguous region
278  * used for bootstrap allocations.
279  *
280  * @note There is no guaranteed alignment of `avail_start` when this function
281  *       returns. If avail_start needs to be aligned to a specific value then it
282  *       must be done so by the caller before they use it for more allocations.
283  */
284 void
285 pmap_data_bootstrap(void)
286 {
287 	/**
288 	 * Set ptd_per_page to the maximum number of (pt_desc_t + ptd_info_t) we can
289 	 * fit in a single page. We need to allow for some padding between the two,
290 	 * so that no ptd_info_t shares a cache line with a pt_desc_t.
291 	 */
292 	const unsigned ptd_info_size = sizeof(ptd_info_t);
293 	const unsigned l2_cline_bytes = 1 << MAX_L2_CLINE;
294 	ptd_per_page = (PAGE_SIZE - (l2_cline_bytes - 1)) / (sizeof(pt_desc_t) + ptd_info_size);
295 	unsigned increment = 0;
296 	bool try_next = true;
297 
298 	/**
299 	 * The current ptd_per_page calculation was done assuming the worst-case
300 	 * scenario in terms of padding between the two object arrays that reside in
301 	 * the same page. The following loop attempts to optimize this further by
302 	 * finding the smallest possible amount of padding while still ensuring that
303 	 * the two object arrays don't share a cache line.
304 	 */
305 	while (try_next) {
306 		increment++;
307 		const unsigned pt_desc_total_size =
308 		    PMAP_ALIGN((ptd_per_page + increment) * sizeof(pt_desc_t), l2_cline_bytes);
309 		const unsigned ptd_info_total_size = (ptd_per_page + increment) * ptd_info_size;
310 		try_next = (pt_desc_total_size + ptd_info_total_size) <= PAGE_SIZE;
311 	}
312 	ptd_per_page += increment - 1;
313 	assert(ptd_per_page > 0);
314 
315 	/**
316 	 * ptd_info objects reside after the ptd descriptor objects, with some
317 	 * padding in between if necessary to ensure that they don't co-exist in the
318 	 * same cache line.
319 	 */
320 	const unsigned pt_desc_bytes = ptd_per_page * sizeof(pt_desc_t);
321 	ptd_info_offset = PMAP_ALIGN(pt_desc_bytes, l2_cline_bytes);
322 
323 	/* The maximum amount of padding should be (l2_cline_bytes - 1). */
324 	assert((ptd_info_offset - pt_desc_bytes) < l2_cline_bytes);
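	/*
	 * Worked example with purely assumed sizes (not the real structure sizes):
	 * if sizeof(pt_desc_t) were 64, sizeof(ptd_info_t) were 8, with 128-byte
	 * L2 cache lines and 16K pages, the worst-case estimate above would give
	 * (16384 - 127) / 72 = 225 PTDs; the loop then discovers that 226 still
	 * fit (PMAP_ALIGN(226 * 64, 128) + 226 * 8 = 16272 <= 16384, while 227
	 * would need 16408 bytes), so ptd_per_page becomes 226 with zero padding.
	 */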
325 
326 	/**
327 	 * Allocate enough initial PTDs to map twice the available physical memory.
328 	 *
329 	 * To do this, start by calculating the number of leaf page tables that are
330 	 * needed to cover all of kernel-managed physical memory.
331 	 */
332 	const uint32_t num_leaf_page_tables =
333 	    (uint32_t)(mem_size / ((PAGE_SIZE / sizeof(pt_entry_t)) * ARM_PGBYTES));
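	/*
	 * For instance (assuming 16K pages and 8-byte PTEs), each leaf table maps
	 * (16384 / 8) * 16KB = 32MB of VA, so an example 8GB mem_size would need
	 * 256 leaf page tables.
	 */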
334 
335 	/**
336 	 * There should be one PTD per page table (times 2 since we want twice the
337 	 * number of required PTDs), plus round the number of PTDs up to the next
338 	 * `ptd_per_page` value so there's no wasted space.
339 	 */
340 	const uint32_t ptd_root_table_n_ptds =
341 	    (ptd_per_page * ((num_leaf_page_tables * 2) / ptd_per_page)) + ptd_per_page;
342 
343 	/* Lastly, calculate the number of VM pages and bytes these PTDs take up. */
344 	const uint32_t num_ptd_pages = ptd_root_table_n_ptds / ptd_per_page;
345 	vm_size_t ptd_root_table_size = num_ptd_pages * PAGE_SIZE;
346 
347 	/* Number of VM pages that span all of kernel-managed memory. */
348 	unsigned int npages = (unsigned int)atop(mem_size);
349 
350 
351 	/* The pv_head_table and pp_attr_table both have one entry per VM page. */
352 	const vm_size_t pp_attr_table_size = npages * sizeof(pp_attr_t);
353 	const vm_size_t pv_head_size = round_page(npages * sizeof(*pv_head_table));
354 
355 	/* Scan the device tree and override heuristics in the PV entry management code. */
356 	pmap_compute_pv_targets();
357 
358 	io_attr_table = (pmap_io_range_t *) SPTMArgs->sptm_pmap_io_ranges;
359 	num_io_rgns = SPTMArgs->sptm_pmap_io_ranges_count;
360 	io_filter_table = (pmap_io_filter_entry_t *) SPTMArgs->sptm_pmap_io_filters;
361 	num_io_filter_entries = SPTMArgs->sptm_pmap_io_filters_count;
362 
363 	/**
364 	 * Don't make any assumptions about the alignment of avail_start before
365 	 * execution of this function. Always re-align it to ensure the first
366 	 * allocated data structure is aligned correctly.
367 	 */
368 	avail_start = PMAP_ALIGN(avail_start, __alignof(pp_attr_t));
369 
370 	/**
371 	 * Keep track of where the data structures start so we can clear this memory
372 	 * later.
373 	 */
374 	const pmap_paddr_t pmap_struct_start = avail_start;
375 
376 	pp_attr_table = (pp_attr_t *)phystokv(avail_start);
377 	avail_start = PMAP_ALIGN(avail_start + pp_attr_table_size, __alignof(pv_entry_t *));
378 
379 	pv_head_table = (uintptr_t *)phystokv(avail_start);
380 
381 	/**
382 	 * ptd_root_table must start on a page boundary because all of the math for
383 	 * associating pt_desc_t objects with ptd_info objects assumes the first
384 	 * pt_desc_t in a page starts at the beginning of the page it resides in.
385 	 */
386 	avail_start = round_page(avail_start + pv_head_size);
387 
388 	pt_desc_t *ptd_root_table = (pt_desc_t *)phystokv(avail_start);
389 	avail_start = round_page(avail_start + ptd_root_table_size);
390 
391 	memset((char *)phystokv(pmap_struct_start), 0, avail_start - pmap_struct_start);
392 
393 	/* This function assumes that ptd_root_table has been zeroed out already. */
394 	ptd_bootstrap(ptd_root_table, num_ptd_pages);
395 
396 	/* Setup the pmap per-cpu data structures. */
397 	pmap_cpu_data_array_init();
398 }
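/*
 * A sketch of the resulting bootstrap carve-out (ascending physical addresses),
 * per the allocations above:
 *
 *     pmap_struct_start -> [pp_attr_table][pv_head_table][pad to page boundary]
 *                          [ptd_root_table] -> avail_start
 *
 * The entire region is zero-filled before ptd_bootstrap() consumes the PTD pages.
 */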
399 
400 /**
401  * Add a queue of VM pages to the pmap's VM object. This informs the VM that
402  * these pages are being used by the pmap and shouldn't be reused.
403  *
404  * This also means that the pmap_object can be used as a convenient way to loop
405  * through every page currently being used by the pmap. For instance, this queue
406  * of pages is exposed to the debugger through the Low Globals, where it's used
407  * to ensure that all pmap data is saved in an active core dump.
408  *
409  * @param mem The head of the queue of VM pages to add to the pmap's VM object.
410  */
411 void
412 pmap_enqueue_pages(vm_page_t mem)
413 {
414 	vm_page_t m_prev;
415 	vm_object_lock(pmap_object);
416 	while (mem != VM_PAGE_NULL) {
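		/*
		 * Each page's offset within pmap_object is its physical offset from
		 * the base of kernel-managed physical memory (gPhysBase), so lookups
		 * such as the one in pmap_page_free() can key off (pa - gPhysBase).
		 */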
417 		const vm_object_offset_t offset =
418 		    (vm_object_offset_t) ((ptoa(VM_PAGE_GET_PHYS_PAGE(mem))) - gPhysBase);
419 
420 		vm_page_insert_wired(mem, pmap_object, offset, VM_KERN_MEMORY_PTE);
421 		m_prev = mem;
422 		mem = NEXT_PAGE(m_prev);
423 		*(NEXT_PAGE_PTR(m_prev)) = VM_PAGE_NULL;
424 	}
425 	vm_object_unlock(pmap_object);
426 }
427 
428 /**
429  * Allocate a page from the VM for usage within the pmap.
430  *
431  * @param ppa Output parameter to store the physical address of the allocated
432  *           page if one was able to be allocated (NULL otherwise).
433  * @param options The following options can be specified:
434  *     - PMAP_PAGE_ALLOCATE_NOWAIT: If the VM page free list doesn't have
435  *       any free pages available then don't wait for one, just return
436  *       immediately without allocating a page.
437  *
438  *     - PMAP_PAGE_RECLAIM_NOWAIT: If memory can't be allocated from the VM,
439  *       then fall back to attempting to reclaim a userspace page table. This
440  *       should only be specified in paths that absolutely can't take the
441  *       latency hit of waiting for the VM to allocate more pages. This flag
442  *       doesn't make much sense unless it's paired with
443  *       PMAP_PAGE_ALLOCATE_NOWAIT.
444  *
445  *     - PMAP_PAGE_NOZEROFILL: don't zero-fill the pages. This should only be
446  *       used if you know that something else in the relevant code path will
447  *       zero-fill or otherwise fully initialize the page with consistent data.
448  *       This is mostly intended for cases in which sptm_retype() is guaranteed
449  *       to zero-fill the page for us.
450  *
451  * @return KERN_SUCCESS if a page was successfully allocated, or
452  *         KERN_RESOURCE_SHORTAGE if a page failed to get allocated. This should
453  *         only be returned if PMAP_PAGE_ALLOCATE_NOWAIT is passed or if
454  *         preemption is disabled after early boot since allocating memory from
455  *         the VM requires grabbing a mutex. If PMAP_PAGE_ALLOCATE_NOWAIT is not
456  *         passed and the system is in a preemptable state, then the return
457  *         value should always be KERN_SUCCESS (as the thread will block until
458  *         there are free pages available).
459  */
460 MARK_AS_PMAP_TEXT kern_return_t
461 pmap_page_alloc(pmap_paddr_t *ppa, unsigned options)
462 {
463 	assert(ppa != NULL);
464 	pmap_paddr_t pa = 0;
465 	PMAP_ASSERT_NOT_WRITING_HIB();
466 	vm_page_t mem = VM_PAGE_NULL;
467 	thread_t self = current_thread();
468 
469 	/**
470 	 * It's not possible to allocate memory from the VM in a preemption disabled
471 	 * environment except during early boot (since the VM needs to grab a mutex).
472 	 * In those cases just return a resource shortage error and let the caller
473 	 * deal with it.
474 	 *
475 	 * We don't panic here as there are genuinely some cases where pmap_enter()
476 	 * is called with preemption disabled, and it's better to return an error
477 	 * to those callers to notify them to try again with preemption enabled.
478 	 */
479 	if (!pmap_is_preemptible()) {
480 		return KERN_RESOURCE_SHORTAGE;
481 	}
482 
483 	*ppa = 0;
484 
485 	/**
486 	 * We qualify for allocating reserved memory so set TH_OPT_VMPRIV to inform
487 	 * the VM of this.
488 	 *
489 	 * This field should only be modified by the local thread itself, so no lock
490 	 * needs to be taken.
491 	 */
492 	uint16_t thread_options = self->options;
493 	self->options |= TH_OPT_VMPRIV;
494 
495 	/**
496 	 * This function only allocates a single page, so just grab one off the
497 	 * VM's global page free list.
498 	 */
499 	while ((mem = vm_page_grab()) == VM_PAGE_NULL) {
500 		if (options & PMAP_PAGE_ALLOCATE_NOWAIT) {
501 			break;
502 		}
503 
504 		VM_PAGE_WAIT();
505 	}
506 
507 	if (mem != VM_PAGE_NULL) {
508 		vm_page_lock_queues();
509 		vm_page_wire(mem, VM_KERN_MEMORY_PTE, TRUE);
510 		vm_page_unlock_queues();
511 	}
512 
513 	self->options = thread_options;
514 
515 	if (mem == VM_PAGE_NULL) {
516 		return KERN_RESOURCE_SHORTAGE;
517 	}
518 
519 	pa = (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
520 
521 	/* Add the allocated VM page(s) to the pmap's VM object. */
522 	pmap_enqueue_pages(mem);
523 
524 	/* Pages are considered "in use" by the pmap until returned to the VM. */
525 	OSAddAtomic(1, &inuse_pmap_pages_count);
526 
527 	/* SPTM TODO: assert that the returned page is of type XNU_DEFAULT in frame table */
528 	if (!(options & PMAP_PAGE_NOZEROFILL)) {
529 		bzero((void*)phystokv(pa), PAGE_SIZE);
530 	}
531 	*ppa = pa;
532 	return KERN_SUCCESS;
533 }
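/*
 * A minimal usage sketch (illustrative only; not a call site from this file):
 *
 *     pmap_paddr_t pa = 0;
 *     if (pmap_page_alloc(&pa, PMAP_PAGE_ALLOCATE_NOWAIT) == KERN_SUCCESS) {
 *         // ... use the page via phystokv(pa) ...
 *         pmap_page_free(pa);
 *     }
 */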
534 
535 /**
536  * Free memory previously allocated through pmap_page_alloc() back to the VM.
537  *
538  * @param pa Physical address of the page(s) to free.
539  */
540 void
541 pmap_page_free(pmap_paddr_t pa)
542 {
543 	/* SPTM TODO: assert that the page to be freed is of type XNU_DEFAULT in frame table */
544 
545 	/* Pages are considered "in use" until given back to the VM. */
546 	OSAddAtomic(-1, &inuse_pmap_pages_count);
547 
548 	vm_page_t mem = VM_PAGE_NULL;
549 	vm_object_lock(pmap_object);
550 
551 	/**
552 	 * Remove the page from the pmap's VM object and return it back to the
553 	 * VM's global free list of pages.
554 	 */
555 	mem = vm_page_lookup(pmap_object, (pa - gPhysBase));
556 	assert(mem != VM_PAGE_NULL);
557 	assert(VM_PAGE_WIRED(mem));
558 	vm_page_lock_queues();
559 	vm_page_free(mem);
560 	vm_page_unlock_queues();
561 	vm_object_unlock(pmap_object);
562 }
563 
564 /**
565  * Called by the VM to reclaim pages that we can reclaim quickly and cheaply.
566  * This will take pages in the pmap's VM object and add them back to the VM's
567  * global list of free pages.
568  *
569  * @return The number of pages returned to the VM.
570  */
571 uint64_t
572 pmap_release_pages_fast(void)
573 {
574 	return 0;
575 }
576 
577 /**
578  * Allocates a batch (list) of pv_entry_t's from the global PV free array.
579  *
580  * @return A pointer to the head of the newly-allocated batch, or PV_ENTRY_NULL
581  *         if empty.
582  */
583 MARK_AS_PMAP_TEXT static pv_entry_t *
584 pv_free_array_get_batch(void)
585 {
586 	pv_entry_t *new_batch = PV_ENTRY_NULL;
587 
588 	pmap_simple_lock(&pv_free_array_lock);
589 	if (pv_free_array_n_elems() > 0) {
590 		/**
591 		 * The global PV array acts as a ring buffer where each entry points to
592 		 * a linked list of PVEs of length PV_BATCH_SIZE. Get the next free
593 		 * batch.
594 		 */
595 		const size_t index = pv_free_read_idx++ & (PV_FREE_ARRAY_SIZE - 1);
596 		pv_free_list_t *free_list = &pv_free_ring[index];
597 
598 		assert((free_list->count == PV_BATCH_SIZE) && (free_list->list != PV_ENTRY_NULL));
599 		new_batch = free_list->list;
600 	}
601 	pmap_simple_unlock(&pv_free_array_lock);
602 
603 	return new_batch;
604 }
605 
606 /**
607  * Frees a batch (list) of pv_entry_t's into the global PV free array.
608  *
609  * @param batch_head Pointer to the first entry in the batch to be returned to
610  *                   the array. This must be a linked list of pv_entry_t's of
611  *                   length PV_BATCH_SIZE.
612  *
613  * @return KERN_SUCCESS, or KERN_FAILURE if the global array is full.
614  */
615 MARK_AS_PMAP_TEXT static kern_return_t
616 pv_free_array_give_batch(pv_entry_t *batch_head)
617 {
618 	assert(batch_head != NULL);
619 
620 	pmap_simple_lock(&pv_free_array_lock);
621 	if (pv_free_array_n_elems() == (PV_FREE_ARRAY_SIZE - 1)) {
622 		pmap_simple_unlock(&pv_free_array_lock);
623 		return KERN_FAILURE;
624 	}
625 
626 	const size_t index = pv_free_write_idx++ & (PV_FREE_ARRAY_SIZE - 1);
627 	pv_free_list_t *free_list = &pv_free_ring[index];
628 	free_list->list = batch_head;
629 	free_list->count = PV_BATCH_SIZE;
630 	pmap_simple_unlock(&pv_free_array_lock);
631 
632 	return KERN_SUCCESS;
633 }
634 
635 /**
636  * Helper function for allocating a single PVE from an arbitrary free list.
637  *
638  * @param free_list The free list to allocate a node from.
639  * @param pvepp Output parameter that will get updated with a pointer to the
640  *              allocated node if the free list isn't empty, or a pointer to
641  *              NULL if the list is empty.
642  */
643 MARK_AS_PMAP_TEXT static void
644 pv_free_list_alloc(pv_free_list_t *free_list, pv_entry_t **pvepp)
645 {
646 	assert(pvepp != NULL);
647 	assert(((free_list->list != NULL) && (free_list->count > 0)) ||
648 	    ((free_list->list == NULL) && (free_list->count == 0)));
649 
650 	if ((*pvepp = free_list->list) != NULL) {
651 		pv_entry_t *pvep = *pvepp;
652 		free_list->list = pvep->pve_next;
653 		pvep->pve_next = PV_ENTRY_NULL;
654 		free_list->count--;
655 	}
656 }
657 
658 /**
659  * Allocates a PVE from the kernel-dedicated list.
660  *
661  * @note This is only called when the global free list is empty, so don't bother
662  *       trying to allocate more nodes from that list.
663  *
664  * @param pvepp Output parameter that will get updated with a pointer to the
665  *              allocated node if the free list isn't empty, or a pointer to
666  *              NULL if the list is empty. This pointer can't already be
667  *              pointing to a valid entry before allocation.
668  */
669 MARK_AS_PMAP_TEXT static void
670 pv_list_kern_alloc(pv_entry_t **pvepp)
671 {
672 	assert((pvepp != NULL) && (*pvepp == PV_ENTRY_NULL));
673 	pmap_simple_lock(&pv_kern_free_list_lock);
674 	if (pv_kern_free.count > 0) {
675 		pmap_kern_reserve_alloc_stat++;
676 	}
677 	pv_free_list_alloc(&pv_kern_free, pvepp);
678 	pmap_simple_unlock(&pv_kern_free_list_lock);
679 }
680 
681 /**
682  * Returns a list of PVEs to the kernel-dedicated free list.
683  *
684  * @param pve_head Head of the list to be returned.
685  * @param pve_tail Tail of the list to be returned.
686  * @param pv_cnt Number of elements in the list to be returned.
687  */
688 MARK_AS_PMAP_TEXT static void
689 pv_list_kern_free(pv_entry_t *pve_head, pv_entry_t *pve_tail, int pv_cnt)
690 {
691 	assert((pve_head != PV_ENTRY_NULL) && (pve_tail != PV_ENTRY_NULL));
692 
693 	pmap_simple_lock(&pv_kern_free_list_lock);
694 	pve_tail->pve_next = pv_kern_free.list;
695 	pv_kern_free.list = pve_head;
696 	pv_kern_free.count += pv_cnt;
697 	pmap_simple_unlock(&pv_kern_free_list_lock);
698 }
699 
700 /**
701  * Attempts to allocate from the per-cpu free list of PVEs, and if that fails,
702  * then replenish the per-cpu free list with a batch of PVEs from the global
703  * PVE free list.
704  *
705  * @param pvepp Output parameter that will get updated with a pointer to the
706  *              allocated node if the free lists aren't empty, or a pointer to
707  *              NULL if both the per-cpu and global lists are empty. This
708  *              pointer can't already be pointing to a valid entry before
709  *              allocation.
710  */
711 MARK_AS_PMAP_TEXT static void
712 pv_list_alloc(pv_entry_t **pvepp)
713 {
714 	assert((pvepp != NULL) && (*pvepp == PV_ENTRY_NULL));
715 
716 	/* Disable preemption while working with per-CPU data. */
717 	mp_disable_preemption();
718 
719 	pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data();
720 	pv_free_list_alloc(&pmap_cpu_data->pv_free, pvepp);
721 
722 	if (*pvepp != PV_ENTRY_NULL) {
723 		goto pv_list_alloc_done;
724 	}
725 
726 	if (pv_kern_free.count < pv_kern_low_water_mark) {
727 		/**
728 		 * If the kernel reserved pool is low, let non-kernel mappings wait for
729 		 * a page from the VM.
730 		 */
731 		goto pv_list_alloc_done;
732 	}
733 
734 	/**
735 	 * Attempt to replenish the local list off the global one, and return the
736 	 * first element. If the global list is empty, then the allocation failed.
737 	 */
738 	pv_entry_t *new_batch = pv_free_array_get_batch();
739 
740 	if (new_batch != PV_ENTRY_NULL) {
741 		pmap_cpu_data->pv_free.count = PV_BATCH_SIZE - 1;
742 		pmap_cpu_data->pv_free.list = new_batch->pve_next;
743 		assert(pmap_cpu_data->pv_free.list != NULL);
744 
745 		new_batch->pve_next = PV_ENTRY_NULL;
746 		*pvepp = new_batch;
747 	}
748 
749 pv_list_alloc_done:
750 	mp_enable_preemption();
751 
752 	return;
753 }
754 
755 /**
756  * Adds a list of PVEs to the per-CPU PVE free list. May spill out some entries
757  * to the global or the kernel PVE free lists if the per-CPU list contains too
758  * many PVEs.
759  *
760  * @param pve_head Head of the list to be returned.
761  * @param pve_tail Tail of the list to be returned.
762  * @param pv_cnt Number of elements in the list to be returned.
763  */
764 MARK_AS_PMAP_TEXT void
765 pv_list_free(pv_entry_t *pve_head, pv_entry_t *pve_tail, unsigned int pv_cnt)
766 {
767 	assert((pve_head != PV_ENTRY_NULL) && (pve_tail != PV_ENTRY_NULL));
768 
769 	/* Disable preemption while working with per-CPU data. */
770 	disable_preemption();
771 
772 	pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data();
773 
774 	/**
775 	 * How many more PVEs need to be added to the last allocated batch to get it
776 	 * back up to a PV_BATCH_SIZE number of objects.
777 	 */
778 	const uint32_t available = PV_BATCH_SIZE - (pmap_cpu_data->pv_free.count % PV_BATCH_SIZE);
779 
780 	/**
781 	 * The common case is that the number of PVEs to be freed fit in the current
782 	 * PV_BATCH_SIZE boundary. If that is the case, quickly prepend the whole
783 	 * list and return.
784 	 */
785 	if (__probable((pv_cnt <= available) &&
786 	    ((pmap_cpu_data->pv_free.count % PV_BATCH_SIZE != 0) || (pmap_cpu_data->pv_free.count == 0)))) {
787 		pve_tail->pve_next = pmap_cpu_data->pv_free.list;
788 		pmap_cpu_data->pv_free.list = pve_head;
789 		pmap_cpu_data->pv_free.count += pv_cnt;
790 		goto pv_list_free_done;
791 	}
792 
793 	unsigned int freed_count = 0;
794 
795 	/**
796 	 * In the degenerate case, we need to process PVEs one by one, to make sure
797 	 * we spill out to the global list, or update the spill marker as
798 	 * appropriate.
799 	 */
800 	while (pv_cnt) {
801 		/**
802 		 * Check for (and if necessary reenable) preemption every PV_BATCH_SIZE PVEs to
803 		 * avoid leaving preemption disabled for an excessive duration if we happen to be
804 		 * processing a very large PV list.
805 		 */
806 		if (__improbable(freed_count == PV_BATCH_SIZE)) {
807 			freed_count = 0;
808 			if (__improbable(pmap_pending_preemption())) {
809 				enable_preemption();
810 				assert(preemption_enabled() || PMAP_IS_HIBERNATING());
811 				disable_preemption();
812 				pmap_cpu_data = pmap_get_cpu_data();
813 			}
814 		}
815 
816 		/**
817 		 * Take the node off the top of the passed in list and prepend it to the
818 		 * per-cpu list.
819 		 */
820 		pv_entry_t *pv_next = pve_head->pve_next;
821 		pve_head->pve_next = pmap_cpu_data->pv_free.list;
822 		pmap_cpu_data->pv_free.list = pve_head;
823 		pve_head = pv_next;
824 		pmap_cpu_data->pv_free.count++;
825 		pv_cnt--;
826 		freed_count++;
827 
828 		if (__improbable(pmap_cpu_data->pv_free.count == (PV_BATCH_SIZE + 1))) {
829 			/**
830 			 * A full batch of entries has been freed to the per-cpu list.
831 			 * Update the spill marker which is used to remember the end of a
832 			 * batch (remember, we prepend nodes) to eventually return back to
833 			 * the global list (we try to only keep one PV_BATCH_SIZE worth of
834 			 * nodes in any single per-cpu list).
835 			 */
836 			pmap_cpu_data->pv_free_spill_marker = pmap_cpu_data->pv_free.list;
837 		} else if (__improbable(pmap_cpu_data->pv_free.count == (PV_BATCH_SIZE * 2) + 1)) {
838 			/* Spill out excess PVEs to the global PVE array */
839 			pv_entry_t *spill_head = pmap_cpu_data->pv_free.list->pve_next;
840 			pv_entry_t *spill_tail = pmap_cpu_data->pv_free_spill_marker;
841 			pmap_cpu_data->pv_free.list->pve_next = pmap_cpu_data->pv_free_spill_marker->pve_next;
842 			spill_tail->pve_next = PV_ENTRY_NULL;
843 			pmap_cpu_data->pv_free.count -= PV_BATCH_SIZE;
844 			pmap_cpu_data->pv_free_spill_marker = pmap_cpu_data->pv_free.list;
845 
846 			if (__improbable(pv_free_array_give_batch(spill_head) != KERN_SUCCESS)) {
847 				/**
848 				 * This is extremely unlikely to happen, as it would imply that
849 				 * we have (PV_FREE_ARRAY_SIZE * PV_BATCH_SIZE) PVEs sitting in
850 				 * the global array. Just in case, push the excess down to the
851 				 * kernel PVE free list.
852 				 */
853 				pv_list_kern_free(spill_head, spill_tail, PV_BATCH_SIZE);
854 			}
855 		}
856 	}
857 
858 pv_list_free_done:
859 	enable_preemption();
860 
861 	return;
862 }
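/*
 * Net effect of the above: each per-CPU free list is kept to roughly one
 * PV_BATCH_SIZE worth of entries. Once it grows past 2 * PV_BATCH_SIZE, the
 * batch delimited by pv_free_spill_marker is carved out and pushed to the
 * global ring, falling back to the kernel reserve list if the ring is full.
 */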
863 
864 /**
865  * Adds a single page to the PVE allocation subsystem.
866  *
867  * @note This function operates under the assumption that a PV_BATCH_SIZE amount
868  *       of PVEs can fit within a single page. One page is always allocated for
869  *       one batch, so if there's empty space in the page after the batch of
870  *       PVEs, it'll go unused (so it's best to keep the batch size at an amount
871  *       that utilizes a whole page).
872  *
873  * @param alloc_flags Allocation flags passed to pmap_page_alloc(). See
874  *                    the definition of that function for a detailed description
875  *                    of the available flags.
876  *
877  * @return KERN_SUCCESS, or the value returned by pmap_page_alloc() upon
878  *         failure.
879  */
880 MARK_AS_PMAP_TEXT static kern_return_t
881 pve_feed_page(unsigned alloc_flags)
882 {
883 	kern_return_t kr = KERN_FAILURE;
884 
885 	pv_entry_t *pve_head = PV_ENTRY_NULL;
886 	pv_entry_t *pve_tail = PV_ENTRY_NULL;
887 	pmap_paddr_t pa = 0;
888 
889 	kr = pmap_page_alloc(&pa, alloc_flags);
890 
891 	if (kr != KERN_SUCCESS) {
892 		return kr;
893 	}
894 
895 	/* Update statistics globals. See the variables' definitions for more info. */
896 	os_atomic_inc(&pv_page_count, relaxed);
897 	pmap_reserve_replenish_stat += PV_BATCH_SIZE;
898 
899 	/* Prepare a new list by linking all of the entries in advance. */
900 	pve_head = (pv_entry_t *)phystokv(pa);
901 	pve_tail = &pve_head[PV_BATCH_SIZE - 1];
902 
903 	for (int i = 0; i < PV_BATCH_SIZE; i++) {
904 		pve_head[i].pve_next = &pve_head[i + 1];
905 	}
906 	pve_head[PV_BATCH_SIZE - 1].pve_next = PV_ENTRY_NULL;
907 
908 	/**
909 	 * Add the new list to the kernel PVE free list if we are running low on
910 	 * kernel-dedicated entries or the global free array is full.
911 	 */
912 	if ((pv_kern_free.count < pv_kern_low_water_mark) ||
913 	    (pv_free_array_give_batch(pve_head) != KERN_SUCCESS)) {
914 		pv_list_kern_free(pve_head, pve_tail, PV_BATCH_SIZE);
915 	}
916 
917 	return KERN_SUCCESS;
918 }
919 
920 /**
921  * Allocate a PV node from one of many different free lists (per-cpu, global, or
922  * kernel-specific).
923  *
924  * @note This function is very tightly coupled with pmap_enter_pv(). If
925  *       modifying this code, please ensure that pmap_enter_pv() doesn't break.
926  *
927  * @note The pmap lock must already be held if the new mapping is a CPU mapping.
928  *
929  * @note The PVH lock for the physical page that is getting a new mapping
930  *       registered must already be held.
931  *
932  * @param pmap The pmap that owns the new mapping, or NULL if this is tracking
933  *             an IOMMU translation.
934  * @param lock_mode Which state the pmap lock is being held in if the mapping is
935  *                  owned by a pmap, otherwise this is a don't care.
936  * @param options PMAP_OPTIONS_* family of options passed from the caller.
937  * @param pvepp Output parameter that will get updated with a pointer to the
938  *              allocated node if none of the free lists are empty, or a pointer
939  *              to NULL otherwise. This pointer can't already be pointing to a
940  *              valid entry before allocation.
941  * @param locked_pvh Input/output parameter pointing to the wrapped value of the
942  *                   pv_head_table entry previously obtained from pvh_lock().
943  *                   This value will be updated if [locked_pvh->pai] needs to be
944  *                   re-locked.
945  * @param refcountp Pointer to a reference count that will be temporarily
946  *                  atomically incremented in the event that [pmap]'s lock needs
947  *                  to be temporarily dropped in order to satisfy the allocation.
948  *                  This is typically used to prevent a page table from being
949  *                  reclaimed while the lock is dropped.  May be NULL.
950  *
951  * @return These are the possible return values:
952  *     PV_ALLOC_SUCCESS: A PVE object was successfully allocated.
953  *     PV_ALLOC_FAIL: No objects were available for allocation, and
954  *                    allocating a new page failed.
955  *     PV_ALLOC_RETRY: No objects were available on the free lists, so a new
956  *                     page of PVE objects needed to be allocated. To do that,
957  *                     the pmap and PVH locks were dropped. The caller may have
958  *                     depended on these locks for consistency, so return and
959  *                     let the caller retry the PVE allocation with the locks
960  *                     held. Note that the locks have already been re-acquired
961  *                     before this function exits.
962  */
963 MARK_AS_PMAP_TEXT pv_alloc_return_t
964 pv_alloc(
965 	pmap_t pmap,
966 	pmap_lock_mode_t lock_mode,
967 	unsigned int options,
968 	pv_entry_t **pvepp,
969 	locked_pvh_t *locked_pvh,
970 	volatile uint16_t *refcountp)
971 {
972 	assert((pvepp != NULL) && (*pvepp == PV_ENTRY_NULL));
973 	assert(locked_pvh != NULL);
974 
975 	if (pmap != NULL) {
976 		pmap_assert_locked(pmap, lock_mode);
977 	}
978 
979 	pv_list_alloc(pvepp);
980 	if (PV_ENTRY_NULL != *pvepp) {
981 		return PV_ALLOC_SUCCESS;
982 	}
983 
984 	unsigned alloc_flags = 0;
985 
986 	/**
987 	 * We got here because both the per-CPU and the global lists are empty. If
988 	 * this allocation is for the kernel pmap or an IOMMU kernel driver, we try
989 	 * to get an entry from the kernel list next.
990 	 */
991 	if ((pmap == NULL) || (kernel_pmap == pmap)) {
992 		pv_list_kern_alloc(pvepp);
993 		if (PV_ENTRY_NULL != *pvepp) {
994 			return PV_ALLOC_SUCCESS;
995 		}
996 	}
997 
998 	/**
999 	 * Make sure we have PMAP_PAGE_ALLOCATE_NOWAIT set in alloc_flags when the
1000 	 * input options argument has PMAP_OPTIONS_NOWAIT set.
1001 	 */
1002 	alloc_flags |= (options & PMAP_OPTIONS_NOWAIT) ? PMAP_PAGE_ALLOCATE_NOWAIT : 0;
1003 
1004 	/**
1005 	 * We ran out of PV entries all across the board, or this allocation is not
1006 	 * for the kernel. Let's make sure that the kernel list is not too full
1007 	 * (very unlikely), in which case we can rebalance here.
1008 	 */
1009 	if (__improbable(pv_kern_free.count > (PV_BATCH_SIZE * 2))) {
1010 		pmap_simple_lock(&pv_kern_free_list_lock);
1011 		/* Re-check, now that the lock is held. */
1012 		if (pv_kern_free.count > (PV_BATCH_SIZE * 2)) {
1013 			pv_entry_t *pve_head = pv_kern_free.list;
1014 			pv_entry_t *pve_tail = pve_head;
1015 
1016 			for (int i = 0; i < (PV_BATCH_SIZE - 1); i++) {
1017 				pve_tail = pve_tail->pve_next;
1018 			}
1019 
1020 			pv_kern_free.list = pve_tail->pve_next;
1021 			pv_kern_free.count -= PV_BATCH_SIZE;
1022 			pve_tail->pve_next = PV_ENTRY_NULL;
1023 			pmap_simple_unlock(&pv_kern_free_list_lock);
1024 
1025 			/* Return back every node except the first one to the free lists. */
1026 			pv_list_free(pve_head->pve_next, pve_tail, PV_BATCH_SIZE - 1);
1027 			pve_head->pve_next = PV_ENTRY_NULL;
1028 			*pvepp = pve_head;
1029 			return PV_ALLOC_SUCCESS;
1030 		}
1031 		pmap_simple_unlock(&pv_kern_free_list_lock);
1032 	}
1033 
1034 	/**
1035 	 * If all else fails, try to get a new pmap page so that the allocation
1036 	 * succeeds once the caller retries it.
1037 	 */
1038 	kern_return_t kr = KERN_FAILURE;
1039 	pv_alloc_return_t pv_status = PV_ALLOC_FAIL;
1040 	const unsigned int pai = locked_pvh->pai;
1041 
1042 	/**
1043 	 * Drop the lock during page allocation since that can take a while and
1044 	 * because preemption must be enabled when attempting to allocate memory
1045 	 * from the VM (which requires grabbing a mutex).
1046 	 */
1047 	pvh_unlock(locked_pvh);
1048 	if (pmap != NULL) {
1049 		/**
1050 		 * Bump the provided refcount before we drop the pmap lock in order to prevent
1051 		 * page table reclamation while the lock is dropped.
1052 		 */
1053 		if (__improbable((refcountp != NULL) && (os_atomic_inc_orig(refcountp, relaxed) == UINT16_MAX))) {
1054 			panic("%s: pmap %p refcount %p overflow", __func__, pmap, refcountp);
1055 		}
1056 		pmap_unlock(pmap, lock_mode);
1057 	}
1058 
1059 	if ((kr = pve_feed_page(alloc_flags)) == KERN_SUCCESS) {
1060 		/**
1061 		 * Since the lock was dropped, even though we successfully allocated a
1062 		 * new page to be used for PVE nodes, the code that relies on this
1063 		 * function might have depended on the lock being held for consistency,
1064 		 * so return out early and let them retry the allocation with the lock
1065 		 * re-held.
1066 		 */
1067 		pv_status = PV_ALLOC_RETRY;
1068 	} else {
1069 		pv_status = PV_ALLOC_FAIL;
1070 	}
1071 
1072 	if (pmap != NULL) {
1073 		pmap_lock(pmap, lock_mode);
1074 		if (__improbable((refcountp != NULL) && (os_atomic_dec_orig(refcountp, relaxed) == 0))) {
1075 			panic("%s: pmap %p refcount %p underflow", __func__, pmap, refcountp);
1076 		}
1077 	}
1078 
1079 	if (__improbable(options & PMAP_OPTIONS_NOPREEMPT)) {
1080 		*locked_pvh = pvh_lock_nopreempt(pai);
1081 	} else {
1082 		*locked_pvh = pvh_lock(pai);
1083 	}
1084 
1085 	/* Ensure that no node was created if we're not returning successfully. */
1086 	assert(*pvepp == PV_ENTRY_NULL);
1087 
1088 	return pv_status;
1089 }
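/*
 * Callers generally treat PV_ALLOC_RETRY as a signal to redo their work, since
 * the PVH and pmap locks were dropped and re-acquired here. A hypothetical
 * retry loop (illustrative only) might look like:
 *
 *     pv_alloc_return_t ret;
 *     do {
 *         // re-validate any state that depended on the locks before retrying
 *         ret = pv_alloc(pmap, lock_mode, options, &pvep, &locked_pvh, NULL);
 *     } while (ret == PV_ALLOC_RETRY);
 *
 * In practice (see pmap_enter_pv() below) the retry typically happens further
 * up the call chain.
 */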
1090 
1091 /**
1092  * Utility function for freeing a single PVE object back to the free lists.
1093  *
1094  * @param pvep Pointer to the PVE object to free.
1095  */
1096 MARK_AS_PMAP_TEXT void
1097 pv_free(pv_entry_t *pvep)
1098 {
1099 	assert(pvep != PV_ENTRY_NULL);
1100 
1101 	pv_list_free(pvep, pvep, 1);
1102 }
1103 
1104 /**
1105  * This function provides a mechanism for the device tree to override the
1106  * default PV allocation amounts and the watermark level which determines how
1107  * many PVE objects are kept in the kernel-dedicated free list.
1108  */
1109 MARK_AS_PMAP_TEXT void
1110 pmap_compute_pv_targets(void)
1111 {
1112 	DTEntry entry = NULL;
1113 	void const *prop = NULL;
1114 	int err = 0;
1115 	unsigned int prop_size = 0;
1116 
1117 	err = SecureDTLookupEntry(NULL, "/defaults", &entry);
1118 	assert(err == kSuccess);
1119 
1120 	if (kSuccess == SecureDTGetProperty(entry, "pmap-pv-count", &prop, &prop_size)) {
1121 		if (prop_size != sizeof(pv_alloc_initial_target)) {
1122 			panic("pmap-pv-count property is not a 32-bit integer");
1123 		}
1124 		pv_alloc_initial_target = *((uint32_t const *)prop);
1125 	}
1126 
1127 	if (kSuccess == SecureDTGetProperty(entry, "pmap-kern-pv-count", &prop, &prop_size)) {
1128 		if (prop_size != sizeof(pv_kern_alloc_initial_target)) {
1129 			panic("pmap-kern-pv-count property is not a 32-bit integer");
1130 		}
1131 		pv_kern_alloc_initial_target = *((uint32_t const *)prop);
1132 	}
1133 
1134 	if (kSuccess == SecureDTGetProperty(entry, "pmap-kern-pv-min", &prop, &prop_size)) {
1135 		if (prop_size != sizeof(pv_kern_low_water_mark)) {
1136 			panic("pmap-kern-pv-min property is not a 32-bit integer");
1137 		}
1138 		pv_kern_low_water_mark = *((uint32_t const *)prop);
1139 	}
1140 }
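/*
 * Example (illustrative values only) of the /defaults device tree properties
 * consumed above, each a single 32-bit integer:
 *
 *     pmap-pv-count      = <0x00040000>;    // pv_alloc_initial_target
 *     pmap-kern-pv-count = <0x00000800>;    // pv_kern_alloc_initial_target
 *     pmap-kern-pv-min   = <0x00000400>;    // pv_kern_low_water_mark
 */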
1141 
1142 /**
1143  * This would normally be used to adjust the amount of PVE objects available in
1144  * the system, but we do that dynamically at runtime anyway so this is unneeded.
1145  */
1146 void
1147 mapping_adjust(void)
1148 {
1149 	/* Not implemented for arm/arm64. */
1150 }
1151 
1152 /**
1153  * Creates a target number of free pv_entry_t objects for the kernel free list
1154  * and the general free list.
1155  *
1156  * @note This function is called once during early boot, in kernel_bootstrap().
1157  *
1158  * @return KERN_SUCCESS if the objects were successfully allocated, or the
1159  *         return value from pve_feed_page() on failure (could be caused by not
1160  *         being able to allocate a page).
1161  */
1162 MARK_AS_PMAP_TEXT kern_return_t
1163 mapping_free_prime_internal(void)
1164 {
1165 	kern_return_t kr = KERN_FAILURE;
1166 
1167 	/*
1168 	 * We do not need to hold the pv_free_array lock to calculate the number of
1169 	 * elements in it because no other core is running at this point.
1170 	 */
1171 	while (((pv_free_array_n_elems() * PV_BATCH_SIZE) < pv_alloc_initial_target) ||
1172 	    (pv_kern_free.count < pv_kern_alloc_initial_target)) {
1173 		if ((kr = pve_feed_page(0)) != KERN_SUCCESS) {
1174 			return kr;
1175 		}
1176 	}
1177 
1178 	return KERN_SUCCESS;
1179 }
1180 
1181 /**
1182  * Helper function for pmap_enter_pv (hereafter shortened to "pepv") which converts
1183  * a PVH entry from PVH_TYPE_PTEP to PVH_TYPE_PVEP which will transform the
1184  * entry into a linked list of mappings.
1185  *
1186  * @note This should only be called from pmap_enter_pv().
1187  *
1188  * @note The PVH lock for the passed in page must already be held and the type
1189  *       must be PVH_TYPE_PTEP (wouldn't make sense to call this otherwise).
1190  *
1191  * @param pmap Either the pmap that owns the mapping being registered in
1192  *             pmap_enter_pv(), or NULL if this is an IOMMU mapping.
1193  * @param lock_mode Which state the pmap lock is being held in if the mapping is
1194  *                  owned by a pmap, otherwise this is a don't care.
1195  * @param options PMAP_OPTIONS_* family of options.
1196  * @param locked_pvh Input/output parameter pointing to the wrapped value of the
1197  *                   pv_head_table entry previously obtained from pvh_lock().
1198  *                   This value will be updated if [locked_pvh->pai] needs to be
1199  *                   re-locked or if the allocation is successful and the PVH
1200  *                   entry is updated with the new PVE pointer.
1201  *
1202  * @return PV_ALLOC_SUCCESS if the entry at `pai` was successfully converted
1203  *         into PVH_TYPE_PVEP, or the return value of pv_alloc() otherwise. See
1204  *         pv_alloc()'s function header for a detailed explanation of the
1205  *         possible return values.
1206  */
1207 MARK_AS_PMAP_TEXT static pv_alloc_return_t
1208 pepv_convert_ptep_to_pvep(
1209 	pmap_t pmap,
1210 	pmap_lock_mode_t lock_mode,
1211 	unsigned int options,
1212 	locked_pvh_t *locked_pvh)
1213 {
1214 	assert(locked_pvh != NULL);
1215 	assert(pvh_test_type(locked_pvh->pvh, PVH_TYPE_PTEP));
1216 
1217 	pv_entry_t *pvep = PV_ENTRY_NULL;
1218 	pv_alloc_return_t ret = pv_alloc(pmap, lock_mode, options, &pvep, locked_pvh, NULL);
1219 	if (ret != PV_ALLOC_SUCCESS) {
1220 		return ret;
1221 	}
1222 
1223 	const unsigned int pai = locked_pvh->pai;
1224 
1225 	/* If we've gotten this far then a node should've been allocated. */
1226 	assert(pvep != PV_ENTRY_NULL);
1227 
1228 	/* The new PVE should have the same PTE pointer as the previous PVH entry. */
1229 	pve_init(pvep);
1230 	pve_set_ptep(pvep, 0, pvh_ptep(locked_pvh->pvh));
1231 
1232 	assert(!pve_get_internal(pvep, 0));
1233 	assert(!pve_get_altacct(pvep, 0));
1234 	if (ppattr_is_internal(pai)) {
1235 		/**
1236 		 * Transfer "internal" status from pp_attr to this pve. See the comment
1237 		 * above PP_ATTR_INTERNAL for more information on this.
1238 		 */
1239 		ppattr_clear_internal(pai);
1240 		pve_set_internal(pvep, 0);
1241 	}
1242 	if (ppattr_is_altacct(pai)) {
1243 		/**
1244 		 * Transfer "altacct" status from pp_attr to this pve. See the comment
1245 		 * above PP_ATTR_ALTACCT for more information on this.
1246 		 */
1247 		ppattr_clear_altacct(pai);
1248 		pve_set_altacct(pvep, 0);
1249 	}
1250 
1251 	pvh_update_head(locked_pvh, pvep, PVH_TYPE_PVEP);
1252 
1253 	return PV_ALLOC_SUCCESS;
1254 }
1255 
1256 /**
1257  * Register a new mapping into the pv_head_table. This is the main data
1258  * structure used for performing a reverse physical to virtual translation and
1259  * finding all mappings to a physical page. Whenever a new page table mapping is
1260  * created (regardless of whether it's for a CPU or an IOMMU), it should be
1261  * registered with a call to this function.
1262  *
1263  * @note The pmap lock must already be held if the new mapping is a CPU mapping.
1264  *
1265  * @note The PVH lock for the physical page that is getting a new mapping
1266  *       registered must already be held.
1267  *
1268  * @note This function cannot be called during the hibernation process because
1269  *       it modifies critical pmap data structures that need to be dumped into
1270  *       the hibernation image in a consistent state.
1271  *
1272  * @param pmap The pmap that owns the new mapping, or NULL if this is tracking
1273  *             an IOMMU translation.
1274  * @param ptep The new mapping to register.
1275  * @param options Flags that can potentially be set on a per-page basis:
1276  *                PMAP_OPTIONS_INTERNAL: If this is the first CPU mapping, then
1277  *                    mark the page as being "internal". See the definition of
1278  *                    PP_ATTR_INTERNAL for more info.
1279  *                PMAP_OPTIONS_REUSABLE: If this is the first CPU mapping, and
1280  *                    this page is also marked internal, then mark the page as
1281  *                    being "reusable". See the definition of PP_ATTR_REUSABLE
1282  *                    for more info.
1283  * @param lock_mode Which state the pmap lock is being held in if the mapping is
1284  *                  owned by a pmap, otherwise this is a don't care.
1285  * @param locked_pvh Input/output parameter pointing to the wrapped value of the
1286  *                   pv_head_table entry previously obtained from pvh_lock().
1287  *                   If the registration is successful, locked_pvh->pvh will be
1288  *                   updated to reflect the new PV list head.
1289  * @param new_pvepp An output parameter that is updated with a pointer to the
1290  *                  PVE object where the PTEP was allocated into. In the event
1291  *                  of failure, or if the pointer passed in is NULL,
1292  *                  it's not modified.
1293  * @param new_pve_ptep_idx An output parameter that is updated with the index
1294  *                  into the PVE object where the PTEP was allocated into.
1295  *                  In the event of failure, or if new_pvepp is NULL,
1296  *                  it's not modified.
1297  *
1298  * @return PV_ALLOC_SUCCESS if the entry at [locked_pvh->pai] was successfully
1299  *         updated with the new mapping, or the return value of pv_alloc()
1300  *         otherwise. See pv_alloc()'s function header for a detailed explanation
1301  *         of the possible return values.
1302  */
1303 MARK_AS_PMAP_TEXT pv_alloc_return_t
1304 pmap_enter_pv(
1305 	pmap_t pmap,
1306 	pt_entry_t *ptep,
1307 	unsigned int options,
1308 	pmap_lock_mode_t lock_mode,
1309 	locked_pvh_t *locked_pvh,
1310 	pv_entry_t **new_pvepp,
1311 	int *new_pve_ptep_idx)
1312 {
1313 	assert(ptep != PT_ENTRY_NULL);
1314 	assert(locked_pvh != NULL);
1315 
1316 	bool first_cpu_mapping = false;
1317 
1318 	PMAP_ASSERT_NOT_WRITING_HIB();
1319 
1320 	if (pmap != NULL) {
1321 		pmap_assert_locked(pmap, lock_mode);
1322 	}
1323 
1324 	uintptr_t pvh_flags = pvh_get_flags(locked_pvh->pvh);
1325 	const unsigned int pai = locked_pvh->pai;
1326 
1327 
1328 	/**
1329 	 * An IOMMU mapping may already be present for a page that hasn't yet had a
1330 	 * CPU mapping established, so we use PVH_FLAG_CPU to determine if this is
1331 	 * the first CPU mapping. We base internal/reusable accounting on the
1332 	 * options specified for the first CPU mapping. PVH_FLAG_CPU, and thus this
1333 	 * accounting, will then persist as long as there are *any* mappings of the
1334 	 * page. The accounting for a page should not need to change until the page
1335 	 * is recycled by the VM layer, and we assert that there are no mappings
1336 	 * when a page is recycled. An IOMMU mapping of a freed/recycled page is
1337 	 * considered a security violation & potential DMA corruption path.
1338 	 */
1339 	first_cpu_mapping = ((pmap != NULL) && !(pvh_flags & PVH_FLAG_CPU));
1340 	if (first_cpu_mapping) {
1341 		pvh_flags |= PVH_FLAG_CPU;
1342 		pvh_set_flags(locked_pvh, pvh_flags);
1343 	}
1344 
1345 	/**
1346 	 * Internal/reusable flags are based on the first CPU mapping made to a
1347 	 * page. These will persist until all mappings to the page are removed.
1348 	 */
1349 	if (first_cpu_mapping) {
1350 		if ((options & PMAP_OPTIONS_INTERNAL) &&
1351 		    (options & PMAP_OPTIONS_REUSABLE)) {
1352 			ppattr_set_reusable(pai);
1353 		} else {
1354 			ppattr_clear_reusable(pai);
1355 		}
1356 	}
1357 
1358 	/* Visit the definitions for the PVH_TYPEs to learn more about each one. */
1359 	if (pvh_test_type(locked_pvh->pvh, PVH_TYPE_NULL)) {
1360 		/* If this is the first mapping, upgrade the type to store a single PTEP. */
1361 		pvh_update_head(locked_pvh, ptep, PVH_TYPE_PTEP);
1362 	} else {
1363 		pv_alloc_return_t ret = PV_ALLOC_FAIL;
1364 
1365 		if (pvh_test_type(locked_pvh->pvh, PVH_TYPE_PTEP)) {
1366 			/**
1367 			 * There was already a single mapping to the page. Convert the PVH
1368 			 * entry from PVH_TYPE_PTEP to PVH_TYPE_PVEP so that multiple
1369 			 * mappings can be tracked. If PVEs cannot hold more than a single
1370 			 * mapping, a second PVE will be added farther down.
1371 			 */
1372 			if ((ret = pepv_convert_ptep_to_pvep(pmap, lock_mode, options, locked_pvh)) != PV_ALLOC_SUCCESS) {
1373 				return ret;
1374 			}
1375 
1376 			/**
1377 			 * At this point, the PVH flags have been clobbered due to updating
1378 			 * PTEP->PVEP, but that's ok because the locks are being held and
1379 			 * the flags will get set again below before pv_alloc() is called
1380 			 * and the locks are potentially dropped again.
1381 			 */
1382 		} else if (__improbable(!pvh_test_type(locked_pvh->pvh, PVH_TYPE_PVEP))) {
1383 			panic("%s: unexpected PV head %p, ptep=%p pmap=%p",
1384 			    __func__, (void*)locked_pvh->pvh, ptep, pmap);
1385 		}
1386 
1387 		/**
1388 		 * Check if there is room for one more mapping in this PVE.
1389 		 */
1390 		pv_entry_t *pvep = pvh_pve_list(locked_pvh->pvh);
1391 		assert(pvep != PV_ENTRY_NULL);
1392 
1393 		int pve_ptep_idx = pve_find_ptep_index(pvep, PT_ENTRY_NULL);
1394 
1395 		if (pve_ptep_idx == -1) {
1396 			/**
1397 			 * Set up the pv_entry for this new mapping and then add it to the list
1398 			 * for this physical page.
1399 			 */
1400 			pve_ptep_idx = 0;
1401 			pvep = PV_ENTRY_NULL;
1402 			if ((ret = pv_alloc(pmap, lock_mode, options, &pvep, locked_pvh, NULL)) != PV_ALLOC_SUCCESS) {
1403 				return ret;
1404 			}
1405 
1406 			/* If we've gotten this far then a node should've been allocated. */
1407 			assert(pvep != PV_ENTRY_NULL);
1408 			pve_init(pvep);
1409 			pve_add(locked_pvh, pvep);
1410 		}
1411 
1412 		pve_set_ptep(pvep, pve_ptep_idx, ptep);
1413 
1414 		/*
1415 		 * The PTEP was successfully entered into the PVE object.
1416 		 * If the caller requests it, set new_pvepp and new_pve_ptep_idx
1417 		 * appropriately.
1418 		 */
1419 		if (new_pvepp != NULL) {
1420 			*new_pvepp = pvep;
1421 			*new_pve_ptep_idx = pve_ptep_idx;
1422 		}
1423 	}
1424 
1425 	return PV_ALLOC_SUCCESS;
1426 }
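
/*
 * Editor's note: a minimal, illustrative sketch of how a caller might drive
 * pmap_enter_pv(). It is not taken from a real call site; pvh_lock()/pvh_unlock()
 * are assumed from the documentation above, and PMAP_LOCK_EXCLUSIVE is an
 * assumed lock mode. The real insertion path carries considerably more state
 * and retry handling.
 *
 *	locked_pvh_t locked_pvh = pvh_lock(pai);
 *	pv_entry_t *new_pvep = PV_ENTRY_NULL;
 *	int new_pve_ptep_idx = 0;
 *
 *	const pv_alloc_return_t pvr = pmap_enter_pv(pmap, ptep,
 *	    PMAP_OPTIONS_INTERNAL, PMAP_LOCK_EXCLUSIVE, &locked_pvh,
 *	    &new_pvep, &new_pve_ptep_idx);
 *
 *	if (pvr != PV_ALLOC_SUCCESS) {
 *		...handle failure or retry; pv_alloc() may have dropped the locks...
 *	}
 *	pvh_unlock(&locked_pvh);
 */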
1427 
1428 /**
1429  * Remove a mapping that was registered with the pv_head_table. This needs to be
1430  * done for every mapping that was previously registered using pmap_enter_pv()
1431  * when the mapping is removed.
1432  *
1433  * @note The PVH lock for the physical page whose mapping is being removed
1434  *       must already be held.
1435  *
1436  * @note This function cannot be called during the hibernation process because
1437  *       it modifies critical pmap data structures that need to be dumped into
1438  *       the hibernation image in a consistent state.
1439  *
1440  * @param pmap The pmap that owns the mapping being removed, or NULL if this
1441  *             is tracking an IOMMU translation.
1442  * @param ptep The mapping that's getting removed.
1443  * @param locked_pvh Input/output parameter pointing to the wrapped value of the
1444  *                   pv_head_table entry previously obtained from pvh_lock().
1445  *                   If the removal is successful, locked_pvh->pvh may be updated
1446  *                   to reflect a new PV list head.
1447  * @param is_internal_p Output parameter set to the internal bit of the removed mapping.
1448  * @param is_altacct_p Output parameter set to the altacct bit of the removed mapping.
1449  * @return These are the possible return values:
1450  *     PV_REMOVE_SUCCESS: A PV entry matching the PTE was found and
1451  *                        removed.
1452  *     PV_REMOVE_FAIL: No matching PV entry was found.  This may not be a fatal
1453  *                        condition; for example, pmap_disconnect() on another
1454  *                        thread may have removed the PV entry between removal
1455  *                        of the mapping and acquisition of the PV lock in
1456  *                        pmap_remove().
1457  */
1458 pv_remove_return_t
1459 pmap_remove_pv(
1460 	pmap_t pmap __assert_only,
1461 	pt_entry_t *ptep,
1462 	locked_pvh_t *locked_pvh,
1463 	bool *is_internal_p,
1464 	bool *is_altacct_p)
1465 {
1466 	PMAP_ASSERT_NOT_WRITING_HIB();
1467 	assert(locked_pvh != NULL);
1468 
1469 	pv_remove_return_t ret = PV_REMOVE_SUCCESS;
1470 	const unsigned int pai = locked_pvh->pai;
1471 	bool is_internal = false;
1472 	bool is_altacct = false;
1473 
1474 
1475 	if (pvh_test_type(locked_pvh->pvh, PVH_TYPE_PTEP)) {
1476 		if (__improbable((ptep != pvh_ptep(locked_pvh->pvh)))) {
1477 			return PV_REMOVE_FAIL;
1478 		}
1479 
1480 		pvh_update_head(locked_pvh, PV_ENTRY_NULL, PVH_TYPE_NULL);
1481 		is_internal = ppattr_is_internal(pai);
1482 		is_altacct = ppattr_is_altacct(pai);
1483 	} else if (pvh_test_type(locked_pvh->pvh, PVH_TYPE_PVEP)) {
1484 		pv_entry_t **pvepp = NULL;
1485 		pv_entry_t *pvep = pvh_pve_list(locked_pvh->pvh);
1486 		assert(pvep != PV_ENTRY_NULL);
1487 		unsigned int npves = 0;
1488 		int pve_pte_idx = 0;
1489 		/* Find the PVE that represents the mapping we're removing. */
1490 		while ((pvep != PV_ENTRY_NULL) && ((pve_pte_idx = pve_find_ptep_index(pvep, ptep)) == -1)) {
1491 			if (__improbable(npves == (SPTM_MAPPING_LIMIT / PTE_PER_PVE))) {
1492 				pvh_lock_enter_sleep_mode(locked_pvh);
1493 			}
1494 			pvepp = pve_next_ptr(pvep);
1495 			pvep = pve_next(pvep);
1496 			npves++;
1497 		}
1498 
1499 		if (__improbable((pvep == PV_ENTRY_NULL))) {
1500 			return PV_REMOVE_FAIL;
1501 		}
1502 
1503 		is_internal = pve_get_internal(pvep, pve_pte_idx);
1504 		is_altacct = pve_get_altacct(pvep, pve_pte_idx);
1505 		pve_set_ptep(pvep, pve_pte_idx, PT_ENTRY_NULL);
1506 
1507 #if MACH_ASSERT
1508 		/**
1509 		 * Ensure that the mapping didn't accidentally have multiple PVEs
1510 		 * associated with it (there should only be one PVE per mapping). This
1511 		 * checking only occurs on configurations that can accept the perf hit
1512 		 * that walking the PVE chain on every unmap entails.
1513 		 *
1514 		 * This is skipped for IOMMU mappings because some IOMMUs don't use
1515 		 * normal page tables (e.g., NVMe) to map pages, so the `ptep` field in
1516 		 * the associated PVE won't actually point to a real page table (see the
1517 		 * definition of PVH_FLAG_IOMMU_TABLE for more info). Because of that,
1518 		 * it's perfectly possible for duplicate IOMMU PVEs to exist.
1519 		 */
1520 		if ((pmap != NULL) && (kern_feature_override(KF_PMAPV_OVRD) == FALSE)) {
1521 			pv_entry_t *check_pvep = pvep;
1522 
1523 			do {
1524 				if (__improbable(npves == (SPTM_MAPPING_LIMIT / PTE_PER_PVE))) {
1525 					pvh_lock_enter_sleep_mode(locked_pvh);
1526 				}
1527 				if (pve_find_ptep_index(check_pvep, ptep) != -1) {
1528 					panic_plain("%s: duplicate pve entry ptep=%p pmap=%p, pvh=%p, "
1529 					    "pvep=%p, pai=0x%x", __func__, ptep, pmap,
1530 					    (void*)locked_pvh->pvh, pvep, pai);
1531 				}
1532 				npves++;
1533 			} while ((check_pvep = pve_next(check_pvep)) != PV_ENTRY_NULL);
1534 		}
1535 #endif /* MACH_ASSERT */
1536 
1537 		const bool pve_is_first = (pvepp == NULL);
1538 		const bool pve_is_last = (pve_next(pvep) == PV_ENTRY_NULL);
1539 		const int other_pte_idx = !pve_pte_idx;
1540 
1541 		if (pve_is_empty(pvep)) {
1542 			/*
1543 			 * This PVE doesn't contain any mappings. We can get rid of it.
1544 			 */
1545 			pve_remove(locked_pvh, pvepp, pvep);
1546 			pv_free(pvep);
1547 		} else if (!pve_is_first) {
1548 			/*
1549 			 * This PVE contains a single mapping. See if we can coalesce it with the one
1550 			 * at the top of the list.
1551 			 */
1552 			pv_entry_t *head_pvep = pvh_pve_list(locked_pvh->pvh);
1553 			int head_pve_pte_empty_idx;
1554 			if ((head_pve_pte_empty_idx = pve_find_ptep_index(head_pvep, PT_ENTRY_NULL)) != -1) {
1555 				pve_set_ptep(head_pvep, head_pve_pte_empty_idx, pve_get_ptep(pvep, other_pte_idx));
1556 				if (pve_get_internal(pvep, other_pte_idx)) {
1557 					pve_set_internal(head_pvep, head_pve_pte_empty_idx);
1558 				}
1559 				if (pve_get_altacct(pvep, other_pte_idx)) {
1560 					pve_set_altacct(head_pvep, head_pve_pte_empty_idx);
1561 				}
1562 				pve_remove(locked_pvh, pvepp, pvep);
1563 				pv_free(pvep);
1564 			} else {
1565 				/*
1566 				 * We could not coalesce it. Move it to the start of the list, so that it
1567 				 * can be coalesced against in the future.
1568 				 */
1569 				*pvepp = pve_next(pvep);
1570 				pve_add(locked_pvh, pvep);
1571 			}
1572 		} else if (pve_is_first && pve_is_last) {
1573 			/*
1574 			 * This PVE contains a single mapping, and it's the last mapping for this PAI.
1575 			 * Collapse this list back into the head, turning it into a PVH_TYPE_PTEP entry.
1576 			 */
1577 			assertf(pvh_pve_list(locked_pvh->pvh) == pvep, "%s: pvh %p != pvep %p",
1578 			    __func__, (void*)locked_pvh->pvh, pvep);
1579 			pvh_update_head(locked_pvh, pve_get_ptep(pvep, other_pte_idx), PVH_TYPE_PTEP);
1580 			pp_attr_t attrs_to_set = 0;
1581 			if (pve_get_internal(pvep, other_pte_idx)) {
1582 				attrs_to_set |= PP_ATTR_INTERNAL;
1583 			}
1584 			if (pve_get_altacct(pvep, other_pte_idx)) {
1585 				attrs_to_set |= PP_ATTR_ALTACCT;
1586 			}
1587 			if (attrs_to_set != 0) {
1588 				ppattr_modify_bits(pai, 0, attrs_to_set);
1589 			}
1590 			pv_free(pvep);
1591 		}
1592 	} else {
1593 		/*
1594 		 * A concurrent disconnect operation may have already cleared the PVH to PVH_TYPE_NULL.
1595 		 * It's also possible that a subsequent page table allocation may have transitioned
1596 		 * the PVH to PVH_TYPE_PTDP.
1597 		 */
1598 		return PV_REMOVE_FAIL;
1599 	}
1600 
1601 	if (pvh_test_type(locked_pvh->pvh, PVH_TYPE_NULL)) {
1602 		pvh_set_flags(locked_pvh, 0);
1603 		const pmap_paddr_t pa = pai_to_pa(pai);
1604 		pmap_prepare_unmapped_page_for_retype(pa);
1605 		pp_attr_t attrs_to_clear = 0;
1606 		if (is_internal) {
1607 			attrs_to_clear |= PP_ATTR_INTERNAL;
1608 		}
1609 		if (is_altacct) {
1610 			attrs_to_clear |= PP_ATTR_ALTACCT;
1611 		}
1612 		if (attrs_to_clear != 0) {
1613 			ppattr_modify_bits(pai, attrs_to_clear, 0);
1614 		}
1615 		/* If removing the last mapping to a specially-protected page, retype the page back to XNU_DEFAULT. */
1616 		pmap_retype_unmapped_page(pa);
1617 	}
1618 
1619 	*is_internal_p = is_internal;
1620 	*is_altacct_p = is_altacct;
1621 	return ret;
1622 }
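
/*
 * Editor's note: an illustrative sketch (not from a real call site) of the
 * removal path that pairs with pmap_enter_pv() above. pvh_lock()/pvh_unlock()
 * are assumed from the surrounding documentation.
 *
 *	locked_pvh_t locked_pvh = pvh_lock(pai);
 *	bool is_internal = false;
 *	bool is_altacct = false;
 *
 *	if (pmap_remove_pv(pmap, ptep, &locked_pvh, &is_internal,
 *	    &is_altacct) == PV_REMOVE_FAIL) {
 *		...not necessarily fatal: a concurrent pmap_disconnect() may have
 *		   already removed the entry, as noted above...
 *	}
 *	pvh_unlock(&locked_pvh);
 */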
1623 
1624 /**
1625  * Bootstrap the initial Page Table Descriptor (PTD) node free list.
1626  *
1627  * @note It's not safe to allocate PTD nodes until after this function is
1628  *       invoked.
1629  *
1630  * @note The maximum number of PTD objects that can reside within one page
1631  *       (`ptd_per_page`) must have already been calculated before calling this
1632  *       function.
1633  *
1634  * @param ptdp Pointer to the virtually-contiguous memory used for the initial
1635  *             free list.
1636  * @param num_pages The number of virtually-contiguous pages pointed to by
1637  *                  `ptdp` that will be used to prime the PTD allocator.
1638  */
1639 MARK_AS_PMAP_TEXT void
1640 ptd_bootstrap(pt_desc_t *ptdp, unsigned int num_pages)
1641 {
1642 	assert(ptd_per_page > 0);
1643 	assert((ptdp != NULL) && (((uintptr_t)ptdp & PAGE_MASK) == 0) && (num_pages > 0));
1644 
1645 	/**
1646 	 * Region represented by ptdp should be cleared by pmap_bootstrap().
1647 	 *
1648 	 * Only part of each page is being used for PTD objects (the rest is used
1649 	 * for each PTD's associated ptd_info_t objects), so link the last PTD
1650 	 * element of each page to the first element of the previously-linked page.
1651 	 */
1652 	for (int i = 0; i < num_pages; i++) {
1653 		*((void**)(&ptdp[ptd_per_page - 1])) = (void*)ptd_free_list;
1654 		ptd_free_list = ptdp;
1655 		ptdp = (void *)(((uint8_t *)ptdp) + PAGE_SIZE);
1656 	}
1657 
1658 	ptd_free_count = num_pages * ptd_per_page;
1659 	simple_lock_init(&ptd_free_list_lock, 0);
1660 }
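
/*
 * Editor's note: illustrative layout after ptd_bootstrap(ptdp, 2), assuming a
 * hypothetical ptd_per_page of 4. Only the last element of each page has its
 * next pointer set; the other elements stay zeroed and are handed out
 * sequentially by ptd_alloc_unlinked() below.
 *
 *	ptd_free_list -> page1[0] ... page1[3], page1[3].next = &page0[0]
 *	                 page0[0] ... page0[3], page0[3].next = NULL (initial head)
 *	ptd_free_count  = 2 * ptd_per_page = 8
 */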
1661 
1662 /**
1663  * Allocate a page table descriptor (PTD) object from the PTD free list, but
1664  * don't add it to the list of reclaimable userspace page table pages just yet
1665  * and don't associate the PTD with a specific pmap (that's what "unlinked"
1666  * means here).
1667  *
1668  * @param alloc_flags Allocation flags passed to pmap_page_alloc(). See the
1669  *                    definition of that function for a detailed description of
1670  *                    the available flags.
1671  *
1672  * @return The page table descriptor object if the allocation was successful, or
1673  *         NULL otherwise (which indicates that a page failed to be allocated
1674  *         for new nodes).
1675  */
1676 MARK_AS_PMAP_TEXT pt_desc_t*
1677 ptd_alloc_unlinked(unsigned int alloc_flags)
1678 {
1679 	pt_desc_t *ptdp = PTD_ENTRY_NULL;
1680 
1681 	pmap_simple_lock(&ptd_free_list_lock);
1682 
1683 	assert(ptd_per_page != 0);
1684 
1685 	/**
1686 	 * Ensure that we either have a free list with nodes available, or a
1687 	 * completely empty list to allocate and prepend new nodes to.
1688 	 */
1689 	assert(((ptd_free_list != NULL) && (ptd_free_count > 0)) ||
1690 	    ((ptd_free_list == NULL) && (ptd_free_count == 0)));
1691 
1692 	if (__improbable(ptd_free_count == 0)) {
1693 		pmap_paddr_t pa = 0;
1694 
1695 		/**
1696 		 * Drop the lock while allocating pages since that can take a while and
1697 		 * because preemption has to be enabled when allocating memory.
1698 		 */
1699 		pmap_simple_unlock(&ptd_free_list_lock);
1700 
1701 		if (pmap_page_alloc(&pa, alloc_flags) != KERN_SUCCESS) {
1702 			return NULL;
1703 		}
1704 		ptdp = (pt_desc_t *)phystokv(pa);
1705 
1706 		pmap_simple_lock(&ptd_free_list_lock);
1707 		ptd_page_count++;
1708 
1709 		/**
1710 		 * Since the lock was dropped while allocating, it's possible another
1711 		 * CPU already allocated a page. To be safe, prepend the current free
1712 		 * list (which may or may not be empty now) to the page of nodes just
1713 		 * allocated and update the head to point to these new nodes.
1714 		 */
1715 		*((void**)(&ptdp[ptd_per_page - 1])) = (void*)ptd_free_list;
1716 		ptd_free_list = ptdp;
1717 		ptd_free_count += ptd_per_page;
1718 	}
1719 
1720 	/* There should be available nodes at this point. */
1721 	if (__improbable((ptd_free_count == 0) || (ptd_free_list == PTD_ENTRY_NULL))) {
1722 		panic_plain("%s: out of PTD entries and for some reason didn't "
1723 		    "allocate more %d %p", __func__, ptd_free_count, ptd_free_list);
1724 	}
1725 
1726 	/* Grab the top node off of the free list to return later. */
1727 	ptdp = ptd_free_list;
1728 
1729 	/**
1730 	 * Advance the free list to the next node.
1731 	 *
1732 	 * Each free pt_desc_t-sized object in this free list uses the first few
1733 	 * bytes of the object to point to the next object in the list. When an
1734 	 * object is deallocated (in ptd_deallocate()) the object is prepended onto
1735 	 * the free list by setting its first few bytes to point to the current free
1736 	 * list head. Then the head is updated to point to that object.
1737 	 *
1738 	 * When a new page is allocated for PTD nodes, it's left zeroed out. Once we
1739 	 * use up all of the previously deallocated nodes, the list will point
1740 	 * somewhere into the last allocated, empty page. We know we're pointing at
1741 	 * this page because the first few bytes of the object will be NULL. In
1742 	 * that case just set the head to this empty object.
1743 	 *
1744 	 * This empty page can be thought of as a "reserve" of empty nodes for the
1745 	 * case where more nodes are being allocated than there are nodes being
1746 	 * deallocated.
1747 	 */
1748 	pt_desc_t *const next_node = (pt_desc_t *)(*(void **)ptd_free_list);
1749 
1750 	/**
1751 	 * If the next node in the list is NULL but there are supposed to still be
1752 	 * nodes left, then we've hit the previously allocated empty page of nodes.
1753 	 * Go ahead and advance the free list to the next free node in that page.
1754 	 */
1755 	if ((next_node == PTD_ENTRY_NULL) && (ptd_free_count > 1)) {
1756 		ptd_free_list = ptd_free_list + 1;
1757 	} else {
1758 		ptd_free_list = next_node;
1759 	}
1760 
1761 	ptd_free_count--;
1762 
1763 	pmap_simple_unlock(&ptd_free_list_lock);
1764 
1765 	ptdp->pmap = NULL;
1766 
1767 	/**
1768 	 * Calculate and stash the address of the ptd_info_t associated with this
1769 	 * PTD. This can be done easily because both structures co-exist in the same
1770 	 * page, with ptd_info_t's starting at a given offset from the start of the
1771 	 * page.
1772 	 *
1773 	 * Each PTD is associated with a ptd_info_t of the same index. For example,
1774 	 * the 15th PTD will use the 15th ptd_info_t in the same page.
1775 	 */
1776 	const unsigned ptd_index = ((uintptr_t)ptdp & PAGE_MASK) / sizeof(pt_desc_t);
1777 	assert(ptd_index < ptd_per_page);
1778 
1779 	const uintptr_t start_of_page = (uintptr_t)ptdp & ~PAGE_MASK;
1780 	ptd_info_t *first_ptd_info = (ptd_info_t *)(start_of_page + ptd_info_offset);
1781 	ptdp->ptd_info = &first_ptd_info[ptd_index];
1782 
1783 	ptdp->va = (vm_offset_t)-1;
1784 	ptdp->ptd_info->wiredcnt = 0;
1785 
1786 	return ptdp;
1787 }
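
/*
 * Editor's note: a worked example of the PTD/ptd_info_t pairing above, using
 * hypothetical values (sizeof(pt_desc_t) == 64 bytes, ptd_info_offset == 0x3000,
 * 16K pages); the real sizes are configuration-dependent.
 *
 *	ptdp            = page_base + 0x3c0
 *	ptd_index       = 0x3c0 / 64 = 15
 *	ptdp->ptd_info  = (ptd_info_t *)(page_base + 0x3000) + 15
 *
 * i.e. the PTD at index 15 within the page is paired with the ptd_info_t at
 * index 15 of the same page.
 */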
1788 
1789 /**
1790  * Allocate a single page table descriptor (PTD) object.
1791  *
1792  * @param pmap The pmap object that will be owning the page table(s) that this
1793  *             descriptor object represents.
1794  * @param alloc_flags Allocation flags passed to ptd_alloc_unlinked(). See the
1795  *                    definition of that function for a detailed description of
1796  *                    the available flags.
1797  *
1798  * @return The allocated PTD object, or NULL if one failed to get allocated
1799  *         (which indicates that memory wasn't able to get allocated).
1800  */
1801 MARK_AS_PMAP_TEXT pt_desc_t*
1802 ptd_alloc(pmap_t pmap, unsigned int alloc_flags)
1803 {
1804 	pt_desc_t *ptdp = ptd_alloc_unlinked(alloc_flags);
1805 
1806 	if (ptdp == NULL) {
1807 		return NULL;
1808 	}
1809 
1810 	ptdp->pmap = pmap;
1811 
1812 	pmap_tt_ledger_credit(pmap, sizeof(*ptdp));
1813 	return ptdp;
1814 }
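
/*
 * Editor's note: an illustrative sketch (not from a real call site) of the
 * typical PTD lifecycle using the functions in this file. Passing 0 for the
 * allocation flags is an assumption standing in for "default behavior"; error
 * handling is elided.
 *
 *	pt_desc_t *ptdp = ptd_alloc(pmap, 0);
 *	if (ptdp != NULL) {
 *		ptd_info_init(ptdp, pmap, va, level, ptep);
 *		...use the descriptor while the page table is live...
 *		ptd_deallocate(ptdp);
 *	}
 */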
1815 
1816 /**
1817  * Deallocate a single page table descriptor (PTD) object.
1818  *
1819  * @note Ledger statistics are tracked on a per-pmap basis, so for those pages
1820  *       which are not associated with any specific pmap (e.g., IOMMU pages),
1821  *       the caller must ensure that the pmap/iommu field in the PTD object is
1822  *       NULL before calling this function.
1823  *
1824  * @param ptdp Pointer to the PTD object to deallocate.
1825  */
1826 MARK_AS_PMAP_TEXT void
1827 ptd_deallocate(pt_desc_t *ptdp)
1828 {
1829 	pmap_t pmap = ptdp->pmap;
1830 
1831 	/* Prepend the deallocated node to the free list. */
1832 	pmap_simple_lock(&ptd_free_list_lock);
1833 	(*(void **)ptdp) = (void *)ptd_free_list;
1834 	ptd_free_list = (pt_desc_t *)ptdp;
1835 	ptd_free_count++;
1836 	pmap_simple_unlock(&ptd_free_list_lock);
1837 
1838 	/**
1839 	 * If this PTD was being used to represent an IOMMU page then there won't be
1840 	 * an associated pmap, and therefore no ledger statistics to update.
1841 	 */
1842 	if ((uintptr_t)pmap != IOMMU_INSTANCE_NULL) {
1843 		pmap_tt_ledger_debit(pmap, sizeof(*ptdp));
1844 	}
1845 }
1846 
1847 /**
1848  * In address spaces where the VM page size is larger than the underlying
1849  * hardware page size, one page table descriptor (PTD) object can represent
1850  * multiple page tables. Some fields (like the reference counts) still need to
1851  * be tracked on a per-page-table basis. Because of this, those values are
1852  * stored in a separate array of ptd_info_t objects within the PTD where there's
1853  * one ptd_info_t for every page table a single PTD can manage.
1854  *
1855  * This function initializes the correct ptd_info_t field within a PTD based on
1856  * the page table it's representing.
1857  *
1858  * @param ptdp Pointer to the PTD object which contains the ptd_info_t field to
1859  *             update. Must match up with the `pmap` and `ptep` parameters.
1860  * @param pmap The pmap that owns the page table managed by the passed in PTD.
1861  * @param va Any virtual address that resides within the virtual address space
1862  *           being mapped by the page table pointed to by `ptep`.
1863  * @param level The level in the page table hierarchy that the table resides.
1864  * @param ptep A pointer into a page table that the passed in PTD manages. This
1865  *             page table must be owned by `pmap` and be the PTE that maps `va`.
1866  */
1867 MARK_AS_PMAP_TEXT void
1868 ptd_info_init(
1869 	pt_desc_t *ptdp,
1870 	pmap_t pmap,
1871 	vm_map_address_t va,
1872 	unsigned int level,
1873 	pt_entry_t *ptep)
1874 {
1875 	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
1876 
1877 	if (ptdp->pmap != pmap) {
1878 		panic("%s: pmap mismatch, ptdp=%p, pmap=%p, va=%p, level=%u, ptep=%p",
1879 		    __func__, ptdp, pmap, (void*)va, level, ptep);
1880 	}
1881 
1882 	/**
1883 	 * Root tables are managed separately, and can be accessed through the
1884 	 * pmap structure itself (there's only one root table per address space).
1885 	 */
1886 	assert(level > pt_attr_root_level(pt_attr));
1887 
1888 	/**
1889 	 * The "va" field represents the first virtual address that this page table
1890 	 * is translating for. Naturally, this is dependent on the level the page
1891 	 * table resides at since more VA space is mapped the closer the page
1892 	 * table's level is to the root.
1893 	 */
1894 	ptdp->va = (vm_offset_t) va & ~pt_attr_ln_pt_offmask(pt_attr, level - 1);
1895 }
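
/*
 * Editor's note: a worked example of the masking above, assuming a 16K
 * translation granule where a leaf (level-3) table maps a 32MB region, so
 * pt_attr_ln_pt_offmask(pt_attr, 2) == 0x1ffffff (an assumed value):
 *
 *	va        = 0x0000000123456000
 *	ptdp->va  = 0x0000000123456000 & ~0x1ffffff = 0x0000000122000000
 *
 * i.e. "va" is rounded down to the first address translated by that table.
 */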
1896 
1897 /**
1898  * Credit a specific ledger entry within the passed in pmap's ledger object.
1899  *
1900  * @param pmap The pmap whose ledger should be updated.
1901  * @param entry The specific ledger entry to update. This needs to be one of the
1902  *              task_ledger entries.
1903  * @param amount The amount to credit to the ledger entry.
1904  *
1905  * @return The return value from the credit operation.
1906  */
1907 kern_return_t
1908 pmap_ledger_credit(pmap_t pmap, int entry, ledger_amount_t amount)
1909 {
1910 	assert(pmap != NULL);
1911 
1912 	return ledger_credit(pmap->ledger, entry, amount);
1913 }
1914 
1915 /**
1916  * Debit a specific ledger entry within the passed in pmap's ledger object.
1917  *
1918  * @param pmap The pmap whose ledger should be updated.
1919  * @param entry The specific ledger entry to update. This needs to be one of the
1920  *              task_ledger entries.
1921  * @param amount The amount to debit from the ledger.
1922  *
1923  * @return The return value from the debit operation.
1924  */
1925 kern_return_t
1926 pmap_ledger_debit(pmap_t pmap, int entry, ledger_amount_t amount)
1927 {
1928 	assert(pmap != NULL);
1929 
1930 	return ledger_debit(pmap->ledger, entry, amount);
1931 }
1932 
1933 /**
1934  * Validate that the pointer passed into this method is a valid pmap object.
1935  *
1936  * @param pmap The pointer to validate.
1937  * @param func The stringized function name of the caller that will be printed
1938  *             in the case that the validation fails.
1939  */
1940 void
1941 validate_pmap_internal(const volatile struct pmap *pmap, const char *func)
1942 {
1943 	#pragma unused(pmap, func)
1944 	assert(pmap != NULL);
1945 }
1946 
1947 /**
1948  * Validate that the pointer passed into this method is a valid pmap object and
1949  * is safe to both read and write.
1950  *
1951  * @param pmap The pointer to validate.
1952  * @param func The stringized function name of the caller that will be printed
1953  *             in the case that the validation fails.
1954  */
1955 void
1956 validate_pmap_mutable_internal(const volatile struct pmap *pmap, const char *func)
1957 {
1958 	#pragma unused(pmap, func)
1959 	assert(pmap != NULL);
1960 }
1961 
1962 /**
1963  * Validate that the passed in pmap pointer is a pmap object that was allocated
1964  * by the pmap and not just random memory.
1965  *
1966  * This function will panic if the validation fails.
1967  *
1968  * @param pmap The object to validate.
1969  */
1970 void
1971 pmap_require(pmap_t pmap)
1972 {
1973 	if (pmap != kernel_pmap) {
1974 		zone_id_require(ZONE_ID_PMAP, sizeof(struct pmap), pmap);
1975 	}
1976 }
1977 
1978 /**
1979  * Helper function used when sorting and searching SPTM/PPL I/O ranges.
1980  *
1981  * @param a The first SPTM/PPL I/O range to compare.
1982  * @param b The second SPTM/PPL I/O range to compare.
1983  *
1984  * @return < 0 for a < b
1985  *           0 for a == b
1986  *         > 0 for a > b
1987  */
1988 static int
1989 cmp_io_rgns(const void *a, const void *b)
1990 {
1991 	const pmap_io_range_t *range_a = a;
1992 	const pmap_io_range_t *range_b = b;
1993 
1994 	if ((range_b->addr + range_b->len) <= range_a->addr) {
1995 		return 1;
1996 	} else if ((range_a->addr + range_a->len) <= range_b->addr) {
1997 		return -1;
1998 	} else {
1999 		return 0;
2000 	}
2001 }
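
/*
 * Editor's note: this comparator reports "equal" whenever the two ranges
 * overlap at all, not only when they are identical. That is what lets the
 * binary search in pmap_find_io_attr() below, which probes with a single-page
 * range, land on the registered range containing that page. Illustrative
 * values assuming a 16K page size:
 *
 *	probe      = { .addr = 0x200000000, .len = 0x4000   }
 *	registered = { .addr = 0x1fff80000, .len = 0x100000 }
 *
 * The registered range ends at 0x200080000 (> probe.addr) and the probe ends
 * at 0x200004000 (> registered.addr), so cmp_io_rgns() returns 0: a match.
 */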
2002 
2003 /**
2004  * Find and return the SPTM/PPL I/O range that contains the passed in physical
2005  * address.
2006  *
2007  * @note This function performs a binary search on the already sorted
2008  *       io_attr_table, so it should be reasonably fast.
2009  *
2010  * @param paddr The physical address to query a specific I/O range for.
2011  *
2012  * @return A pointer to the pmap_io_range_t structure if one of the ranges
2013  *         contains the passed in physical address. Otherwise, NULL.
2014  */
2015 pmap_io_range_t*
2016 pmap_find_io_attr(pmap_paddr_t paddr)
2017 {
2018 	unsigned int begin = 0;
2019 	unsigned int end = num_io_rgns - 1;
2020 
2021 	/**
2022 	 * If there are no I/O ranges, or the wanted address is below the lowest
2023 	 * range or above the highest range, then there's no point in searching
2024 	 * since it won't be here.
2025 	 */
2026 	if ((num_io_rgns == 0) || (paddr < io_attr_table[begin].addr) ||
2027 	    (paddr >= (io_attr_table[end].addr + io_attr_table[end].len))) {
2028 		return NULL;
2029 	}
2030 
2031 	/**
2032 	 * A dummy I/O range to compare against when searching for a range that
2033 	 * includes `paddr`.
2034 	 */
2035 	const pmap_io_range_t wanted_range = {
2036 		.addr = paddr & ~PAGE_MASK,
2037 		.len = PAGE_SIZE
2038 	};
2039 
2040 	/* Perform a binary search to find the wanted I/O range. */
2041 	for (;;) {
2042 		const unsigned int middle = (begin + end) / 2;
2043 		const int cmp = cmp_io_rgns(&wanted_range, &io_attr_table[middle]);
2044 
2045 		if (cmp == 0) {
2046 			/* Success! Found the wanted I/O range. */
2047 			return &io_attr_table[middle];
2048 		} else if (begin == end) {
2049 			/* We've checked every range and didn't find a match. */
2050 			break;
2051 		} else if (cmp > 0) {
2052 			/* The wanted range is above the middle. */
2053 			begin = middle + 1;
2054 		} else {
2055 			/* The wanted range is below the middle. */
2056 			end = middle;
2057 		}
2058 	}
2059 
2060 	return NULL;
2061 }
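
/*
 * Editor's note: an illustrative (assumed) lookup pattern; only the `addr` and
 * `len` fields of pmap_io_range_t are referenced here because they are the
 * only fields visible in this file.
 *
 *	pmap_io_range_t *rgn = pmap_find_io_attr(paddr);
 *	if (rgn != NULL) {
 *		...paddr lies within [rgn->addr, rgn->addr + rgn->len)...
 *	}
 */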
2062 
2063 /**
2064  * Initialize the pmap per-CPU data structure for a single CPU. This is called
2065  * once for each CPU in the system, on the CPU whose per-cpu data needs to be
2066  * initialized.
2067  *
2068  * In reality, many of the per-cpu data fields will have either already been
2069  * initialized or will rely on the fact that the per-cpu data is either zeroed
2070  * out during allocation (on non-PPL systems), or the data itself is a global
2071  * variable which will be zeroed by default (on PPL systems).
2072  *
2073  * @param cpu_number The number of the CPU whose pmap per-cpu data should be
2074  *                   initialized. This number should correspond to the CPU
2075  *                   executing this code.
2076  */
2077 MARK_AS_PMAP_TEXT void
2078 pmap_cpu_data_init_internal(unsigned int cpu_number)
2079 {
2080 	pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data();
2081 
2082 	pmap_cpu_data->cpu_number = cpu_number;
2083 
2084 	/* Setup per-cpu fields used when calling into the SPTM. */
2085 	pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
2086 	assert(((uintptr_t)sptm_pcpu & (PMAP_SPTM_PCPU_ALIGN - 1)) == 0);
2087 	sptm_pcpu->sptm_ops_pa = kvtophys_nofail((vm_offset_t)sptm_pcpu->sptm_ops);
2088 	sptm_pcpu->sptm_templates_pa = kvtophys_nofail((vm_offset_t)sptm_pcpu->sptm_templates);
2089 	sptm_pcpu->sptm_guest_dispatch_paddr = kvtophys_nofail((vm_offset_t)&sptm_pcpu->sptm_guest_dispatch);
2090 
2091 	const uint16_t sptm_cpu_number = sptm_cpu_id(ml_get_topology_info()->cpus[cpu_number].phys_id);
2092 	sptm_pcpu->sptm_cpu_id = sptm_cpu_number;
2093 
2094 	const pmap_paddr_t iommu_scratch =
2095 	    sptm_cpu_iommu_scratch_start + (sptm_cpu_number * PMAP_IOMMU_SCRATCH_SIZE);
2096 	assert(iommu_scratch <= (sptm_cpu_iommu_scratch_end - PMAP_IOMMU_SCRATCH_SIZE));
2097 	sptm_pcpu->sptm_iommu_scratch = (void*)phystokv(iommu_scratch);
2098 	sptm_pcpu->sptm_prev_ptes = (sptm_pte_t *)((uintptr_t)(SPTMArgs->sptm_prev_ptes) + (PAGE_SIZE * sptm_cpu_number));
2099 	sptm_pcpu->sptm_cpu_id = sptm_cpu_number;
2100 }
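
/*
 * Editor's note: a worked example of the per-CPU IOMMU scratch carve-out
 * above, using hypothetical values (PMAP_IOMMU_SCRATCH_SIZE == 0x4000,
 * sptm_cpu_iommu_scratch_start == 0x900000000):
 *
 *	sptm_cpu_number = 3
 *	iommu_scratch   = 0x900000000 + 3 * 0x4000 = 0x90000c000
 *
 * Each CPU gets a disjoint, statically-sized window into the region reserved
 * by pmap_cpu_data_array_init() below.
 */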
2101 
2102 /**
2103  * Initialize the pmap per-cpu data for the bootstrap CPU (the other CPUs should
2104  * just call pmap_cpu_data_init() directly).
2105  */
2106 void
2107 pmap_cpu_data_array_init(void)
2108 {
2109 	/**
2110 	 * The EL2 portions of the IOMMU drivers need some memory they can
2111 	 * use to pass data into the SPTM. To save memory (since most IOMMU drivers
2112 	 * need this) and to preclude the need for IOMMU drivers to dynamically
2113 	 * allocate memory in their mapping/unmapping paths, memory is pre-allocated
2114 	 * here per-cpu for their usage.
2115 	 *
2116 	 * SPTM TODO: Only allocate this memory on systems that have IOMMU drivers.
2117 	 */
2118 	sptm_cpu_iommu_scratch_start = avail_start;
2119 	avail_start += MAX_CPUS * PMAP_IOMMU_SCRATCH_SIZE;
2120 	sptm_cpu_iommu_scratch_end = avail_start;
2121 
2122 	pmap_cpu_data_init();
2123 }
2124 
2125 /**
2126  * Retrieve the pmap per-cpu data for the current CPU.
2127  *
2128  * @return The per-cpu pmap data for the current CPU.
2129  */
2130 pmap_cpu_data_t *
2131 pmap_get_cpu_data(void)
2132 {
2133 	pmap_cpu_data_t *pmap_cpu_data = NULL;
2134 
2135 	pmap_cpu_data = &getCpuDatap()->cpu_pmap_cpu_data;
2136 	return pmap_cpu_data;
2137 }
2138 
2139 /**
2140  * Retrieve the pmap per-cpu data for the specified cpu index.
2141  *
2142  * @return The per-cpu pmap data for the CPU
2143  */
2144 pmap_cpu_data_t *
2145 pmap_get_remote_cpu_data(unsigned int cpu)
2146 {
2147 	cpu_data_t *cpu_data = cpu_datap((int)cpu);
2148 	if (cpu_data == NULL) {
2149 		return NULL;
2150 	} else {
2151 		return &cpu_data->cpu_pmap_cpu_data;
2152 	}
2153 }
2154 
2155 /**
2156  * Define the resources we need for spinning
2157  * until a paddr is not inflight.
2158  */
2159 __abortlike
2160 static hw_spin_timeout_status_t
2161 hw_lck_paddr_timeout_panic(void *_lock, hw_spin_timeout_t to, hw_spin_state_t st)
2162 {
2163 	panic("paddr spinlock[%p] " HW_SPIN_TIMEOUT_FMT "; "
2164 	    HW_SPIN_TIMEOUT_DETAILS_FMT,
2165 	    _lock, HW_SPIN_TIMEOUT_ARG(to, st),
2166 	    HW_SPIN_TIMEOUT_DETAILS_ARG(to, st));
2167 }
2168 
2169 static const struct hw_spin_policy hw_paddr_inflight_spin_policy = {
2170 	.hwsp_name              = "hw_lck_paddr_lock",
2171 	.hwsp_timeout_atomic    = &LockTimeOut,
2172 	.hwsp_op_timeout        = hw_lck_paddr_timeout_panic,
2173 };
2174 
2175 /**
2176  * Barrier function for spinning until the given physical page is
2177  * no longer inflight.
2178  *
2179  * @param paddr The physical address we want to spin until is not inflight.
2180  */
2181 static __attribute__((noinline)) void
2182 pmap_paddr_inflight_barrier(pmap_paddr_t paddr)
2183 {
2184 	hw_spin_policy_t  pol = &hw_paddr_inflight_spin_policy;
2185 	hw_spin_timeout_t to;
2186 	hw_spin_state_t   state  = { };
2187 
2188 	disable_preemption();
2189 	to  = hw_spin_compute_timeout(pol);
2190 	while (sptm_paddr_is_inflight(paddr) &&
2191 	    hw_spin_should_keep_spinning((void*)paddr, pol, to, &state)) {
2192 		;
2193 	}
2194 	enable_preemption();
2195 }
2196 
2197 /**
2198  * Convenience function for checking if a given physical page is inflight.
2199  *
2200  * @param paddr The physical address to query.
2201  *
2202  * @return true if the page in question has no mappings, false otherwise.
2203  */
2204 inline bool
2205 pmap_is_page_free(pmap_paddr_t paddr)
2206 {
2207 	/**
2208 	 * We can't query the paddr refcounts if the physical page
2209 	 * is currently inflight. If it is, we spin until it's not.
2210 	 */
2211 	if (__improbable(sptm_paddr_is_inflight(paddr))) {
2212 		pmap_paddr_inflight_barrier(paddr);
2213 	}
2214 
2215 	/**
2216 	 * A barrier from the last inflight operation. This allows us
2217 	 * to have proper visibility for the refcounts. Otherwise,
2218 	 * sptm_frame_is_last_mapping() might see stale values.
2219 	 */
2220 	os_atomic_thread_fence(acquire);
2221 
2222 	/**
2223 	 * If sptm_frame_is_last_mapping() returns true when passed
2224 	 * SPTM_REFCOUNT_NONE, the physical page has no mappings.
2225 	 */
2226 	return sptm_frame_is_last_mapping(paddr, SPTM_REFCOUNT_NONE);
2227 }
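
/*
 * Editor's note: an illustrative (assumed) usage pattern; a caller that is
 * about to retype or reuse a physical page could use pmap_is_page_free() to
 * confirm that neither CPU nor IOMMU mappings remain.
 *
 *	if (!pmap_is_page_free(pa)) {
 *		panic("%s: page 0x%llx still has mappings", __func__, (uint64_t)pa);
 *	}
 */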
2228