xref: /xnu-8020.101.4/osfmk/x86_64/pmap.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 /*
60  *	File:	pmap.c
61  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
62  *	(These guys wrote the Vax version)
63  *
64  *	Physical Map management code for Intel i386, i486, and i860.
65  *
66  *	Manages physical address maps.
67  *
68  *	In addition to hardware address maps, this
69  *	module is called upon to provide software-use-only
70  *	maps which may or may not be stored in the same
71  *	form as hardware maps.  These pseudo-maps are
72  *	used to store intermediate results from copy
73  *	operations to and from address spaces.
74  *
75  *	Since the information managed by this module is
76  *	also stored by the logical address mapping module,
77  *	this module may throw away valid virtual-to-physical
78  *	mappings at almost any time.  However, invalidations
79  *	of virtual-to-physical mappings must be done as
80  *	requested.
81  *
82  *	In order to cope with hardware architectures which
83  *	make virtual-to-physical map invalidates expensive,
84  *	this module may delay invalidate or reduced protection
85  *	operations until such time as they are actually
86  *	necessary.  This module is given full information as
87  *	to which processors are currently using which maps,
88  *	and to when physical maps must be made correct.
89  */
90 
91 #include <string.h>
92 #include <mach_ldebug.h>
93 
94 #include <libkern/OSAtomic.h>
95 
96 #include <mach/machine/vm_types.h>
97 
98 #include <mach/boolean.h>
99 #include <kern/thread.h>
100 #include <kern/zalloc.h>
101 #include <kern/zalloc_internal.h>
102 #include <kern/queue.h>
103 #include <kern/ledger.h>
104 #include <kern/mach_param.h>
105 
106 #include <kern/spl.h>
107 
108 #include <vm/pmap.h>
109 #include <vm/pmap_cs.h>
110 #include <vm/vm_map.h>
111 #include <vm/vm_kern.h>
112 #include <mach/vm_param.h>
113 #include <mach/vm_prot.h>
114 #include <vm/vm_object.h>
115 #include <vm/vm_page.h>
116 
117 #include <mach/machine/vm_param.h>
118 #include <machine/thread.h>
119 
120 #include <kern/misc_protos.h>                   /* prototyping */
121 #include <i386/misc_protos.h>
122 #include <i386/i386_lowmem.h>
123 #include <x86_64/lowglobals.h>
124 
125 #include <i386/cpuid.h>
126 #include <i386/cpu_data.h>
127 #include <i386/cpu_number.h>
128 #include <i386/machine_cpu.h>
129 #include <i386/seg.h>
130 #include <i386/serial_io.h>
131 #include <i386/cpu_capabilities.h>
132 #include <i386/machine_routines.h>
133 #include <i386/proc_reg.h>
134 #include <i386/tsc.h>
135 #include <i386/pmap_internal.h>
136 #include <i386/pmap_pcid.h>
137 #if CONFIG_VMX
138 #include <i386/vmx/vmx_cpu.h>
139 #endif
140 
141 #include <vm/vm_protos.h>
142 #include <san/kasan.h>
143 
144 #include <i386/mp.h>
145 #include <i386/mp_desc.h>
146 #include <libkern/kernel_mach_header.h>
147 
148 #include <pexpert/i386/efi.h>
149 #include <libkern/section_keywords.h>
150 #if MACH_ASSERT
151 int pmap_stats_assert = 1;
152 #endif /* MACH_ASSERT */
153 
154 #ifdef IWANTTODEBUG
155 #undef  DEBUG
156 #define DEBUG 1
157 #define POSTCODE_DELAY 1
158 #include <i386/postcode.h>
159 #endif /* IWANTTODEBUG */
160 
161 #ifdef  PMAP_DEBUG
162 #define DBG(x...)       kprintf("DBG: " x)
163 #else
164 #define DBG(x...)
165 #endif
166 /* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
167  * in the trampolines for kernel/user boundary TLB coherency.
168  */
169 char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
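/*
 * The array above has size 1 when the layout condition holds and size -1
 * otherwise, so a violated assumption about the placement of cpu_active_cr3
 * and cpu_tlb_invalid fails the build rather than silently breaking the
 * kernel/user boundary trampolines.
 */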
170 boolean_t pmap_trace = FALSE;
171 
172 boolean_t       no_shared_cr3 = DEBUG;          /* TRUE for DEBUG by default */
173 
174 #if DEVELOPMENT || DEBUG
175 int nx_enabled = 1;                     /* enable no-execute protection -- set during boot */
176 #else
177 const int nx_enabled = 1;
178 #endif
179 
180 #if DEBUG || DEVELOPMENT
181 int allow_data_exec  = VM_ABI_32;       /* 32-bit apps may execute data by default, 64-bit apps may not */
182 int allow_stack_exec = 0;               /* No apps may execute from the stack by default */
183 #else /* DEBUG || DEVELOPMENT */
184 const int allow_data_exec  = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
185 const int allow_stack_exec = 0;         /* No apps may execute from the stack by default */
186 #endif /* DEBUG || DEVELOPMENT */
187 
188 uint64_t max_preemption_latency_tsc = 0;
189 
190 pv_hashed_entry_t     *pv_hash_table;  /* hash lists */
191 
192 uint32_t npvhashmask = 0, npvhashbuckets = 0;
193 
194 pv_hashed_entry_t       pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
195 pv_hashed_entry_t       pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
196 SIMPLE_LOCK_DECLARE(pv_hashed_free_list_lock, 0);
197 SIMPLE_LOCK_DECLARE(pv_hashed_kern_free_list_lock, 0);
198 SIMPLE_LOCK_DECLARE(pv_hash_table_lock, 0);
199 SIMPLE_LOCK_DECLARE(phys_backup_lock, 0);
200 
201 SECURITY_READ_ONLY_LATE(zone_t) pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
202 
203 /*
204  *	First and last physical addresses that we maintain any information
205  *	for.  Initialized to zero so that pmap operations done before
206  *	pmap_init won't touch any non-existent structures.
207  */
208 boolean_t       pmap_initialized = FALSE;/* Has pmap_init completed? */
209 
210 static struct vm_object kptobj_object_store VM_PAGE_PACKED_ALIGNED;
211 static struct vm_object kpml4obj_object_store VM_PAGE_PACKED_ALIGNED;
212 static struct vm_object kpdptobj_object_store VM_PAGE_PACKED_ALIGNED;
213 
214 /*
215  *	Array of physical page attributes for managed pages.
216  *	One byte per physical page.
217  */
218 char            *pmap_phys_attributes;
219 ppnum_t         last_managed_page = 0;
220 
221 unsigned pmap_memory_region_count;
222 unsigned pmap_memory_region_current;
223 
224 pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
225 
226 /*
227  *	Other useful macros.
228  */
229 #define current_pmap()          (vm_map_pmap(current_thread()->map))
230 
231 struct pmap     kernel_pmap_store;
232 SECURITY_READ_ONLY_LATE(pmap_t)          kernel_pmap = NULL;
233 SECURITY_READ_ONLY_LATE(zone_t)          pmap_zone; /* zone of pmap structures */
234 SECURITY_READ_ONLY_LATE(zone_t)          pmap_anchor_zone;
235 SECURITY_READ_ONLY_LATE(zone_t)          pmap_uanchor_zone;
236 int             pmap_debug = 0;         /* flag for debugging prints */
237 
238 unsigned int    inuse_ptepages_count = 0;
239 long long       alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
240 unsigned int    bootstrap_wired_pages = 0;
241 
242 extern  long    NMIPI_acks;
243 
244 SECURITY_READ_ONLY_LATE(boolean_t)       kernel_text_ps_4K = TRUE;
245 
246 extern char     end;
247 
248 static int      nkpt;
249 
250 #if DEVELOPMENT || DEBUG
251 SECURITY_READ_ONLY_LATE(boolean_t)       pmap_disable_kheap_nx = FALSE;
252 SECURITY_READ_ONLY_LATE(boolean_t)       pmap_disable_kstack_nx = FALSE;
253 SECURITY_READ_ONLY_LATE(boolean_t)       wpkernel = TRUE;
254 #else
255 const boolean_t wpkernel = TRUE;
256 #endif
257 
258 extern long __stack_chk_guard[];
259 
260 static uint64_t pmap_eptp_flags = 0;
261 boolean_t pmap_ept_support_ad = FALSE;
262 
263 static void process_pmap_updates(pmap_t, bool, addr64_t, addr64_t);
264 /*
265  *	Map memory at initialization.  The physical addresses being
266  *	mapped are not managed and are never unmapped.
267  *
268  *	For now, VM is already on, we only need to map the
269  *	specified memory.
270  */
271 vm_offset_t
272 pmap_map(
273 	vm_offset_t     virt,
274 	vm_map_offset_t start_addr,
275 	vm_map_offset_t end_addr,
276 	vm_prot_t       prot,
277 	unsigned int    flags)
278 {
279 	kern_return_t   kr;
280 	int             ps;
281 
282 	ps = PAGE_SIZE;
283 	while (start_addr < end_addr) {
284 		kr = pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
285 		    (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
286 
287 		if (kr != KERN_SUCCESS) {
288 			panic("%s: failed pmap_enter, "
289 			    "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x",
290 			    __FUNCTION__,
291 			    (void *)virt, (void *)start_addr, (void *)end_addr, prot, flags);
292 		}
293 
294 		virt += ps;
295 		start_addr += ps;
296 	}
297 	return virt;
298 }
299 
300 extern  char                    *first_avail;
301 extern  vm_offset_t             virtual_avail, virtual_end;
302 extern  pmap_paddr_t            avail_start, avail_end;
303 extern  vm_offset_t             sHIB;
304 extern  vm_offset_t             eHIB;
305 extern  vm_offset_t             stext;
306 extern  vm_offset_t             etext;
307 extern  vm_offset_t             sdata, edata;
308 extern  vm_offset_t             sconst, econst;
309 
310 extern void                     *KPTphys;
311 
312 boolean_t pmap_smep_enabled = FALSE;
313 boolean_t pmap_smap_enabled = FALSE;
314 
315 void
316 pmap_cpu_init(void)
317 {
318 	cpu_data_t      *cdp = current_cpu_datap();
319 
320 	set_cr4(get_cr4() | CR4_PGE);
321 
322 	/*
323 	 * Initialize the per-cpu, TLB-related fields.
324 	 */
325 	cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
326 	cpu_shadowp(cdp->cpu_number)->cpu_kernel_cr3 = cdp->cpu_kernel_cr3;
327 	cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
328 	cdp->cpu_tlb_invalid = 0;
329 	cdp->cpu_task_map = TASK_MAP_64BIT;
330 
331 	pmap_pcid_configure();
332 	if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
333 		pmap_smep_enabled = TRUE;
334 #if     DEVELOPMENT || DEBUG
335 		boolean_t nsmep;
336 		if (PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
337 			pmap_smep_enabled = FALSE;
338 		}
339 #endif
340 		if (pmap_smep_enabled) {
341 			set_cr4(get_cr4() | CR4_SMEP);
342 		}
343 	}
344 	if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
345 		pmap_smap_enabled = TRUE;
346 #if DEVELOPMENT || DEBUG
347 		boolean_t nsmap;
348 		if (PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
349 			pmap_smap_enabled = FALSE;
350 		}
351 #endif
352 		if (pmap_smap_enabled) {
353 			set_cr4(get_cr4() | CR4_SMAP);
354 		}
355 	}
356 
357 #if !MONOTONIC
358 	if (cdp->cpu_fixed_pmcs_enabled) {
359 		boolean_t enable = TRUE;
360 		cpu_pmc_control(&enable);
361 	}
362 #endif /* !MONOTONIC */
363 }
364 
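/*
 * The pmap_ro_zone_* helpers below service writes to elements of read-only
 * zones.  The target element is first validated (size, offset, element
 * alignment, and zone ownership via zone_require_ro()); the store itself is
 * then performed through the physical aperture, since the element's normal
 * virtual mapping is not writable.
 */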
365 static void
366 pmap_ro_zone_validate_element_dst(
367 	zone_id_t           zid,
368 	vm_offset_t         va,
369 	vm_offset_t         offset,
370 	vm_size_t           new_data_size)
371 {
372 	vm_size_t elem_size = zone_elem_size_ro(zid);
373 	vm_offset_t sum = 0, page = trunc_page(va);
374 	if (__improbable(new_data_size > (elem_size - offset))) {
375 		panic("%s: New data size %lu too large for elem size %lu at addr %p",
376 		    __func__, (uintptr_t)new_data_size, (uintptr_t)elem_size, (void*)va);
377 	}
378 	if (__improbable(offset >= elem_size)) {
379 		panic("%s: Offset %lu too large for elem size %lu at addr %p",
380 		    __func__, (uintptr_t)offset, (uintptr_t)elem_size, (void*)va);
381 	}
382 	if (__improbable(os_add3_overflow(va, offset, new_data_size, &sum))) {
383 		panic("%s: Integer addition overflow %p + %lu + %lu = %lu",
384 		    __func__, (void*)va, (uintptr_t)offset, (uintptr_t) new_data_size,
385 		    (uintptr_t)sum);
386 	}
387 	if (__improbable((va - page) % elem_size)) {
388 		panic("%s: Start of element %p is not aligned to element size %lu",
389 		    __func__, (void *)va, (uintptr_t)elem_size);
390 	}
391 
392 	/* Check element is from correct zone */
393 	zone_require_ro(zid, elem_size, (void*)va);
394 }
395 
396 static void
397 pmap_ro_zone_validate_element(
398 	zone_id_t           zid,
399 	vm_offset_t         va,
400 	vm_offset_t         offset,
401 	const vm_offset_t   new_data,
402 	vm_size_t           new_data_size)
403 {
404 	vm_offset_t sum = 0;
405 
406 	if (__improbable(os_add_overflow(new_data, new_data_size, &sum))) {
407 		panic("%s: Integer addition overflow %p + %lu = %lu",
408 		    __func__, (void*)new_data, (uintptr_t)new_data_size, (uintptr_t)sum);
409 	}
410 
411 	pmap_ro_zone_validate_element_dst(zid, va, offset, new_data_size);
412 }
413 
414 void
415 pmap_ro_zone_memcpy(
416 	zone_id_t             zid,
417 	vm_offset_t           va,
418 	vm_offset_t           offset,
419 	const vm_offset_t     new_data,
420 	vm_size_t             new_data_size)
421 {
422 	const pmap_paddr_t pa = kvtophys(va + offset);
423 
424 	if (!new_data || new_data_size == 0) {
425 		return;
426 	}
427 
428 	pmap_ro_zone_validate_element(zid, va, offset, new_data, new_data_size);
429 	/* Write through Physical Aperture */
430 	memcpy((void*)phystokv(pa), (void*)new_data, new_data_size);
431 }
432 
433 uint64_t
434 pmap_ro_zone_atomic_op(
435 	zone_id_t             zid,
436 	vm_offset_t           va,
437 	vm_offset_t           offset,
438 	zro_atomic_op_t       op,
439 	uint64_t              value)
440 {
441 	const pmap_paddr_t pa = kvtophys(va + offset);
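	/*
	 * The low 4 bits of 'op' encode the operand width in bytes; that width
	 * is what must fit within the element, so it is what gets validated below.
	 */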
442 	vm_size_t value_size = op & 0xf;
443 
444 	pmap_ro_zone_validate_element_dst(zid, va, offset, value_size);
445 	/* Write through Physical Aperture */
446 	return __zalloc_ro_mut_atomic(phystokv(pa), op, value);
447 }
448 
449 void
450 pmap_ro_zone_bzero(
451 	zone_id_t         zid,
452 	vm_offset_t       va,
453 	vm_offset_t       offset,
454 	vm_size_t         size)
455 {
456 	const pmap_paddr_t pa = kvtophys(va + offset);
457 	pmap_ro_zone_validate_element(zid, va, offset, 0, size);
458 	bzero((void*)phystokv(pa), size);
459 }
460 
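/*
 * Scale factor, based on the amount of physical memory, used by
 * pmap_bootstrap() to size the PV hash table: npvhashmask defaults to
 * ((NPVHASHBUCKETS << pmap_scale_shift()) - 1) unless the "npvhash"
 * boot-arg overrides it.  For example, sane_size == 16GB yields
 * 4 + (16 - 8)/4 == 6, i.e. the default bucket count scaled by 64.
 */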
461 static uint32_t
462 pmap_scale_shift(void)
463 {
464 	uint32_t scale = 0;
465 
466 	if (sane_size <= 8 * GB) {
467 		scale = (uint32_t)(sane_size / (2 * GB));
468 	} else if (sane_size <= 32 * GB) {
469 		scale = 4 + (uint32_t)((sane_size - (8 * GB)) / (4 * GB));
470 	} else {
471 		scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB)) / (8 * GB)));
472 	}
473 	return scale;
474 }
475 
476 LCK_GRP_DECLARE(pmap_lck_grp, "pmap");
477 LCK_ATTR_DECLARE(pmap_lck_rw_attr, 0, LCK_ATTR_DEBUG);
478 
479 /*
480  *	Bootstrap the system enough to run with virtual memory.
481  *	Map the kernel's code and data, and allocate the system page table.
482  *	Called with mapping OFF.  Page_size must already be set.
483  */
484 
485 void
486 pmap_bootstrap(
487 	__unused vm_offset_t    load_start,
488 	__unused boolean_t      IA32e)
489 {
490 	assert(IA32e);
491 
492 	vm_last_addr = VM_MAX_KERNEL_ADDRESS;   /* Set the highest address
493 	                                         * known to VM */
494 	/*
495 	 *	The kernel's pmap is statically allocated so we don't
496 	 *	have to use pmap_create, which is unlikely to work
497 	 *	correctly at this part of the boot sequence.
498 	 */
499 
500 	kernel_pmap = &kernel_pmap_store;
501 	os_ref_init(&kernel_pmap->ref_count, NULL);
502 #if DEVELOPMENT || DEBUG
503 	kernel_pmap->nx_enabled = TRUE;
504 #endif
505 	kernel_pmap->pm_task_map = TASK_MAP_64BIT;
506 	kernel_pmap->pm_obj = (vm_object_t) NULL;
507 	kernel_pmap->pm_pml4 = IdlePML4;
508 	kernel_pmap->pm_upml4 = IdlePML4;
509 	kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
510 	kernel_pmap->pm_ucr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
511 	kernel_pmap->pm_eptp = 0;
512 
513 	pmap_pcid_initialize_kernel(kernel_pmap);
514 
515 	current_cpu_datap()->cpu_kernel_cr3 = cpu_shadowp(cpu_number())->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
516 
517 	nkpt = NKPT;
518 	OSAddAtomic(NKPT, &inuse_ptepages_count);
519 	OSAddAtomic64(NKPT, &alloc_ptepages_count);
520 	bootstrap_wired_pages = NKPT;
521 
522 	virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
523 	virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
524 
525 	if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof(npvhashmask))) {
526 		npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;
527 	}
528 
529 	npvhashbuckets = npvhashmask + 1;
530 
531 	if (0 != ((npvhashbuckets) & npvhashmask)) {
532 		panic("invalid hash %d, must be ((2^N)-1), "
533 		    "using default %d\n", npvhashmask, NPVHASHMASK);
534 	}
535 
536 	lck_rw_init(&kernel_pmap->pmap_rwl, &pmap_lck_grp, &pmap_lck_rw_attr);
537 	kernel_pmap->pmap_rwl.lck_rw_can_sleep = FALSE;
538 
539 	pmap_cpu_init();
540 
541 	if (pmap_pcid_ncpus) {
542 		printf("PMAP: PCID enabled\n");
543 	}
544 
545 	if (pmap_smep_enabled) {
546 		printf("PMAP: Supervisor Mode Execute Protection enabled\n");
547 	}
548 	if (pmap_smap_enabled) {
549 		printf("PMAP: Supervisor Mode Access Protection enabled\n");
550 	}
551 
552 #if     DEBUG
553 	printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
554 	printf("early_random(): 0x%qx\n", early_random());
555 #endif
556 #if     DEVELOPMENT || DEBUG
557 	boolean_t ptmp;
558 	/* Check if the user has requested disabling stack or heap no-execute
559 	 * enforcement. These are "const" variables; that qualifier is cast away
560 	 * when altering them. The TEXT/DATA const sections are marked
561 	 * write protected later in the kernel startup sequence, so altering
562 	 * them is possible at this point, in pmap_bootstrap().
563 	 */
564 	if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
565 		boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
566 		*pdknxp = TRUE;
567 	}
568 
569 	if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
570 		boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
571 		*pdknhp = TRUE;
572 	}
573 #endif /* DEVELOPMENT || DEBUG */
574 
575 	boot_args *args = (boot_args *)PE_state.bootArgs;
576 	if (args->efiMode == kBootArgsEfiMode32) {
577 		printf("EFI32: kernel virtual space limited to 4GB\n");
578 		virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
579 	}
580 	kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
581 	    (long)KERNEL_BASE, (long)virtual_end);
582 	kprintf("Available physical space from 0x%llx to 0x%llx\n",
583 	    avail_start, avail_end);
584 
585 	/*
586 	 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
587 	 * in the DEBUG kernel) to force the kernel to switch to its own map
588 	 * (and cr3) when control is in kernelspace. The kernel's map does not
589 	 * include (i.e. share) userspace so wild references will cause
590 	 * a panic. Only copyin and copyout are exempt from this.
591 	 */
592 	(void) PE_parse_boot_argn("-no_shared_cr3",
593 	    &no_shared_cr3, sizeof(no_shared_cr3));
594 	if (no_shared_cr3) {
595 		kprintf("Kernel not sharing user map\n");
596 	}
597 
598 #ifdef  PMAP_TRACES
599 	if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof(pmap_trace))) {
600 		kprintf("Kernel traces for pmap operations enabled\n");
601 	}
602 #endif  /* PMAP_TRACES */
603 
604 #if MACH_ASSERT
605 	PE_parse_boot_argn("pmap_asserts", &pmap_asserts_enabled, sizeof(pmap_asserts_enabled));
606 	PE_parse_boot_argn("pmap_stats_assert",
607 	    &pmap_stats_assert,
608 	    sizeof(pmap_stats_assert));
609 #endif /* MACH_ASSERT */
610 }
611 
612 void
613 pmap_virtual_space(
614 	vm_offset_t *startp,
615 	vm_offset_t *endp)
616 {
617 	*startp = virtual_avail;
618 	*endp = virtual_end;
619 }
620 
621 
622 
623 
624 #if HIBERNATION
625 
626 #include <IOKit/IOHibernatePrivate.h>
627 #include <machine/pal_hibernate.h>
628 
629 int32_t         pmap_npages;
630 int32_t         pmap_teardown_last_valid_compact_indx = -1;
631 
632 void    pmap_pack_index(uint32_t);
633 int32_t pmap_unpack_index(pv_rooted_entry_t);
634 
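/*
 * Hibernation teardown compacts pv_head_table[]; each moved entry's original
 * index is stashed in the top 16 bits of its qlink.next (upper half of the
 * index) and qlink.prev (lower half).  Those bits are normally the 0xffff of
 * a canonical kernel pointer, which is why pmap_unpack_index() sets them back
 * to all-ones after extracting the index.
 */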
635 int32_t
636 pmap_unpack_index(pv_rooted_entry_t pv_h)
637 {
638 	int32_t indx = 0;
639 
640 	indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
641 	indx = indx << 16;
642 	indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);
643 
644 	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
645 	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);
646 
647 	return indx;
648 }
649 
650 
651 void
652 pmap_pack_index(uint32_t indx)
653 {
654 	pv_rooted_entry_t       pv_h;
655 
656 	pv_h = &pv_head_table[indx];
657 
658 	*((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
659 	*((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);
660 
661 	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
662 	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
663 }
664 
665 
666 void
667 pal_hib_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
668 {
669 	int32_t         i;
670 	int32_t         compact_target_indx;
671 
672 	compact_target_indx = 0;
673 
674 	for (i = 0; i < pmap_npages; i++) {
675 		if (pv_head_table[i].pmap == PMAP_NULL) {
676 			if (pv_head_table[compact_target_indx].pmap != PMAP_NULL) {
677 				compact_target_indx = i;
678 			}
679 		} else {
680 			pmap_pack_index((uint32_t)i);
681 
682 			if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
683 				/*
684 				 * we've got a hole to fill, so
685 				 * move this pv_rooted_entry_t to its new home
686 				 */
687 				pv_head_table[compact_target_indx] = pv_head_table[i];
688 				pv_head_table[i].pmap = PMAP_NULL;
689 
690 				pmap_teardown_last_valid_compact_indx = compact_target_indx;
691 				compact_target_indx++;
692 			} else {
693 				pmap_teardown_last_valid_compact_indx = i;
694 			}
695 		}
696 	}
697 	*unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx + 1];
698 	*unneeded_end = (addr64_t)&pv_head_table[pmap_npages - 1];
699 
700 	HIBLOG("pal_hib_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
701 }
702 
703 
704 void
705 pal_hib_rebuild_pmap_structs(void)
706 {
707 	int32_t                 cindx, eindx, rindx = 0;
708 	pv_rooted_entry_t       pv_h;
709 
710 	eindx = (int32_t)pmap_npages;
711 
712 	for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
713 		pv_h = &pv_head_table[cindx];
714 
715 		rindx = pmap_unpack_index(pv_h);
716 		assert(rindx < pmap_npages);
717 
718 		if (rindx != cindx) {
719 			/*
720 			 * this pv_rooted_entry_t was moved by pal_hib_teardown_pmap_structs,
721 			 * so move it back to its real location
722 			 */
723 			pv_head_table[rindx] = pv_head_table[cindx];
724 		}
725 		if (rindx + 1 != eindx) {
726 			/*
727 			 * the 'hole' between this pv_rooted_entry_t and the previous
728 			 * pv_rooted_entry_t we moved needs to be initialized as
729 			 * a range of zero'd pv_rooted_entry_t's
730 			 */
731 			bzero((char *)&pv_head_table[rindx + 1], (eindx - rindx - 1) * sizeof(struct pv_rooted_entry));
732 		}
733 		eindx = rindx;
734 	}
735 	if (rindx) {
736 		bzero((char *)&pv_head_table[0], rindx * sizeof(struct pv_rooted_entry));
737 	}
738 
739 	HIBLOG("pal_hib_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
740 }
741 
742 #endif
743 
744 /*
745  * Create pv entries for kernel pages mapped by early startup code.
746  * These have to exist so we can ml_static_mfree() them later.
747  */
748 static void
749 pmap_pv_fixup(vm_offset_t start_va, vm_offset_t end_va)
750 {
751 	ppnum_t           ppn;
752 	pv_rooted_entry_t pv_h;
753 	uint32_t          pgsz;
754 
755 	start_va = round_page(start_va);
756 	end_va = trunc_page(end_va);
757 	while (start_va < end_va) {
758 		pgsz = PAGE_SIZE;
759 		ppn = pmap_find_phys(kernel_pmap, start_va);
760 		if (ppn != 0 && IS_MANAGED_PAGE(ppn)) {
761 			pv_h = pai_to_pvh(ppn);
762 			assert(pv_h->qlink.next == 0);           /* shouldn't be init'd yet */
763 			assert(pv_h->pmap == 0);
764 			pv_h->va_and_flags = start_va;
765 			pv_h->pmap = kernel_pmap;
766 			queue_init(&pv_h->qlink);
767 			if (pmap_query_pagesize(kernel_pmap, start_va) == I386_LPGBYTES) {
768 				pgsz = I386_LPGBYTES;
769 			}
770 		}
771 		start_va += pgsz;
772 	}
773 }
774 
775 /*
776  *	Initialize the pmap module.
777  *	Called by vm_init, to initialize any structures that the pmap
778  *	system needs to map virtual memory.
779  */
780 void
781 pmap_init(void)
782 {
783 	long                    npages;
784 	vm_offset_t             addr;
785 	vm_size_t               s, vsize;
786 	vm_map_offset_t         vaddr;
787 	ppnum_t ppn;
788 
789 
790 	kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
791 	_vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);
792 
793 	kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
794 	_vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);
795 
796 	kernel_pmap->pm_obj = &kptobj_object_store;
797 	_vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);
798 
799 	/*
800 	 *	Allocate memory for the pv_head_table and its lock bits,
801 	 *	the modify bit array, and the pte_page table.
802 	 */
803 
804 	/*
805 	 * Index these arrays from physical page 0 ("zero bias") rather than
806 	 * from avail_start, so they cover all of physical memory.
807 	 */
808 
809 	npages = i386_btop(avail_end);
810 #if HIBERNATION
811 	pmap_npages = (uint32_t)npages;
812 #endif
813 	s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
814 	    + (sizeof(struct pv_hashed_entry_t *) * (npvhashbuckets))
815 	    + pv_lock_table_size(npages)
816 	    + pv_hash_lock_table_size((npvhashbuckets))
817 	    + npages);
818 	s = round_page(s);
819 	if (kernel_memory_allocate(kernel_map, &addr, s, 0,
820 	    KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
821 	    != KERN_SUCCESS) {
822 		panic("pmap_init");
823 	}
824 
825 	memset((char *)addr, 0, s);
826 
827 	vaddr = addr;
828 	vsize = s;
829 
830 #if PV_DEBUG
831 	if (0 == npvhashmask) {
832 		panic("npvhashmask not initialized");
833 	}
834 #endif
835 
836 	/*
837 	 *	Allocate the structures first to preserve word-alignment.
838 	 */
839 	pv_head_table = (pv_rooted_entry_t) addr;
840 	addr = (vm_offset_t) (pv_head_table + npages);
841 
842 	pv_hash_table = (pv_hashed_entry_t *)addr;
843 	addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));
844 
845 	pv_lock_table = (char *) addr;
846 	addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
847 
848 	pv_hash_lock_table = (char *) addr;
849 	addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));
850 
851 	pmap_phys_attributes = (char *) addr;
852 
853 	ppnum_t  last_pn = i386_btop(avail_end);
854 	unsigned int i;
855 	pmap_memory_region_t *pmptr = pmap_memory_regions;
856 	for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
857 		if (pmptr->type != kEfiConventionalMemory) {
858 			continue;
859 		}
860 		ppnum_t pn;
861 		for (pn = pmptr->base; pn <= pmptr->end; pn++) {
862 			if (pn < last_pn) {
863 				pmap_phys_attributes[pn] |= PHYS_MANAGED;
864 
865 				if (pn > last_managed_page) {
866 					last_managed_page = pn;
867 				}
868 
869 				if ((pmap_high_used_bottom <= pn && pn <= pmap_high_used_top) ||
870 				    (pmap_middle_used_bottom <= pn && pn <= pmap_middle_used_top)) {
871 					pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
872 				}
873 			}
874 		}
875 	}
876 	while (vsize) {
877 		ppn = pmap_find_phys(kernel_pmap, vaddr);
878 
879 		pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
880 
881 		vaddr += PAGE_SIZE;
882 		vsize -= PAGE_SIZE;
883 	}
884 	/*
885 	 *	Create the zone of physical maps,
886 	 *	and of the physical-to-virtual entries.
887 	 */
888 	pmap_zone = zone_create_ext("pmap", sizeof(struct pmap),
889 	    ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM, ZONE_ID_PMAP, NULL);
890 
891 	/* The anchor is required to be page aligned. Zone debugging adds
892 	 * padding which may violate that requirement. Tell the zone
893 	 * subsystem that alignment is required.
894 	 */
895 	pmap_anchor_zone = zone_create("pagetable anchors", PAGE_SIZE,
896 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED);
897 
898 /* TODO: possible general optimisation: pre-allocate commonly created
899  * level 3/2 pagetables via zones
900  */
901 	/* The anchor is required to be page aligned. Zone debugging adds
902 	 * padding which may violate that requirement. Tell the zone
903 	 * subsystem that alignment is required.
904 	 */
905 	pmap_uanchor_zone = zone_create("pagetable user anchors", PAGE_SIZE,
906 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED);
907 
908 	pv_hashed_list_zone = zone_create("pv_list", sizeof(struct pv_hashed_entry),
909 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED);
910 
911 	/*
912 	 * Create pv entries for kernel pages that might get pmap_remove()ed.
913 	 *
914 	 * - very low pages that were identity mapped.
915 	 * - vm_pages[] entries that might be unused and reclaimed.
916 	 */
917 	assert((uintptr_t)VM_MIN_KERNEL_ADDRESS + avail_start <= (uintptr_t)vm_page_array_beginning_addr);
918 	pmap_pv_fixup((uintptr_t)VM_MIN_KERNEL_ADDRESS, (uintptr_t)VM_MIN_KERNEL_ADDRESS + avail_start);
919 	pmap_pv_fixup((uintptr_t)vm_page_array_beginning_addr, (uintptr_t)vm_page_array_ending_addr);
920 
921 	pmap_initialized = TRUE;
922 
923 	max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
924 
925 	/*
926 	 * Ensure the kernel's PML4 entry exists for the basement
927 	 * before this is shared with any user.
928 	 */
929 	pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
930 
931 #if CONFIG_VMX
932 	pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
933 	pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0);
934 #endif /* CONFIG_VMX */
935 }
936 
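/*
 * Walk the page tables covering [sv, sv + nxrosz) in the given (non-EPT) pmap
 * and update the NX and write-protect bits in place, handling both 2MB (PS)
 * PDE mappings and 4KB PTEs.  TLB invalidation is left to the caller (e.g.
 * pmap_lowmem_finalize()).
 */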
937 void
938 pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro)
939 {
940 	uint64_t ev = sv + nxrosz, cv = sv;
941 	pd_entry_t *pdep;
942 	pt_entry_t *ptep = NULL;
943 
944 	/* XXX what if nxrosz is 0?  we end up marking the page whose address is passed in via sv -- is that kosher? */
945 	assert(!is_ept_pmap(npmap));
946 
947 	assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
948 
949 	for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
950 		uint64_t pdev = (cv & ~((uint64_t)PDEMASK));
951 
952 		if (*pdep & INTEL_PTE_PS) {
953 #ifdef REMAP_DEBUG
954 			if ((NX ^ !!(*pdep & INTEL_PTE_NX)) || (ro ^ !!!(*pdep & INTEL_PTE_WRITE))) {
955 				kprintf("WARNING: Remapping PDE for %p from %s%s%s to %s%s%s\n", (void *)cv,
956 				    (*pdep & INTEL_PTE_VALID) ? "R" : "",
957 				    (*pdep & INTEL_PTE_WRITE) ? "W" : "",
958 				    (*pdep & INTEL_PTE_NX) ? "" : "X",
959 				    "R",
960 				    ro ? "" : "W",
961 				    NX ? "" : "X");
962 			}
963 #endif
964 
965 			if (NX) {
966 				*pdep |= INTEL_PTE_NX;
967 			} else {
968 				*pdep &= ~INTEL_PTE_NX;
969 			}
970 			if (ro) {
971 				*pdep &= ~INTEL_PTE_WRITE;
972 			} else {
973 				*pdep |= INTEL_PTE_WRITE;
974 			}
975 			cv += NBPD;
976 			cv &= ~((uint64_t) PDEMASK);
977 			pdep = pmap_pde(npmap, cv);
978 			continue;
979 		}
980 
981 		for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
982 #ifdef REMAP_DEBUG
983 			if ((NX ^ !!(*ptep & INTEL_PTE_NX)) || (ro ^ !!!(*ptep & INTEL_PTE_WRITE))) {
984 				kprintf("WARNING: Remapping PTE for %p from %s%s%s to %s%s%s\n", (void *)cv,
985 				    (*ptep & INTEL_PTE_VALID) ? "R" : "",
986 				    (*ptep & INTEL_PTE_WRITE) ? "W" : "",
987 				    (*ptep & INTEL_PTE_NX) ? "" : "X",
988 				    "R",
989 				    ro ? "" : "W",
990 				    NX ? "" : "X");
991 			}
992 #endif
993 			if (NX) {
994 				*ptep |= INTEL_PTE_NX;
995 			} else {
996 				*ptep &= ~INTEL_PTE_NX;
997 			}
998 			if (ro) {
999 				*ptep &= ~INTEL_PTE_WRITE;
1000 			} else {
1001 				*ptep |= INTEL_PTE_WRITE;
1002 			}
1003 			cv += NBPT;
1004 			ptep = pmap_pte(npmap, cv);
1005 		}
1006 	}
1007 	DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
1008 }
1009 
1010 /*
1011  * Reclaim memory for early boot 4K page tables that were converted to large page mappings.
1012  * We know this memory is part of the KPTphys[] array that was allocated in Idle_PTs_init(),
1013  * so we can free it using its address in that array.
1014  */
1015 static void
1016 pmap_free_early_PT(ppnum_t ppn, uint32_t cnt)
1017 {
1018 	ppnum_t KPTphys_ppn;
1019 	vm_offset_t offset;
1020 
1021 	KPTphys_ppn = pmap_find_phys(kernel_pmap, (uintptr_t)KPTphys);
1022 	assert(ppn >= KPTphys_ppn);
1023 	assert(ppn + cnt <= KPTphys_ppn + NKPT);
1024 	offset = (ppn - KPTphys_ppn) << PAGE_SHIFT;
1025 	ml_static_mfree((uintptr_t)KPTphys + offset, PAGE_SIZE * cnt);
1026 }
1027 
1028 /*
1029  * Called once VM is fully initialized so that we can release unused
1030  * sections of low memory to the general pool.
1031  * Also complete the set-up of identity-mapped sections of the kernel:
1032  *  1) write-protect kernel text
1033  *  2) map kernel text using large pages if possible
1034  *  3) read and write-protect page zero (for K32)
1035  *  4) map the global page at the appropriate virtual address.
1036  *
1037  * Use of large pages
1038  * ------------------
1039  * To effectively map and write-protect all kernel text pages, the text
1040  * must be 2M-aligned at the base, and the data section above must also be
1041  * 2M-aligned. That is, there's padding below and above. This is achieved
1042  * through linker directives. Large pages are used only if this alignment
1043  * exists (and not overridden by the -kernel_text_ps_4K boot-arg). The
1044  * memory layout is:
1045  *
1046  *                       :                :
1047  *                       |     __DATA     |
1048  *               sdata:  ==================  2Meg
1049  *                       |                |
1050  *                       |  zero-padding  |
1051  *                       |                |
1052  *               etext:  ------------------
1053  *                       |                |
1054  *                       :                :
1055  *                       |                |
1056  *                       |     __TEXT     |
1057  *                       |                |
1058  *                       :                :
1059  *                       |                |
1060  *               stext:  ==================  2Meg
1061  *                       |                |
1062  *                       |  zero-padding  |
1063  *                       |                |
1064  *               eHIB:   ------------------
1065  *                       |     __HIB      |
1066  *                       :                :
1067  *
1068  * Prior to changing the mapping from 4K to 2M, the zero-padding pages
1069  * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
1070  * 4K pages covering [stext,etext] are coalesced as 2M large pages.
1071  * The now unused level-1 PTE pages are also freed.
1072  */
1073 extern ppnum_t  vm_kernel_base_page;
1074 static uint32_t dataptes = 0;
1075 
1076 void
1077 pmap_lowmem_finalize(void)
1078 {
1079 	spl_t           spl;
1080 	int             i;
1081 
1082 	/*
1083 	 * Update wired memory statistics for early boot pages
1084 	 */
1085 	PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);
1086 
1087 	/*
1088 	 * Free pages in pmap regions below the base:
1089 	 * rdar://6332712
1090 	 *	We can't free all the pages to VM that EFI reports available.
1091 	 *	Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
1092 	 *	There's also a size miscalculation here: pend is one page less
1093 	 *	than it should be but this is not fixed to be backwards
1094 	 *	compatible.
1095 	 * This is important for KASLR because up to 256*2MB = 512MB of space
1096 	 *	has to be released to VM.
1097 	 */
1098 	for (i = 0;
1099 	    pmap_memory_regions[i].end < vm_kernel_base_page;
1100 	    i++) {
1101 		vm_offset_t     pbase = i386_ptob(pmap_memory_regions[i].base);
1102 		vm_offset_t     pend  = i386_ptob(pmap_memory_regions[i].end + 1);
1103 
1104 		DBG("pmap region %d [%p..[%p\n",
1105 		    i, (void *) pbase, (void *) pend);
1106 
1107 		if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED) {
1108 			continue;
1109 		}
1110 		/*
1111 		 * rdar://6332712
1112 		 * Adjust limits not to free pages in range 0xc0000-0xff000.
1113 		 */
1114 		if (pbase >= 0xc0000 && pend <= 0x100000) {
1115 			continue;
1116 		}
1117 		if (pbase < 0xc0000 && pend > 0x100000) {
1118 			/* reserved range 0xc0000-0x100000 lies within this region; free the part below it */
1119 			DBG("- ml_static_mfree(%p,%p)\n",
1120 			    (void *) ml_static_ptovirt(pbase),
1121 			    (void *) (0xc0000 - pbase));
1122 			ml_static_mfree(ml_static_ptovirt(pbase), 0xc0000 - pbase);
1123 			pbase = 0x100000;
1124 		}
1125 		if (pbase < 0xc0000) {
1126 			pend = MIN(pend, 0xc0000);
1127 		}
1128 		if (pend > 0x100000) {
1129 			pbase = MAX(pbase, 0x100000);
1130 		}
1131 		DBG("- ml_static_mfree(%p,%p)\n",
1132 		    (void *) ml_static_ptovirt(pbase),
1133 		    (void *) (pend - pbase));
1134 		ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
1135 	}
1136 
1137 	/* A final pass to get rid of all initial identity mappings to
1138 	 * low pages.
1139 	 */
1140 	DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
1141 
1142 	/*
1143 	 * Remove all mappings past the boot-cpu descriptor aliases and low globals.
1144 	 * Non-boot-cpu GDT aliases will be remapped later as needed.
1145 	 */
1146 	pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
1147 
1148 	/*
1149 	 * Release any memory for early boot 4K page table pages that got replaced
1150 	 * with large page mappings for vm_pages[]. We know this memory is part of
1151 	 * the KPTphys[] array that was allocated in Idle_PTs_init(), so we can free
1152 	 * it using that address.
1153 	 */
1154 	pmap_free_early_PT(released_PT_ppn, released_PT_cnt);
1155 
1156 	/*
1157 	 * If text and data are both 2MB-aligned,
1158 	 * we can map text with large-pages,
1159 	 * unless the -kernel_text_ps_4K boot-arg overrides.
1160 	 */
1161 	if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
1162 		kprintf("Kernel text is 2MB aligned");
1163 		kernel_text_ps_4K = FALSE;
1164 		if (PE_parse_boot_argn("-kernel_text_ps_4K",
1165 		    &kernel_text_ps_4K,
1166 		    sizeof(kernel_text_ps_4K))) {
1167 			kprintf(" but will be mapped with 4K pages\n");
1168 		} else {
1169 			kprintf(" and will be mapped with 2M pages\n");
1170 		}
1171 	}
1172 #if     DEVELOPMENT || DEBUG
1173 	(void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof(wpkernel));
1174 #endif
1175 	if (wpkernel) {
1176 		kprintf("Kernel text %p-%p to be write-protected\n",
1177 		    (void *) stext, (void *) etext);
1178 	}
1179 
1180 	spl = splhigh();
1181 
1182 	/*
1183 	 * Scan over text if mappings are to be changed:
1184 	 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
1185 	 * - Change to large-pages if possible and not overridden.
1186 	 */
1187 	if (kernel_text_ps_4K && wpkernel) {
1188 		vm_offset_t     myva;
1189 		for (myva = stext; myva < etext; myva += PAGE_SIZE) {
1190 			pt_entry_t     *ptep;
1191 
1192 			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
1193 			if (ptep) {
1194 				pmap_store_pte(FALSE, ptep, *ptep & ~INTEL_PTE_WRITE);
1195 			}
1196 		}
1197 	}
1198 
1199 	if (!kernel_text_ps_4K) {
1200 		vm_offset_t     myva;
1201 
1202 		/*
1203 		 * Release zero-filled page padding used for 2M-alignment.
1204 		 */
1205 		DBG("ml_static_mfree(%p,%p) for padding below text\n",
1206 		    (void *) eHIB, (void *) (stext - eHIB));
1207 		ml_static_mfree(eHIB, stext - eHIB);
1208 		DBG("ml_static_mfree(%p,%p) for padding above text\n",
1209 		    (void *) etext, (void *) (sdata - etext));
1210 		ml_static_mfree(etext, sdata - etext);
1211 
1212 		/*
1213 		 * Coalesce text pages into large pages.
1214 		 */
1215 		for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
1216 			pt_entry_t      *ptep;
1217 			vm_offset_t     pte_phys;
1218 			pt_entry_t      *pdep;
1219 			pt_entry_t      pde;
1220 			ppnum_t         KPT_ppn;
1221 
1222 			pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
1223 			KPT_ppn = (ppnum_t)((*pdep & PG_FRAME) >> PAGE_SHIFT);
1224 			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
1225 			DBG("myva: %p pdep: %p ptep: %p\n",
1226 			    (void *) myva, (void *) pdep, (void *) ptep);
1227 			if ((*ptep & INTEL_PTE_VALID) == 0) {
1228 				continue;
1229 			}
1230 			pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
1231 			pde = *pdep & PTMASK;   /* page attributes from pde */
1232 			pde |= INTEL_PTE_PS;    /* make it a 2M entry */
1233 			pde |= pte_phys;        /* take page frame from pte */
1234 
1235 			if (wpkernel) {
1236 				pde &= ~INTEL_PTE_WRITE;
1237 			}
1238 			DBG("pmap_store_pte(%p,0x%llx)\n",
1239 			    (void *)pdep, pde);
1240 			pmap_store_pte(FALSE, pdep, pde);
1241 
1242 			/*
1243 			 * Free the now-unused level-1 pte.
1244 			 */
1245 			pmap_free_early_PT(KPT_ppn, 1);
1246 		}
1247 
1248 		/* Change variable read by sysctl machdep.pmap */
1249 		pmap_kernel_text_ps = I386_LPGBYTES;
1250 	}
1251 
1252 	vm_offset_t dva;
1253 
1254 	for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
1255 		assert(((sdata | edata) & PAGE_MASK) == 0);
1256 		pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
1257 
1258 		dpte = *dptep;
1259 		assert((dpte & INTEL_PTE_VALID));
1260 		dpte |= INTEL_PTE_NX;
1261 		pmap_store_pte(FALSE, dptep, dpte);
1262 		dataptes++;
1263 	}
1264 	assert(dataptes > 0);
1265 
1266 	kernel_segment_command_t * seg;
1267 	kernel_section_t         * sec;
1268 	kc_format_t kc_format;
1269 
1270 	PE_get_primary_kc_format(&kc_format);
1271 
1272 	for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
1273 		if (!strcmp(seg->segname, "__TEXT") ||
1274 		    !strcmp(seg->segname, "__DATA")) {
1275 			continue;
1276 		}
1277 
1278 		/* XXX: FIXME_IN_dyld: This is a workaround (see below) */
1279 		if (kc_format != KCFormatFileset) {
1280 			//XXX
1281 			if (!strcmp(seg->segname, "__KLD")) {
1282 				continue;
1283 			}
1284 		}
1285 
1286 		if (!strcmp(seg->segname, "__HIB")) {
1287 			for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
1288 				if (sec->addr & PAGE_MASK) {
1289 					panic("__HIB segment's sections misaligned");
1290 				}
1291 				if (!strcmp(sec->sectname, "__text")) {
1292 					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
1293 				} else {
1294 					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
1295 				}
1296 			}
1297 		} else {
1298 			if (kc_format == KCFormatFileset) {
1299 #if 0
1300 				/*
1301 				 * This block of code is commented out because it may or may not have induced an earlier panic
1302 				 * in ledger init.
1303 				 */
1304 
1305 
1306 				boolean_t NXbit = !(seg->initprot & VM_PROT_EXECUTE),
1307 				    robit = (seg->initprot & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ;
1308 
1309 				/*
1310 				 * XXX: FIXME_IN_dyld: This is a workaround for the primary KC containing inaccurate
1311 				 * initprot for segments containing code.
1312 				 */
1313 				if (!strcmp(seg->segname, "__KLD") || !strcmp(seg->segname, "__VECTORS")) {
1314 					NXbit = FALSE;
1315 					robit = FALSE;
1316 				}
1317 
1318 				pmap_mark_range(kernel_pmap, seg->vmaddr & ~(uint64_t)PAGE_MASK,
1319 				    round_page_64(seg->vmsize), NXbit, robit);
1320 #endif
1321 
1322 				/*
1323 				 * XXX: We are marking *every* segment with rwx permissions as a workaround
1324 				 * XXX: until the primary KC's kernel segments are page-aligned.
1325 				 */
1326 				kprintf("Marking (%p, %p) as rwx\n", (void *)(seg->vmaddr & ~(uint64_t)PAGE_MASK),
1327 				    (void *)((seg->vmaddr & ~(uint64_t)PAGE_MASK) + round_page_64(seg->vmsize)));
1328 				pmap_mark_range(kernel_pmap, seg->vmaddr & ~(uint64_t)PAGE_MASK,
1329 				    round_page_64(seg->vmsize), FALSE, FALSE);
1330 			} else {
1331 				pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
1332 			}
1333 		}
1334 	}
1335 
1336 	/*
1337 	 * If we're debugging, map the low global vector page at the fixed
1338 	 * virtual address.  Otherwise, remove the mapping for this.
1339 	 */
1340 	if (debug_boot_arg) {
1341 		pt_entry_t *pte = NULL;
1342 		if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS))) {
1343 			panic("lowmem pte");
1344 		}
1345 
1346 		/* make sure it is defined on page boundary */
1347 		assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
1348 		pmap_store_pte(FALSE, pte, kvtophys((vm_offset_t)&lowGlo)
1349 		    | INTEL_PTE_REF
1350 		    | INTEL_PTE_MOD
1351 		    | INTEL_PTE_WIRED
1352 		    | INTEL_PTE_VALID
1353 		    | INTEL_PTE_WRITE
1354 		    | INTEL_PTE_NX);
1355 
1356 #if KASAN
1357 		kasan_notify_address(LOWGLOBAL_ALIAS, PAGE_SIZE);
1358 #endif
1359 	} else {
1360 		pmap_remove(kernel_pmap,
1361 		    LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
1362 	}
1363 	pmap_tlbi_range(0, ~0ULL, true, 0);
1364 	splx(spl);
1365 }
1366 
1367 /*
1368  *	Mark the const data segment as read-only, non-executable.
1369  */
1370 void
1371 x86_64_protect_data_const()
1372 {
1373 	boolean_t doconstro = TRUE;
1374 #if DEVELOPMENT || DEBUG
1375 	(void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
1376 #endif
1377 	if (doconstro) {
1378 		if (sconst & PAGE_MASK) {
1379 			panic("CONST segment misaligned 0x%lx 0x%lx",
1380 			    sconst, econst);
1381 		}
1382 		kprintf("Marking const DATA read-only\n");
1383 		pmap_protect(kernel_pmap, sconst, econst, VM_PROT_READ);
1384 	}
1385 }
1386 /*
1387  * this function is only used for debugging from the vm layer
1388  */
1389 bool
1390 pmap_verify_free(
1391 	ppnum_t pn)
1392 {
1393 	pv_rooted_entry_t       pv_h;
1394 	int             pai;
1395 	bool            result;
1396 
1397 	assert(pn != vm_page_fictitious_addr);
1398 
1399 	if (!pmap_initialized) {
1400 		return true;
1401 	}
1402 
1403 	if (pn == vm_page_guard_addr) {
1404 		return true;
1405 	}
1406 
1407 	pai = ppn_to_pai(pn);
1408 	if (!IS_MANAGED_PAGE(pai)) {
1409 		return false;
1410 	}
1411 	pv_h = pai_to_pvh(pn);
1412 	result = (pv_h->pmap == PMAP_NULL);
1413 	return result;
1414 }
1415 
1416 #if MACH_ASSERT
1417 void
1418 pmap_assert_free(ppnum_t pn)
1419 {
1420 	int pai;
1421 	pv_rooted_entry_t pv_h = NULL;
1422 	pmap_t pmap = NULL;
1423 	vm_offset_t va = 0;
1424 	static char buffer[32];
1425 	static char *pr_name = "not managed pn";
1426 	uint_t attr;
1427 	pt_entry_t *ptep;
1428 	pt_entry_t pte = -1ull;
1429 
1430 	if (pmap_verify_free(pn)) {
1431 		return;
1432 	}
1433 
1434 	if (pn > last_managed_page) {
1435 		attr = 0xff;
1436 		goto done;
1437 	}
1438 
1439 	pai = ppn_to_pai(pn);
1440 	attr = pmap_phys_attributes[pai];
1441 	pv_h = pai_to_pvh(pai);
1442 	va = pv_h->va_and_flags;
1443 	pmap = pv_h->pmap;
1444 	if (pmap == kernel_pmap) {
1445 		pr_name = "kernel";
1446 	} else if (pmap == NULL) {
1447 		pr_name = "pmap NULL";
1448 	} else if (pmap->pmap_procname[0] != 0) {
1449 		pr_name = &pmap->pmap_procname[0];
1450 	} else {
1451 		snprintf(buffer, sizeof(buffer), "pmap %p", pv_h->pmap);
1452 		pr_name = buffer;
1453 	}
1454 
1455 	if (pmap != NULL) {
1456 		ptep = pmap_pte(pmap, va);
1457 		if (ptep != NULL) {
1458 			pte = (uintptr_t)*ptep;
1459 		}
1460 	}
1461 
1462 done:
1463 	panic("page not FREE page: 0x%lx attr: 0x%x %s va: 0x%lx PTE: 0x%llx",
1464 	    (ulong_t)pn, attr, pr_name, va, pte);
1465 }
1466 #endif /* MACH_ASSERT */
1467 
1468 boolean_t
1469 pmap_is_empty(
1470 	pmap_t          pmap,
1471 	vm_map_offset_t va_start,
1472 	vm_map_offset_t va_end)
1473 {
1474 	vm_map_offset_t offset;
1475 	ppnum_t         phys_page;
1476 	ledger_amount_t phys_mem;
1477 
1478 	if (pmap == PMAP_NULL) {
1479 		return TRUE;
1480 	}
1481 
1482 	/*
1483 	 * Check the ledger's phys_mem value
1484 	 * - if it's zero, the pmap is completely empty.
1485 	 * This short-circuit test prevents a virtual address scan which is
1486 	 * painfully slow for 64-bit spaces.
1487 	 * This assumes the count is correct; the debug kernel ought to verify it,
1488 	 * perhaps with a page table walk.
1489 	 */
1490 	if (pmap != kernel_pmap) {
1491 		ledger_get_balance(pmap->ledger, task_ledgers.phys_mem, &phys_mem);
1492 		if (phys_mem == 0) {
1493 			return TRUE;
1494 		}
1495 	}
1496 
1497 	for (offset = va_start;
1498 	    offset < va_end;
1499 	    offset += PAGE_SIZE_64) {
1500 		phys_page = pmap_find_phys(pmap, offset);
1501 		if (phys_page) {
1502 			kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
1503 			    "page %d at 0x%llx\n",
1504 			    pmap, va_start, va_end, phys_page, offset);
1505 			return FALSE;
1506 		}
1507 	}
1508 
1509 	return TRUE;
1510 }
1511 
1512 void
1513 hv_ept_pmap_create(void **ept_pmap, void **eptp)
1514 {
1515 	pmap_t p;
1516 
1517 	if ((ept_pmap == NULL) || (eptp == NULL)) {
1518 		return;
1519 	}
1520 
1521 	p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
1522 	if (p == PMAP_NULL) {
1523 		*ept_pmap = NULL;
1524 		*eptp = NULL;
1525 		return;
1526 	}
1527 
1528 	assert(is_ept_pmap(p));
1529 
1530 	*ept_pmap = (void*)p;
1531 	*eptp = (void*)(p->pm_eptp);
1532 	return;
1533 }
1534 
1535 /*
1536  * pmap_create() is used by some special, legacy 3rd party kexts.
1537  * In our kernel code, always use pmap_create_options().
1538  */
1539 extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t sz, boolean_t is_64bit);
1540 
1541 __attribute__((used))
1542 pmap_t
1543 pmap_create(
1544 	ledger_t      ledger,
1545 	vm_map_size_t sz,
1546 	boolean_t     is_64bit)
1547 {
1548 	return pmap_create_options(ledger, sz, is_64bit ? PMAP_CREATE_64BIT : 0);
1549 }
1550 
1551 /*
1552  *	Create and return a physical map.
1553  *
1554  *	If the size specified for the map
1555  *	is zero, the map is an actual physical
1556  *	map, and may be referenced by the
1557  *	hardware.
1558  *
1559  *	If the size specified is non-zero,
1560  *	the map will be used in software only, and
1561  *	is bounded by that size.
1562  */
1563 
1564 pmap_t
1565 pmap_create_options(
1566 	ledger_t        ledger,
1567 	vm_map_size_t   sz,
1568 	unsigned int    flags)
1569 {
1570 	pmap_t          p;
1571 	vm_size_t       size;
1572 	pml4_entry_t    *pml4;
1573 	pml4_entry_t    *kpml4;
1574 	int             i;
1575 
1576 	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, sz, flags);
1577 
1578 	size = (vm_size_t) sz;
1579 
1580 	/*
1581 	 *	A software use-only map doesn't even need a map.
1582 	 */
1583 
1584 	if (size != 0) {
1585 		return PMAP_NULL;
1586 	}
1587 
1588 	/*
1589 	 *	Return error when unrecognized flags are passed.
1590 	 */
1591 	if (__improbable((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0)) {
1592 		return PMAP_NULL;
1593 	}
1594 
1595 	p = zalloc_flags(pmap_zone, Z_WAITOK | Z_ZERO);
1596 	if (PMAP_NULL == p) {
1597 		panic("pmap_create zalloc");
1598 	}
1599 
1600 	lck_rw_init(&p->pmap_rwl, &pmap_lck_grp, &pmap_lck_rw_attr);
1601 	p->pmap_rwl.lck_rw_can_sleep = FALSE;
1602 
1603 	os_ref_init(&p->ref_count, NULL);
1604 #if DEVELOPMENT || DEBUG
1605 	p->nx_enabled = 1;
1606 #endif
1607 	p->pm_shared = FALSE;
1608 	ledger_reference(ledger);
1609 	p->ledger = ledger;
1610 
1611 	p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);
1612 
1613 	p->pagezero_accessible = FALSE;
1614 	p->pm_vm_map_cs_enforced = FALSE;
1615 
1616 	if (pmap_pcid_ncpus) {
1617 		pmap_pcid_initialize(p);
1618 	}
1619 
1620 	p->pm_pml4 = zalloc(pmap_anchor_zone);
1621 	p->pm_upml4 = zalloc(pmap_uanchor_zone); //cleanup for EPT
1622 
1623 	pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
1624 	pmap_assert((((uintptr_t)p->pm_upml4) & PAGE_MASK) == 0);
1625 
1626 	memset((char *)p->pm_pml4, 0, PAGE_SIZE);
1627 	memset((char *)p->pm_upml4, 0, PAGE_SIZE);
1628 
1629 	if (flags & PMAP_CREATE_EPT) {
1630 		p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4) | pmap_eptp_flags;
1631 		p->pm_cr3 = 0;
1632 	} else {
1633 		p->pm_eptp = 0;
1634 		p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
1635 		p->pm_ucr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_upml4);
1636 	}
1637 
1638 	/* allocate the vm_objs to hold the pdpt, pde and pte pages */
1639 
1640 	p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) *PAGE_SIZE);
1641 	if (NULL == p->pm_obj_pml4) {
1642 		panic("pmap_create pml4 obj");
1643 	}
1644 
1645 	p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) *PAGE_SIZE);
1646 	if (NULL == p->pm_obj_pdpt) {
1647 		panic("pmap_create pdpt obj");
1648 	}
1649 
1650 	p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) *PAGE_SIZE);
1651 	if (NULL == p->pm_obj) {
1652 		panic("pmap_create pte obj");
1653 	}
1654 
1655 	if (!(flags & PMAP_CREATE_EPT)) {
1656 		/* All host pmaps share the kernel's pml4 */
1657 		pml4 = pmap64_pml4(p, 0ULL);
1658 		kpml4 = kernel_pmap->pm_pml4;
1659 		for (i = KERNEL_PML4_INDEX; i < (KERNEL_PML4_INDEX + KERNEL_PML4_COUNT); i++) {
1660 			pml4[i] = kpml4[i];
1661 		}
1662 		pml4[KERNEL_KEXTS_INDEX]   = kpml4[KERNEL_KEXTS_INDEX];
1663 		for (i = KERNEL_PHYSMAP_PML4_INDEX; i < (KERNEL_PHYSMAP_PML4_INDEX + KERNEL_PHYSMAP_PML4_COUNT); i++) {
1664 			pml4[i] = kpml4[i];
1665 		}
1666 		pml4[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX];
1667 #if KASAN
1668 		for (i = KERNEL_KASAN_PML4_FIRST; i <= KERNEL_KASAN_PML4_LAST; i++) {
1669 			pml4[i] = kpml4[i];
1670 		}
1671 #endif
1672 		pml4_entry_t    *pml4u = pmap64_user_pml4(p, 0ULL);
1673 		pml4u[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX];
1674 	}
1675 
1676 #if MACH_ASSERT
1677 	p->pmap_stats_assert = TRUE;
1678 	p->pmap_pid = 0;
1679 	strlcpy(p->pmap_procname, "<nil>", sizeof(p->pmap_procname));
1680 #endif /* MACH_ASSERT */
1681 
1682 	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
1683 	    VM_KERNEL_ADDRHIDE(p));
1684 
1685 	return p;
1686 }
1687 
1688 /*
1689  * We maintain stats and ledgers so that a task's physical footprint is:
1690  * phys_footprint = ((internal - alternate_accounting)
1691  *                   + (internal_compressed - alternate_accounting_compressed)
1692  *                   + iokit_mapped
1693  *                   + purgeable_nonvolatile
1694  *                   + purgeable_nonvolatile_compressed
1695  *                   + page_table)
1696  * where "alternate_accounting" includes "iokit" and "purgeable" memory.
1697  */
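/*
 * Illustrative sketch only (not part of the pmap code): the formula above,
 * written out against a hypothetical snapshot structure whose fields mirror
 * the ledger entry names.  The type and field names are assumptions made
 * solely for this example.
 *
 *	int64_t
 *	phys_footprint_estimate(const struct ledger_snapshot *s)
 *	{
 *		return (s->internal - s->alternate_accounting)
 *		    + (s->internal_compressed - s->alternate_accounting_compressed)
 *		    + s->iokit_mapped
 *		    + s->purgeable_nonvolatile
 *		    + s->purgeable_nonvolatile_compressed
 *		    + s->page_table;
 *	}
 */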
1698 
1699 #if MACH_ASSERT
1700 static void pmap_check_ledgers(pmap_t pmap);
1701 #else /* MACH_ASSERT */
1702 static inline void
1703 pmap_check_ledgers(__unused pmap_t pmap)
1704 {
1705 }
1706 #endif /* MACH_ASSERT */
1707 
1708 /*
1709  *	Retire the given physical map from service.
1710  *	Should only be called if the map contains
1711  *	no valid mappings.
1712  */
1713 extern int vm_wired_objects_page_count;
1714 
1715 void
1716 pmap_destroy(pmap_t     p)
1717 {
1718 	os_ref_count_t c;
1719 
1720 	if (p == PMAP_NULL) {
1721 		return;
1722 	}
1723 
1724 	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
1725 	    VM_KERNEL_ADDRHIDE(p));
1726 
1727 	PMAP_LOCK_EXCLUSIVE(p);
1728 
1729 	c = os_ref_release_locked(&p->ref_count);
1730 
1731 	pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);
1732 
1733 	if (c == 0) {
1734 		/*
1735 		 * If some cpu is not using the physical pmap pointer that it
1736 		 * is supposed to be (see set_dirbase), we might be using the
1737 		 * pmap that is being destroyed! Make sure we are
1738 		 * physically on the right pmap:
1739 		 */
1740 		PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
1741 		if (pmap_pcid_ncpus) {
1742 			pmap_destroy_pcid_sync(p);
1743 		}
1744 	}
1745 
1746 	PMAP_UNLOCK_EXCLUSIVE(p);
1747 
1748 	if (c != 0) {
1749 		PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
1750 		pmap_assert(p == kernel_pmap);
1751 		return; /* still in use */
1752 	}
1753 
1754 	/*
1755 	 *	Free the memory maps, then the
1756 	 *	pmap structure.
1757 	 */
1758 	int inuse_ptepages = 0;
1759 
1760 	zfree(pmap_anchor_zone, p->pm_pml4);
1761 	zfree(pmap_uanchor_zone, p->pm_upml4);
1762 
1763 	inuse_ptepages += p->pm_obj_pml4->resident_page_count;
1764 	vm_object_deallocate(p->pm_obj_pml4);
1765 
1766 	inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
1767 	vm_object_deallocate(p->pm_obj_pdpt);
1768 
1769 	inuse_ptepages += p->pm_obj->resident_page_count;
1770 	vm_object_deallocate(p->pm_obj);
1771 
1772 	OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
1773 	PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
1774 
1775 	pmap_check_ledgers(p);
1776 	ledger_dereference(p->ledger);
1777 	lck_rw_destroy(&p->pmap_rwl, &pmap_lck_grp);
1778 	zfree(pmap_zone, p);
1779 
1780 	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
1781 }
1782 
1783 /*
1784  *	Add a reference to the specified pmap.
1785  */
1786 
1787 void
1788 pmap_reference(pmap_t   p)
1789 {
1790 	if (p != PMAP_NULL) {
1791 		PMAP_LOCK_EXCLUSIVE(p);
1792 		os_ref_retain_locked(&p->ref_count);
1793 		PMAP_UNLOCK_EXCLUSIVE(p);
1794 	}
1795 }
1796 
1797 /*
1798  *	Remove phys addr if mapped in specified map
1799  *
1800  */
1801 void
1802 pmap_remove_some_phys(
1803 	__unused pmap_t         map,
1804 	__unused ppnum_t         pn)
1805 {
1806 /* Implement to support working set code */
1807 }
1808 
1809 
1810 void
1811 pmap_protect(
1812 	pmap_t          map,
1813 	vm_map_offset_t sva,
1814 	vm_map_offset_t eva,
1815 	vm_prot_t       prot)
1816 {
1817 	pmap_protect_options(map, sva, eva, prot, 0, NULL);
1818 }
1819 
1820 
1821 /*
1822  *	Set the physical protection on the
1823  *	specified range of this map as requested.
1824  *
1825  * VERY IMPORTANT: Will *NOT* increase permissions.
1826  *	pmap_protect_options() should protect the range against any access types
1827  *      that are not in "prot" but it should never grant extra access.
1828  *	For example, if "prot" is READ|EXECUTE, that means "remove write
1829  *      access" but it does *not* mean "add read and execute" access.
1830  *	VM relies on getting soft-faults to enforce extra checks (code
1831  *	signing, for example), for example.
1832  *	New access permissions are granted via pmap_enter() only.
1833  *      ***NOTE***:
1834  *	The only exception is for EPT pmaps, where we MUST populate all exec
1835  *      bits when the protection API is invoked (so that the HV fault handler
1836  *      can make decisions based on the exit qualification information, which
1837  *      includes the execute bits in the EPT entries.  Soft-faulting them
1838  *      in would cause a chicken-and-egg problem where the HV fault handler
1839  *      would not be able to identify mode-based execute control (MBE) faults.)
1840  */
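/*
 * Illustrative example (a sketch, not taken from this file): downgrading a
 * range to read/execute.  The call clears the write bit on any valid PTEs in
 * [sva, eva) but never adds access; new permissions are granted only by a
 * subsequent pmap_enter().
 *
 *	pmap_protect_options(map, sva, eva,
 *	    VM_PROT_READ | VM_PROT_EXECUTE, 0, NULL);
 */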
1841 void
1842 pmap_protect_options(
1843 	pmap_t          map,
1844 	vm_map_offset_t sva,
1845 	vm_map_offset_t eva,
1846 	vm_prot_t       prot,
1847 	unsigned int    options,
1848 	void            *arg)
1849 {
1850 	pt_entry_t      *pde;
1851 	pt_entry_t      *spte, *epte;
1852 	vm_map_offset_t lva;
1853 	vm_map_offset_t orig_sva;
1854 	boolean_t       set_NX;
1855 	int             num_found = 0;
1856 	boolean_t       is_ept;
1857 	uint64_t        cur_vaddr;
1858 
1859 	pmap_intr_assert();
1860 
1861 	if (map == PMAP_NULL) {
1862 		return;
1863 	}
1864 
1865 	if (prot == VM_PROT_NONE) {
1866 		pmap_remove_options(map, sva, eva, options);
1867 		return;
1868 	}
1869 
1870 	PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
1871 	    VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(sva),
1872 	    VM_KERNEL_ADDRHIDE(eva));
1873 
1874 	is_ept = is_ept_pmap(map);
1875 
1876 	if ((prot & VM_PROT_EXECUTE) || __improbable(is_ept && (prot & VM_PROT_UEXEC))) {
1877 		set_NX = FALSE;
1878 	} else {
1879 		set_NX = TRUE;
1880 	}
1881 
1882 #if DEVELOPMENT || DEBUG
1883 	if (__improbable(set_NX && (!nx_enabled || !map->nx_enabled))) {
1884 		set_NX = FALSE;
1885 	}
1886 #endif
1887 	PMAP_LOCK_EXCLUSIVE(map);
1888 
1889 	orig_sva = sva;
1890 	cur_vaddr = sva;
1891 	while (sva < eva) {
1892 		uint64_t vaddr_incr;
1893 		lva = (sva + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1);
1894 		if (lva > eva) {
1895 			lva = eva;
1896 		}
1897 		pde = pmap_pde(map, sva);
1898 		if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
1899 			if (*pde & PTE_PS) {
1900 				/* superpage */
1901 				spte = pde;
1902 				epte = spte + 1; /* excluded */
1903 				vaddr_incr = I386_LPGBYTES;
1904 			} else {
1905 				spte = pmap_pte(map, (sva & ~(PDE_MAPPED_SIZE - 1)));
1906 				spte = &spte[ptenum(sva)];
1907 				epte = &spte[intel_btop(lva - sva)];
1908 				vaddr_incr = I386_PGBYTES;
1909 			}
1910 
1911 			for (; spte < epte; spte++) {
1912 				uint64_t clear_bits, set_bits;
1913 
1914 				if (!(*spte & PTE_VALID_MASK(is_ept))) {
1915 					continue;
1916 				}
1917 
1918 				clear_bits = 0;
1919 				set_bits = 0;
1920 
1921 				if (is_ept) {
1922 					if (!(prot & VM_PROT_READ)) {
1923 						clear_bits |= PTE_READ(is_ept);
1924 					}
1925 				}
1926 				if (!(prot & VM_PROT_WRITE)) {
1927 					clear_bits |= PTE_WRITE(is_ept);
1928 				}
1929 #if DEVELOPMENT || DEBUG
1930 				else if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) &&
1931 				    map == kernel_pmap) {
1932 					set_bits |= PTE_WRITE(is_ept);
1933 				}
1934 #endif /* DEVELOPMENT || DEBUG */
1935 
1936 				if (set_NX) {
1937 					if (!is_ept) {
1938 						set_bits |= INTEL_PTE_NX;
1939 					} else {
1940 						clear_bits |= INTEL_EPT_EX | INTEL_EPT_UEX;
1941 					}
1942 				} else if (is_ept) {
1943 					/* This is the exception to the "Don't add permissions" statement, above */
1944 					set_bits |= ((prot & VM_PROT_EXECUTE) ? INTEL_EPT_EX : 0) |
1945 					    ((prot & VM_PROT_UEXEC) ? INTEL_EPT_UEX : 0);
1946 				}
1947 
1948 				pmap_update_pte(is_ept, spte, clear_bits, set_bits, false);
1949 
1950 				DTRACE_VM3(set_pte, pmap_t, map, void *, cur_vaddr, uint64_t, *spte);
1951 				cur_vaddr += vaddr_incr;
1952 
1953 				num_found++;
1954 			}
1955 		}
1956 		sva = lva;
1957 	}
1958 	if (num_found) {
1959 		if (options & PMAP_OPTIONS_NOFLUSH) {
1960 			PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
1961 		} else {
1962 			PMAP_UPDATE_TLBS(map, orig_sva, eva);
1963 		}
1964 	}
1965 
1966 	PMAP_UNLOCK_EXCLUSIVE(map);
1967 
1968 	PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END);
1969 }
1970 
1971 /* Map a (possibly) autogenned block */
1972 kern_return_t
1973 pmap_map_block_addr(
1974 	pmap_t          pmap,
1975 	addr64_t        va,
1976 	pmap_paddr_t    pa,
1977 	uint32_t        size,
1978 	vm_prot_t       prot,
1979 	int             attr,
1980 	unsigned int    flags)
1981 {
1982 	return pmap_map_block(pmap, va, intel_btop(pa), size, prot, attr, flags);
1983 }
1984 
1985 kern_return_t
1986 pmap_map_block(
1987 	pmap_t          pmap,
1988 	addr64_t        va,
1989 	ppnum_t         pa,
1990 	uint32_t        size,
1991 	vm_prot_t       prot,
1992 	int             attr,
1993 	__unused unsigned int   flags)
1994 {
1995 	kern_return_t   kr;
1996 	addr64_t        original_va = va;
1997 	uint32_t        page;
1998 	int             cur_page_size;
1999 
2000 	if (attr & VM_MEM_SUPERPAGE) {
2001 		cur_page_size =  SUPERPAGE_SIZE;
2002 	} else {
2003 		cur_page_size =  PAGE_SIZE;
2004 	}
2005 
2006 	for (page = 0; page < size; page += cur_page_size / PAGE_SIZE) {
2007 		kr = pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
2008 
2009 		if (kr != KERN_SUCCESS) {
2010 			/*
2011 			 * This will panic for now, as it is unclear that
2012 			 * removing the mappings is correct.
2013 			 */
2014 			panic("%s: failed pmap_enter, "
2015 			    "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x",
2016 			    __FUNCTION__,
2017 			    pmap, va, pa, size, prot, flags);
2018 
2019 			pmap_remove(pmap, original_va, va - original_va);
2020 			return kr;
2021 		}
2022 
2023 		va += cur_page_size;
2024 		pa += cur_page_size / PAGE_SIZE;
2025 	}
2026 
2027 	return KERN_SUCCESS;
2028 }
2029 
2030 kern_return_t
2031 pmap_expand_pml4(
2032 	pmap_t          map,
2033 	vm_map_offset_t vaddr,
2034 	unsigned int options)
2035 {
2036 	vm_page_t       m;
2037 	pmap_paddr_t    pa;
2038 	uint64_t        i;
2039 	ppnum_t         pn;
2040 	pml4_entry_t    *pml4p;
2041 	boolean_t       is_ept = is_ept_pmap(map);
2042 
2043 	DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
2044 
2045 	/* With the exception of the kext "basement", the kernel's level 4
2046 	 * pagetables must not be dynamically expanded.
2047 	 */
2048 	assert(map != kernel_pmap || (vaddr == KERNEL_BASEMENT));
2049 	/*
2050 	 *	Allocate a VM page for the pml4 page
2051 	 */
2052 	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
2053 		if (options & PMAP_EXPAND_OPTIONS_NOWAIT) {
2054 			return KERN_RESOURCE_SHORTAGE;
2055 		}
2056 		VM_PAGE_WAIT();
2057 	}
2058 	/*
2059 	 *	put the page into the pmap's obj list so it
2060 	 *	can be found later.
2061 	 */
2062 	pn = VM_PAGE_GET_PHYS_PAGE(m);
2063 	pa = i386_ptob(pn);
2064 	i = pml4idx(map, vaddr);
2065 
2066 	/*
2067 	 *	Zero the page.
2068 	 */
2069 	pmap_zero_page(pn);
2070 
2071 	vm_page_lockspin_queues();
2072 	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
2073 	vm_page_unlock_queues();
2074 
2075 	OSAddAtomic(1, &inuse_ptepages_count);
2076 	OSAddAtomic64(1, &alloc_ptepages_count);
2077 	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
2078 
2079 	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
2080 	vm_object_lock(map->pm_obj_pml4);
2081 
2082 	PMAP_LOCK_EXCLUSIVE(map);
2083 	/*
2084 	 *	See if someone else expanded us first
2085 	 */
2086 	if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
2087 		PMAP_UNLOCK_EXCLUSIVE(map);
2088 		vm_object_unlock(map->pm_obj_pml4);
2089 
2090 		VM_PAGE_FREE(m);
2091 
2092 		OSAddAtomic(-1, &inuse_ptepages_count);
2093 		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
2094 		return KERN_SUCCESS;
2095 	}
2096 
2097 #if 0 /* DEBUG */
2098 	if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
2099 		panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx",
2100 		    map, map->pm_obj_pml4, vaddr, i);
2101 	}
2102 #endif
2103 	vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
2104 	vm_object_unlock(map->pm_obj_pml4);
2105 
2106 	/*
2107 	 *	Set the page directory entry for this page table.
2108 	 */
2109 	pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
2110 
2111 	/*
2112 	 * Note that INTEL_EPT_UEX is unconditionally set (as is INTEL_EPT_EX) for
2113 	 * all intermediate paging levels, from PML4Es to PDEs.  Processors with
2114 	 * VT-x implementations that do not support MBE ignore the INTEL_EPT_UEX
2115 	 * bit at all levels of the EPT, so there is no risk of inducing EPT
2116 	 * violation faults.
2117 	 */
2118 	pmap_store_pte(is_ept, pml4p, pa_to_pte(pa)
2119 	    | PTE_READ(is_ept)
2120 	    | (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER)
2121 	    | PTE_WRITE(is_ept));
2122 	pml4_entry_t    *upml4p;
2123 
2124 	upml4p = pmap64_user_pml4(map, vaddr);
2125 	pmap_store_pte(is_ept, upml4p, pa_to_pte(pa)
2126 	    | PTE_READ(is_ept)
2127 	    | (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER)
2128 	    | PTE_WRITE(is_ept));
2129 
2130 	PMAP_UNLOCK_EXCLUSIVE(map);
2131 
2132 	return KERN_SUCCESS;
2133 }
2134 
2135 kern_return_t
2136 pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
2137 {
2138 	vm_page_t       m;
2139 	pmap_paddr_t    pa;
2140 	uint64_t        i;
2141 	ppnum_t         pn;
2142 	pdpt_entry_t    *pdptp;
2143 	boolean_t       is_ept = is_ept_pmap(map);
2144 
2145 	DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
2146 
2147 	while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
2148 		kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
2149 		if (pep4kr != KERN_SUCCESS) {
2150 			return pep4kr;
2151 		}
2152 	}
2153 
2154 	/*
2155 	 *	Allocate a VM page for the pdpt page
2156 	 */
2157 	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
2158 		if (options & PMAP_EXPAND_OPTIONS_NOWAIT) {
2159 			return KERN_RESOURCE_SHORTAGE;
2160 		}
2161 		VM_PAGE_WAIT();
2162 	}
2163 
2164 	/*
2165 	 *	put the page into the pmap's obj list so it
2166 	 *	can be found later.
2167 	 */
2168 	pn = VM_PAGE_GET_PHYS_PAGE(m);
2169 	pa = i386_ptob(pn);
2170 	i = pdptidx(map, vaddr);
2171 
2172 	/*
2173 	 *	Zero the page.
2174 	 */
2175 	pmap_zero_page(pn);
2176 
2177 	vm_page_lockspin_queues();
2178 	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
2179 	vm_page_unlock_queues();
2180 
2181 	OSAddAtomic(1, &inuse_ptepages_count);
2182 	OSAddAtomic64(1, &alloc_ptepages_count);
2183 	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
2184 
2185 	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
2186 	vm_object_lock(map->pm_obj_pdpt);
2187 
2188 	PMAP_LOCK_EXCLUSIVE(map);
2189 	/*
2190 	 *	See if someone else expanded us first
2191 	 */
2192 	if (pmap_pde(map, vaddr) != PD_ENTRY_NULL) {
2193 		PMAP_UNLOCK_EXCLUSIVE(map);
2194 		vm_object_unlock(map->pm_obj_pdpt);
2195 
2196 		VM_PAGE_FREE(m);
2197 
2198 		OSAddAtomic(-1, &inuse_ptepages_count);
2199 		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
2200 		return KERN_SUCCESS;
2201 	}
2202 
2203 #if 0 /* DEBUG */
2204 	if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
2205 		panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx",
2206 		    map, map->pm_obj_pdpt, vaddr, i);
2207 	}
2208 #endif
2209 	vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
2210 	vm_object_unlock(map->pm_obj_pdpt);
2211 
2212 	/*
2213 	 *	Set the page directory entry for this page table.
2214 	 */
2215 	pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
2216 
2217 	pmap_store_pte(is_ept, pdptp, pa_to_pte(pa)
2218 	    | PTE_READ(is_ept)
2219 	    | (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER)
2220 	    | PTE_WRITE(is_ept));
2221 
2222 	PMAP_UNLOCK_EXCLUSIVE(map);
2223 
2224 	return KERN_SUCCESS;
2225 }
2226 
2227 
2228 
2229 /*
2230  *	Routine:	pmap_expand
2231  *
2232  *	Expands a pmap to be able to map the specified virtual address.
2233  *
2234  *	Allocates new virtual memory for the P0 or P1 portion of the
2235  *	pmap, then re-maps the physical pages that were in the old
2236  *	pmap to be in the new pmap.
2237  *
2238  *	Must be called with the pmap system and the pmap unlocked,
2239  *	since these must be unlocked to use vm_allocate or vm_deallocate.
2240  *	Thus it must be called in a loop that checks whether the map
2241  *	has been expanded enough.
2242  *	(We won't loop forever, since page tables aren't shrunk.)
2243  */
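/*
 * Illustrative caller pattern (a sketch, not taken from this file): since
 * another thread may expand the pmap first, callers retry until a translation
 * for "vaddr" exists and bail out on failures such as KERN_RESOURCE_SHORTAGE
 * when PMAP_EXPAND_OPTIONS_NOWAIT is passed:
 *
 *	while (pmap_pte(map, vaddr) == PT_ENTRY_NULL) {
 *		kern_return_t kr = pmap_expand(map, vaddr, options);
 *		if (kr != KERN_SUCCESS) {
 *			return kr;
 *		}
 *	}
 */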
2244 kern_return_t
2245 pmap_expand(
2246 	pmap_t          map,
2247 	vm_map_offset_t vaddr,
2248 	unsigned int options)
2249 {
2250 	pt_entry_t              *pdp;
2251 	vm_page_t               m;
2252 	pmap_paddr_t            pa;
2253 	uint64_t                i;
2254 	ppnum_t                 pn;
2255 	boolean_t               is_ept = is_ept_pmap(map);
2256 
2257 
2258 	/*
2259 	 * For the kernel, the virtual address must be in or above the basement
2260 	 * which is for kexts and is in the 512GB immediately below the kernel..
2261 	 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
2262 	 */
2263 	if (__improbable(map == kernel_pmap &&
2264 	    !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))) {
2265 		if ((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0) {
2266 			panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
2267 		}
2268 	}
2269 
2270 	while ((pdp = pmap_pde(map, vaddr)) == PD_ENTRY_NULL) {
2271 		assert((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0);
2272 		kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
2273 		if (pepkr != KERN_SUCCESS) {
2274 			return pepkr;
2275 		}
2276 	}
2277 
2278 	/*
2279 	 *	Allocate a VM page for the pde entries.
2280 	 */
2281 	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
2282 		if (options & PMAP_EXPAND_OPTIONS_NOWAIT) {
2283 			return KERN_RESOURCE_SHORTAGE;
2284 		}
2285 		VM_PAGE_WAIT();
2286 	}
2287 
2288 	/*
2289 	 *	put the page into the pmap's obj list so it
2290 	 *	can be found later.
2291 	 */
2292 	pn = VM_PAGE_GET_PHYS_PAGE(m);
2293 	pa = i386_ptob(pn);
2294 	i = pdeidx(map, vaddr);
2295 
2296 	/*
2297 	 *	Zero the page.
2298 	 */
2299 	pmap_zero_page(pn);
2300 
2301 	vm_page_lockspin_queues();
2302 	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
2303 	vm_page_unlock_queues();
2304 
2305 	OSAddAtomic(1, &inuse_ptepages_count);
2306 	OSAddAtomic64(1, &alloc_ptepages_count);
2307 	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
2308 
2309 	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
2310 	vm_object_lock(map->pm_obj);
2311 
2312 	PMAP_LOCK_EXCLUSIVE(map);
2313 
2314 	/*
2315 	 *	See if someone else expanded us first
2316 	 */
2317 	if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
2318 		PMAP_UNLOCK_EXCLUSIVE(map);
2319 		vm_object_unlock(map->pm_obj);
2320 
2321 		VM_PAGE_FREE(m);
2322 
2323 		OSAddAtomic(-1, &inuse_ptepages_count); //todo replace all with inlines
2324 		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
2325 		return KERN_SUCCESS;
2326 	}
2327 
2328 #if 0 /* DEBUG */
2329 	if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
2330 		panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx",
2331 		    map, map->pm_obj, vaddr, i);
2332 	}
2333 #endif
2334 	vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
2335 	vm_object_unlock(map->pm_obj);
2336 
2337 	/*
2338 	 *	Set the page directory entry for this page table.
2339 	 */
2340 	pdp = pmap_pde(map, vaddr);
2341 
2342 	pmap_store_pte(is_ept, pdp, pa_to_pte(pa)
2343 	    | PTE_READ(is_ept)
2344 	    | (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER)
2345 	    | PTE_WRITE(is_ept));
2346 
2347 	PMAP_UNLOCK_EXCLUSIVE(map);
2348 
2349 	return KERN_SUCCESS;
2350 }
2351 /*
2352  * Query a pmap to see what size a given virtual address is mapped with.
2353  * If the vaddr is not mapped, returns 0.
2354  */
2355 vm_size_t
2356 pmap_query_pagesize(
2357 	pmap_t          pmap,
2358 	vm_map_offset_t vaddr)
2359 {
2360 	pd_entry_t      *pdep;
2361 	vm_size_t       size = 0;
2362 
2363 	assert(!is_ept_pmap(pmap));
2364 	PMAP_LOCK_EXCLUSIVE(pmap);
2365 
2366 	pdep = pmap_pde(pmap, vaddr);
2367 	if (pdep != PD_ENTRY_NULL) {
2368 		if (*pdep & INTEL_PTE_PS) {
2369 			size = I386_LPGBYTES;
2370 		} else if (pmap_pte(pmap, vaddr) != PT_ENTRY_NULL) {
2371 			size = I386_PGBYTES;
2372 		}
2373 	}
2374 
2375 	PMAP_UNLOCK_EXCLUSIVE(pmap);
2376 
2377 	return size;
2378 }
2379 
2380 /*
2381  * Ensure the page table hierarchy is filled in down to
2382  * the large page level. Additionally returns FAILURE if
2383  * a lower page table already exists.
2384  */
2385 static kern_return_t
2386 pmap_pre_expand_large_internal(
2387 	pmap_t          pmap,
2388 	vm_map_offset_t vaddr)
2389 {
2390 	ppnum_t         pn;
2391 	pt_entry_t      *pte;
2392 	boolean_t       is_ept = is_ept_pmap(pmap);
2393 	kern_return_t   kr = KERN_SUCCESS;
2394 
2395 	if (pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
2396 		if (!pmap_next_page_hi(&pn, FALSE)) {
2397 			panic("pmap_pre_expand_large no PDPT");
2398 		}
2399 
2400 		pmap_zero_page(pn);
2401 
2402 		pte = pmap64_pml4(pmap, vaddr);
2403 
2404 		pmap_store_pte(is_ept, pte, pa_to_pte(i386_ptob(pn)) |
2405 		    PTE_READ(is_ept) |
2406 		    (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER) |
2407 		    PTE_WRITE(is_ept));
2408 
2409 		pte = pmap64_user_pml4(pmap, vaddr);
2410 
2411 		pmap_store_pte(is_ept, pte, pa_to_pte(i386_ptob(pn)) |
2412 		    PTE_READ(is_ept) |
2413 		    (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER) |
2414 		    PTE_WRITE(is_ept));
2415 	}
2416 
2417 	if (pmap_pde(pmap, vaddr) == PD_ENTRY_NULL) {
2418 		if (!pmap_next_page_hi(&pn, FALSE)) {
2419 			panic("pmap_pre_expand_large no PDE");
2420 		}
2421 
2422 		pmap_zero_page(pn);
2423 
2424 		pte = pmap64_pdpt(pmap, vaddr);
2425 
2426 		pmap_store_pte(is_ept, pte, pa_to_pte(i386_ptob(pn)) |
2427 		    PTE_READ(is_ept) |
2428 		    (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER) |
2429 		    PTE_WRITE(is_ept));
2430 	} else if (pmap_pte(pmap, vaddr) != PT_ENTRY_NULL) {
2431 		kr = KERN_FAILURE;
2432 	}
2433 
2434 	return kr;
2435 }
2436 
2437 /*
2438  * Wrapper that locks the pmap.
2439  */
2440 kern_return_t
2441 pmap_pre_expand_large(
2442 	pmap_t          pmap,
2443 	vm_map_offset_t vaddr)
2444 {
2445 	kern_return_t   kr;
2446 
2447 	PMAP_LOCK_EXCLUSIVE(pmap);
2448 	kr = pmap_pre_expand_large_internal(pmap, vaddr);
2449 	PMAP_UNLOCK_EXCLUSIVE(pmap);
2450 	return kr;
2451 }
2452 
2453 /*
2454  * On large memory machines, pmap_steal_memory() will allocate past
2455  * the 1GB of pre-allocated/mapped virtual kernel area. This function
2456  * expands the kernel page tables to cover a given vaddr. It uses pages
2457  * from the same pool that pmap_steal_memory() uses, since vm_page_grab()
2458  * isn't available yet.
2459  */
2460 void
2461 pmap_pre_expand(
2462 	pmap_t          pmap,
2463 	vm_map_offset_t vaddr)
2464 {
2465 	ppnum_t         pn;
2466 	pt_entry_t      *pte;
2467 	boolean_t       is_ept = is_ept_pmap(pmap);
2468 
2469 	/*
2470 	 * This returns failure if a 4K page table already exists.
2471 	 * Otherwise it fills in the page table hierarchy down
2472 	 * to that level.
2473 	 */
2474 	PMAP_LOCK_EXCLUSIVE(pmap);
2475 	if (pmap_pre_expand_large_internal(pmap, vaddr) == KERN_FAILURE) {
2476 		PMAP_UNLOCK_EXCLUSIVE(pmap);
2477 		return;
2478 	}
2479 
2480 	/* Add the lowest table */
2481 	if (!pmap_next_page_hi(&pn, FALSE)) {
2482 		panic("pmap_pre_expand");
2483 	}
2484 
2485 	pmap_zero_page(pn);
2486 
2487 	pte = pmap_pde(pmap, vaddr);
2488 
2489 	pmap_store_pte(is_ept, pte, pa_to_pte(i386_ptob(pn)) |
2490 	    PTE_READ(is_ept) |
2491 	    (is_ept ? (INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_USER) |
2492 	    PTE_WRITE(is_ept));
2493 	PMAP_UNLOCK_EXCLUSIVE(pmap);
2494 }
2495 
2496 /*
2497  * pmap_sync_page_data_phys(ppnum_t pa)
2498  *
2499  * Invalidates all of the instruction cache on a physical page and
2500  * pushes any dirty data from the data cache for the same physical page.
2501  * Not required in i386.
2502  */
2503 void
2504 pmap_sync_page_data_phys(__unused ppnum_t pa)
2505 {
2506 	return;
2507 }
2508 
2509 /*
2510  * pmap_sync_page_attributes_phys(ppnum_t pa)
2511  *
2512  * Write back and invalidate all cachelines on a physical page.
2513  */
2514 void
2515 pmap_sync_page_attributes_phys(ppnum_t pa)
2516 {
2517 	cache_flush_page_phys(pa);
2518 }
2519 
2520 void
2521 pmap_copy_page(ppnum_t src, ppnum_t dst)
2522 {
2523 	bcopy_phys((addr64_t)i386_ptob(src),
2524 	    (addr64_t)i386_ptob(dst),
2525 	    PAGE_SIZE);
2526 }
2527 
2528 
2529 /*
2530  *	Routine:	pmap_pageable
2531  *	Function:
2532  *		Make the specified pages (by pmap, offset)
2533  *		pageable (or not) as requested.
2534  *
2535  *		A page which is not pageable may not take
2536  *		a fault; therefore, its page table entry
2537  *		must remain valid for the duration.
2538  *
2539  *		This routine is merely advisory; pmap_enter
2540  *		will specify that these pages are to be wired
2541  *		down (or not) as appropriate.
2542  */
2543 void
2544 pmap_pageable(
2545 	__unused pmap_t                 pmap,
2546 	__unused vm_map_offset_t        start_addr,
2547 	__unused vm_map_offset_t        end_addr,
2548 	__unused boolean_t              pageable)
2549 {
2550 #ifdef  lint
2551 	pmap++; start_addr++; end_addr++; pageable++;
2552 #endif  /* lint */
2553 }
2554 
2555 void
2556 invalidate_icache(__unused vm_offset_t  addr,
2557     __unused unsigned     cnt,
2558     __unused int          phys)
2559 {
2560 	return;
2561 }
2562 
2563 void
2564 flush_dcache(__unused vm_offset_t       addr,
2565     __unused unsigned          count,
2566     __unused int               phys)
2567 {
2568 	return;
2569 }
2570 
2571 #if CONFIG_DTRACE
2572 /*
2573  * Constrain DTrace copyin/copyout actions
2574  */
2575 extern kern_return_t dtrace_copyio_preflight(addr64_t);
2576 extern kern_return_t dtrace_copyio_postflight(addr64_t);
2577 
2578 kern_return_t
2579 dtrace_copyio_preflight(__unused addr64_t va)
2580 {
2581 	thread_t thread = current_thread();
2582 	uint64_t ccr3;
2583 	if (current_map() == kernel_map) {
2584 		return KERN_FAILURE;
2585 	} else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE)) {
2586 		return KERN_FAILURE;
2587 	} else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3)) {
2588 		return KERN_FAILURE;
2589 	} else {
2590 		return KERN_SUCCESS;
2591 	}
2592 }
2593 
2594 kern_return_t
2595 dtrace_copyio_postflight(__unused addr64_t va)
2596 {
2597 	return KERN_SUCCESS;
2598 }
2599 #endif /* CONFIG_DTRACE */
2600 
2601 #include <mach_vm_debug.h>
2602 #if     MACH_VM_DEBUG
2603 #include <vm/vm_debug.h>
2604 
2605 int
2606 pmap_list_resident_pages(
2607 	__unused pmap_t         pmap,
2608 	__unused vm_offset_t    *listp,
2609 	__unused int            space)
2610 {
2611 	return 0;
2612 }
2613 #endif  /* MACH_VM_DEBUG */
2614 
2615 
2616 #if CONFIG_COREDUMP
2617 /* temporary workaround */
2618 boolean_t
2619 coredumpok(__unused vm_map_t map, __unused mach_vm_offset_t va)
2620 {
2621 #if 0
2622 	pt_entry_t     *ptep;
2623 
2624 	ptep = pmap_pte(map->pmap, va);
2625 	if (0 == ptep) {
2626 		return FALSE;
2627 	}
2628 	return (*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED);
2629 #else
2630 	return TRUE;
2631 #endif
2632 }
2633 #endif
2634 
2635 boolean_t
2636 phys_page_exists(ppnum_t pn)
2637 {
2638 	assert(pn != vm_page_fictitious_addr);
2639 
2640 	if (!pmap_initialized) {
2641 		return TRUE;
2642 	}
2643 
2644 	if (pn == vm_page_guard_addr) {
2645 		return FALSE;
2646 	}
2647 
2648 	if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
2649 		return FALSE;
2650 	}
2651 
2652 	return TRUE;
2653 }
2654 
2655 
2656 
2657 void
2658 pmap_switch(pmap_t tpmap)
2659 {
2660 	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(tpmap));
2661 	assert(ml_get_interrupts_enabled() == FALSE);
2662 	set_dirbase(tpmap, current_thread(), cpu_number());
2663 	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END);
2664 }
2665 
2666 void
2667 pmap_require(pmap_t pmap)
2668 {
2669 	if (pmap != kernel_pmap) {
2670 		zone_id_require(ZONE_ID_PMAP, sizeof(struct pmap), pmap);
2671 	}
2672 }
2673 
2674 /*
2675  * disable no-execute capability on
2676  * the specified pmap
2677  */
2678 void
2679 pmap_disable_NX(__unused pmap_t pmap)
2680 {
2681 #if DEVELOPMENT || DEBUG
2682 	pmap->nx_enabled = 0;
2683 #endif
2684 }
2685 
2686 void
2687 pmap_flush_context_init(pmap_flush_context *pfc)
2688 {
2689 	pfc->pfc_cpus = 0;
2690 	pfc->pfc_invalid_global = 0;
2691 }
2692 
2693 static bool
2694 pmap_tlbi_response(uint32_t lcpu, uint32_t rcpu, bool ngflush)
2695 {
2696 	bool responded = false;
2697 	bool gflushed = (cpu_datap(rcpu)->cpu_tlb_invalid_global_count !=
2698 	    cpu_datap(lcpu)->cpu_tlb_gen_counts_global[rcpu]);
2699 
2700 	if (ngflush) {
2701 		if (gflushed) {
2702 			responded = true;
2703 		}
2704 	} else {
2705 		if (gflushed) {
2706 			responded = true;
2707 		} else {
2708 			bool lflushed = (cpu_datap(rcpu)->cpu_tlb_invalid_local_count !=
2709 			    cpu_datap(lcpu)->cpu_tlb_gen_counts_local[rcpu]);
2710 			if (lflushed) {
2711 				responded = true;
2712 			}
2713 		}
2714 	}
2715 
2716 	if (responded == false) {
2717 		if ((cpu_datap(rcpu)->cpu_tlb_invalid == 0) ||
2718 		    !CPU_CR3_IS_ACTIVE(rcpu) ||
2719 		    !cpu_is_running(rcpu)) {
2720 			responded = true;
2721 		}
2722 	}
2723 	return responded;
2724 }
2725 
2726 extern uint64_t TLBTimeOut;
2727 void
2728 pmap_flush(
2729 	pmap_flush_context *pfc)
2730 {
2731 	unsigned int    my_cpu;
2732 	unsigned int    cpu;
2733 	cpumask_t       cpu_bit;
2734 	cpumask_t       cpus_to_respond = 0;
2735 	cpumask_t       cpus_to_signal = 0;
2736 	cpumask_t       cpus_signaled = 0;
2737 	boolean_t       flush_self = FALSE;
2738 	uint64_t        deadline;
2739 	bool            need_global_flush = false;
2740 
2741 	mp_disable_preemption();
2742 
2743 	my_cpu = cpu_number();
2744 	cpus_to_signal = pfc->pfc_cpus;
2745 
2746 	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
2747 	    NULL, cpus_to_signal);
2748 
2749 	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
2750 		if (cpus_to_signal & cpu_bit) {
2751 			cpus_to_signal &= ~cpu_bit;
2752 
2753 			if (!cpu_is_running(cpu)) {
2754 				continue;
2755 			}
2756 
2757 			if (pfc->pfc_invalid_global & cpu_bit) {
2758 				cpu_datap(cpu)->cpu_tlb_invalid_global = 1;
2759 				need_global_flush = true;
2760 			} else {
2761 				cpu_datap(cpu)->cpu_tlb_invalid_local = 1;
2762 			}
2763 			cpu_datap(my_cpu)->cpu_tlb_gen_counts_global[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_global_count;
2764 			cpu_datap(my_cpu)->cpu_tlb_gen_counts_local[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_local_count;
2765 			mfence();
2766 
2767 			if (cpu == my_cpu) {
2768 				flush_self = TRUE;
2769 				continue;
2770 			}
2771 			if (CPU_CR3_IS_ACTIVE(cpu)) {
2772 				cpus_to_respond |= cpu_bit;
2773 				i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2774 			}
2775 		}
2776 	}
2777 	cpus_signaled = cpus_to_respond;
2778 
2779 	/*
2780 	 * Flush local tlb if required.
2781 	 * Do this now to overlap with other processors responding.
2782 	 */
2783 	if (flush_self) {
2784 		process_pmap_updates(NULL, (pfc->pfc_invalid_global != 0), 0ULL, ~0ULL);
2785 	}
2786 
2787 	if (cpus_to_respond) {
2788 		deadline = mach_absolute_time() +
2789 		    (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2790 		boolean_t is_timeout_traced = FALSE;
2791 
2792 		/*
2793 		 * Wait for those other cpus to acknowledge
2794 		 */
2795 		while (cpus_to_respond != 0) {
2796 			long orig_acks = 0;
2797 
2798 			for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2799 				bool responded = false;
2800 				if ((cpus_to_respond & cpu_bit) != 0) {
2801 					responded = pmap_tlbi_response(my_cpu, cpu, need_global_flush);
2802 					if (responded) {
2803 						cpus_to_respond &= ~cpu_bit;
2804 					}
2805 					cpu_pause();
2806 				}
2807 
2808 				if (cpus_to_respond == 0) {
2809 					break;
2810 				}
2811 			}
2812 			if (cpus_to_respond && (mach_absolute_time() > deadline)) {
2813 				if (machine_timeout_suspended()) {
2814 					continue;
2815 				}
2816 				if (TLBTimeOut == 0) {
2817 					if (is_timeout_traced) {
2818 						continue;
2819 					}
2820 
2821 					PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
2822 					    NULL, cpus_to_signal, cpus_to_respond);
2823 
2824 					is_timeout_traced = TRUE;
2825 					continue;
2826 				}
2827 				orig_acks = NMIPI_acks;
2828 				NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
2829 				panic("Uninterruptible processor(s): CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu",
2830 				    cpus_to_respond, orig_acks, NMIPI_acks, deadline);
2831 			}
2832 		}
2833 	}
2834 
2835 	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
2836 	    NULL, cpus_signaled, flush_self);
2837 
2838 	mp_enable_preemption();
2839 }
2840 
2841 
2842 static void
2843 invept(void *eptp)
2844 {
2845 	struct {
2846 		uint64_t eptp;
2847 		uint64_t reserved;
2848 	} __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};
2849 
2850 	__asm__ volatile ("invept (%%rax), %%rcx"
2851                  : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
2852                  : "cc", "memory");
2853 }
2854 
2855 /*
2856  * Called with pmap locked, we:
2857  *  - scan through per-cpu data to see which other cpus need to flush
2858  *  - send an IPI to each non-idle cpu to be flushed
2859  *  - wait for all to signal back that they are inactive or we see that
2860  *    they are at a safe point (idle).
2861  *  - flush the local tlb if active for this pmap
2862  *  - return ... the caller will unlock the pmap
2863  */
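/*
 * Illustrative usage (a sketch, not taken from this file): a typical caller
 * mutates PTEs while holding the pmap lock and then shoots down stale
 * translations before unlocking:
 *
 *	PMAP_LOCK_EXCLUSIVE(pmap);
 *	... update PTEs covering [startv, endv) ...
 *	pmap_flush_tlbs(pmap, startv, endv, 0, NULL);
 *	PMAP_UNLOCK_EXCLUSIVE(pmap);
 *
 * With PMAP_DELAY_TLB_FLUSH the cpus to notify are accumulated in a
 * pmap_flush_context, and a single pmap_flush() is issued later.
 */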
2864 
2865 void
2866 pmap_flush_tlbs(pmap_t  pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
2867 {
2868 	unsigned int    cpu;
2869 	cpumask_t       cpu_bit;
2870 	cpumask_t       cpus_to_signal = 0;
2871 	unsigned int    my_cpu = cpu_number();
2872 	pmap_paddr_t    pmap_cr3 = pmap->pm_cr3;
2873 	boolean_t       flush_self = FALSE;
2874 	uint64_t        deadline;
2875 	boolean_t       pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
2876 	bool            need_global_flush = false;
2877 	uint32_t        event_code = 0;
2878 	vm_map_offset_t event_startv = 0, event_endv = 0;
2879 	boolean_t       is_ept = is_ept_pmap(pmap);
2880 
2881 	assert((processor_avail_count < 2) ||
2882 	    (ml_get_interrupts_enabled() && get_preemption_level() != 0));
2883 
2884 	assert((endv - startv) >= PAGE_SIZE);
2885 	assert(((endv | startv) & PAGE_MASK) == 0);
2886 
2887 	if (__improbable(kdebug_enable)) {
2888 		if (pmap == kernel_pmap) {
2889 			event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
2890 			event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
2891 			event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
2892 		} else if (__improbable(is_ept)) {
2893 			event_code = PMAP_CODE(PMAP__FLUSH_EPT);
2894 			event_startv = startv;
2895 			event_endv = endv;
2896 		} else {
2897 			event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
2898 			event_startv = startv;
2899 			event_endv = endv;
2900 		}
2901 	}
2902 
2903 	PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
2904 	    VM_KERNEL_UNSLIDE_OR_PERM(pmap), options,
2905 	    event_startv, event_endv);
2906 
2907 	if (__improbable(is_ept)) {
2908 		mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
2909 		goto out;
2910 	}
2911 
2912 	/*
2913 	 * Scan other cpus for matching active or task CR3.
2914 	 * For idle cpus (with no active map) we mark them invalid but
2915 	 * don't signal -- they'll check as they go busy.
2916 	 */
2917 	if (pmap_pcid_ncpus) {
2918 		if (pmap_is_shared) {
2919 			need_global_flush = true;
2920 		}
2921 		pmap_pcid_invalidate_all_cpus(pmap);
2922 		mfence();
2923 	}
2924 
2925 	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2926 		if (!cpu_is_running(cpu)) {
2927 			continue;
2928 		}
2929 		uint64_t        cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
2930 		uint64_t        cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
2931 
2932 		if ((pmap_cr3 == cpu_task_cr3) ||
2933 		    (pmap_cr3 == cpu_active_cr3) ||
2934 		    (pmap_is_shared)) {
2935 			if (options & PMAP_DELAY_TLB_FLUSH) {
2936 				if (need_global_flush == true) {
2937 					pfc->pfc_invalid_global |= cpu_bit;
2938 				}
2939 				pfc->pfc_cpus |= cpu_bit;
2940 
2941 				continue;
2942 			}
2943 			if (need_global_flush == true) {
2944 				cpu_datap(my_cpu)->cpu_tlb_gen_counts_global[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_global_count;
2945 				cpu_datap(cpu)->cpu_tlb_invalid_global = 1;
2946 			} else {
2947 				cpu_datap(my_cpu)->cpu_tlb_gen_counts_local[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_local_count;
2948 				cpu_datap(cpu)->cpu_tlb_invalid_local = 1;
2949 			}
2950 
2951 			if (cpu == my_cpu) {
2952 				flush_self = TRUE;
2953 				continue;
2954 			}
2955 
2956 			mfence();
2957 
2958 			/*
2959 			 * We don't need to signal processors which will flush
2960 			 * lazily at the idle state or kernel boundary.
2961 			 * For example, if we're invalidating the kernel pmap,
2962 			 * processors currently in userspace don't need to flush
2963 			 * their TLBs until the next time they enter the kernel.
2964 			 * Alterations to the address space of a task active
2965 			 * on a remote processor result in a signal, to
2966 			 * account for copy operations. (There may be room
2967 			 * for optimization in such cases).
2968 			 * The order of the loads below with respect
2969 			 * to the store to the "cpu_tlb_invalid" field above
2970 			 * is important--hence the barrier.
2971 			 */
2972 			if (CPU_CR3_IS_ACTIVE(cpu) &&
2973 			    (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
2974 			    pmap->pm_shared ||
2975 			    (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
2976 				cpus_to_signal |= cpu_bit;
2977 				i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2978 			}
2979 		}
2980 	}
2981 
2982 	if ((options & PMAP_DELAY_TLB_FLUSH)) {
2983 		goto out;
2984 	}
2985 
2986 	/*
2987 	 * Flush local tlb if required.
2988 	 * Do this now to overlap with other processors responding.
2989 	 */
2990 	if (flush_self) {
2991 		process_pmap_updates(pmap, pmap_is_shared, startv, endv);
2992 	}
2993 
2994 	if (cpus_to_signal) {
2995 		cpumask_t       cpus_to_respond = cpus_to_signal;
2996 
2997 		deadline = mach_absolute_time() +
2998 		    (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2999 		boolean_t is_timeout_traced = FALSE;
3000 
3001 		/*
3002 		 * Wait for those other cpus to acknowledge
3003 		 */
3004 		while (cpus_to_respond != 0) {
3005 			long orig_acks = 0;
3006 
3007 			for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
3008 				bool responded = false;
3009 				if ((cpus_to_respond & cpu_bit) != 0) {
3010 					responded = pmap_tlbi_response(my_cpu, cpu, need_global_flush);
3011 					if (responded) {
3012 						cpus_to_respond &= ~cpu_bit;
3013 					}
3014 					cpu_pause();
3015 				}
3016 				if (cpus_to_respond == 0) {
3017 					break;
3018 				}
3019 			}
3020 			if (cpus_to_respond && (mach_absolute_time() > deadline)) {
3021 				if (machine_timeout_suspended()) {
3022 					continue;
3023 				}
3024 				if (TLBTimeOut == 0) {
3025 					/* cut tracepoint but don't panic */
3026 					if (is_timeout_traced) {
3027 						continue;
3028 					}
3029 
3030 					PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
3031 					    VM_KERNEL_UNSLIDE_OR_PERM(pmap),
3032 					    cpus_to_signal,
3033 					    cpus_to_respond);
3034 
3035 					is_timeout_traced = TRUE;
3036 					continue;
3037 				}
3038 				orig_acks = NMIPI_acks;
3039 				uint64_t tstamp1 = mach_absolute_time();
3040 				NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
3041 				uint64_t tstamp2 = mach_absolute_time();
3042 				panic("IPI timeout, unresponsive CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu, pre-NMIPI time: 0x%llx, current: 0x%llx, global: %d",
3043 				    cpus_to_respond, orig_acks, NMIPI_acks, deadline, tstamp1, tstamp2, need_global_flush);
3044 			}
3045 		}
3046 	}
3047 
3048 	if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
3049 		panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
3050 	}
3051 
3052 out:
3053 	PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
3054 	    VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal,
3055 	    event_startv, event_endv);
3056 }
3057 
3058 static void
3059 process_pmap_updates(pmap_t p, bool pshared, addr64_t istart, addr64_t iend)
3060 {
3061 	int ccpu = cpu_number();
3062 	bool gtlbf = false;
3063 
3064 	pmap_assert(ml_get_interrupts_enabled() == 0 ||
3065 	    get_preemption_level() != 0);
3066 
3067 	if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
3068 		cpu_datap(ccpu)->cpu_tlb_invalid_global_count++;
3069 		cpu_datap(ccpu)->cpu_tlb_invalid = 0;
3070 		gtlbf = true;
3071 	} else {
3072 		cpu_datap(ccpu)->cpu_tlb_invalid_local_count++;
3073 		cpu_datap(ccpu)->cpu_tlb_invalid_local = 0;
3074 	}
3075 
3076 	if (pmap_pcid_ncpus) {
3077 		if (p) {
3078 			/* TODO global generation count to
3079 			 * avoid potentially redundant
3080 			 * csw invalidations post-global invalidation
3081 			 */
3082 			pmap_pcid_validate_cpu(p, ccpu);
3083 			pmap_tlbi_range(istart, iend, (pshared || gtlbf), p->pmap_pcid_cpus[ccpu]);
3084 		} else {
3085 			pmap_pcid_validate_current();
3086 			pmap_tlbi_range(istart, iend, true, 0);
3087 		}
3088 	} else {
3089 		pmap_tlbi_range(0, ~0ULL, true, 0);
3090 	}
3091 }
3092 
3093 void
3094 pmap_update_interrupt(void)
3095 {
3096 	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START);
3097 
3098 	if (current_cpu_datap()->cpu_tlb_invalid) {
3099 		process_pmap_updates(NULL, true, 0ULL, ~0ULL);
3100 	}
3101 
3102 	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END);
3103 }
3104 
3105 #include <mach/mach_vm.h>       /* mach_vm_region_recurse() */
3106 /* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
3107  * and identify ranges with mismatched VM permissions and PTE permissions
3108  */
3109 kern_return_t
3110 pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev)
3111 {
3112 	vm_offset_t cv = sv;
3113 	kern_return_t rv = KERN_SUCCESS;
3114 	uint64_t skip4 = 0, skip2 = 0;
3115 
3116 	assert(!is_ept_pmap(ipmap));
3117 
3118 	sv &= ~PAGE_MASK_64;
3119 	ev &= ~PAGE_MASK_64;
3120 	while (cv < ev) {
3121 		if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
3122 		    (cv < 0xFFFF800000000000ULL))) {
3123 			cv = 0xFFFF800000000000ULL;
3124 		}
3125 		/* Potential inconsistencies from not holding pmap lock
3126 		 * but harmless for the moment.
3127 		 */
3128 		if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
3129 			if ((cv + NBPML4) > cv) {
3130 				cv += NBPML4;
3131 			} else {
3132 				break;
3133 			}
3134 			skip4++;
3135 			continue;
3136 		}
3137 		if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
3138 			if ((cv + NBPD) > cv) {
3139 				cv += NBPD;
3140 			} else {
3141 				break;
3142 			}
3143 			skip2++;
3144 			continue;
3145 		}
3146 
3147 		pt_entry_t *ptep = pmap_pte(ipmap, cv);
3148 		if (ptep && (*ptep & INTEL_PTE_VALID)) {
3149 			if (*ptep & INTEL_PTE_WRITE) {
3150 				if (!(*ptep & INTEL_PTE_NX)) {
3151 					kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
3152 					rv = KERN_FAILURE;
3153 				}
3154 			}
3155 		}
3156 		cv += PAGE_SIZE;
3157 	}
3158 	kprintf("Completed pmap scan\n");
3159 	cv = sv;
3160 
3161 	struct vm_region_submap_info_64 vbr;
3162 	mach_msg_type_number_t vbrcount = 0;
3163 	mach_vm_size_t  vmsize;
3164 	vm_prot_t       prot;
3165 	uint32_t nesting_depth = 0;
3166 	kern_return_t kret;
3167 
3168 	while (cv < ev) {
3169 		for (;;) {
3170 			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
3171 			if ((kret = mach_vm_region_recurse(ivmmap,
3172 			    (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
3173 			    (vm_region_recurse_info_t)&vbr,
3174 			    &vbrcount)) != KERN_SUCCESS) {
3175 				break;
3176 			}
3177 
3178 			if (vbr.is_submap) {
3179 				nesting_depth++;
3180 				continue;
3181 			} else {
3182 				break;
3183 			}
3184 		}
3185 
3186 		if (kret != KERN_SUCCESS) {
3187 			break;
3188 		}
3189 
3190 		prot = vbr.protection;
3191 
3192 		if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
3193 			kprintf("W+X map entry at address 0x%lx\n", cv);
3194 			rv = KERN_FAILURE;
3195 		}
3196 
3197 		if (prot) {
3198 			vm_offset_t pcv;
3199 			for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
3200 				pt_entry_t *ptep = pmap_pte(ipmap, pcv);
3201 				vm_prot_t tprot;
3202 
3203 				if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID)) {
3204 					continue;
3205 				}
3206 				tprot = VM_PROT_READ;
3207 				if (*ptep & INTEL_PTE_WRITE) {
3208 					tprot |= VM_PROT_WRITE;
3209 				}
3210 				if ((*ptep & INTEL_PTE_NX) == 0) {
3211 					tprot |= VM_PROT_EXECUTE;
3212 				}
3213 				if (tprot != prot) {
3214 					kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
3215 					rv = KERN_FAILURE;
3216 				}
3217 			}
3218 		}
3219 		cv += vmsize;
3220 	}
3221 	return rv;
3222 }
3223 
3224 #if MACH_ASSERT
3225 extern int pmap_ledgers_panic;
3226 extern int pmap_ledgers_panic_leeway;
3227 
3228 static void
3229 pmap_check_ledgers(
3230 	pmap_t pmap)
3231 {
3232 	int     pid;
3233 	char    *procname;
3234 
3235 	if (pmap->pmap_pid == 0) {
3236 		/*
3237 		 * This pmap was not or is no longer fully associated
3238 		 * with a task (e.g. the old pmap after a fork()/exec() or
3239 		 * spawn()).  Its "ledger" still points at a task that is
3240 		 * now using a different (and active) address space, so
3241 		 * we can't check that all the pmap ledgers are balanced here.
3242 		 *
3243 		 * If the "pid" is set, that means that we went through
3244 		 * pmap_set_process() in task_terminate_internal(), so
3245 		 * this task's ledger should not have been re-used and
3246 		 * all the pmap ledgers should be back to 0.
3247 		 */
3248 		return;
3249 	}
3250 
3251 	pid = pmap->pmap_pid;
3252 	procname = pmap->pmap_procname;
3253 
3254 	vm_map_pmap_check_ledgers(pmap, pmap->ledger, pid, procname);
3255 }
3256 
3257 void
3258 pmap_set_process(
3259 	pmap_t pmap,
3260 	int pid,
3261 	char *procname)
3262 {
3263 	if (pmap == NULL) {
3264 		return;
3265 	}
3266 
3267 	pmap->pmap_pid = pid;
3268 	strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname));
3269 	if (pmap_ledgers_panic_leeway) {
3270 		/*
3271 		 * XXX FBDP
3272 		 * Some processes somehow trigger some issues that make
3273 		 * the pmap stats and ledgers go off track, causing
3274 		 * some assertion failures and ledger panics.
3275 		 * Turn off the sanity checks if we allow some ledger leeway
3276 		 * because of that.  We'll still do a final check in
3277 		 * pmap_check_ledgers() for discrepancies larger than the
3278 		 * allowed leeway after the address space has been fully
3279 		 * cleaned up.
3280 		 */
3281 		pmap->pmap_stats_assert = FALSE;
3282 		ledger_disable_panic_on_negative(pmap->ledger,
3283 		    task_ledgers.phys_footprint);
3284 		ledger_disable_panic_on_negative(pmap->ledger,
3285 		    task_ledgers.internal);
3286 		ledger_disable_panic_on_negative(pmap->ledger,
3287 		    task_ledgers.internal_compressed);
3288 		ledger_disable_panic_on_negative(pmap->ledger,
3289 		    task_ledgers.iokit_mapped);
3290 		ledger_disable_panic_on_negative(pmap->ledger,
3291 		    task_ledgers.alternate_accounting);
3292 		ledger_disable_panic_on_negative(pmap->ledger,
3293 		    task_ledgers.alternate_accounting_compressed);
3294 	}
3295 }
3296 #endif /* MACH_ASSERT */
3297 
3298 
3299 #if DEVELOPMENT || DEBUG
3300 int pmap_pagezero_mitigation = 1;
3301 #endif
3302 
3303 void
3304 pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound)
3305 {
3306 #if DEVELOPMENT || DEBUG
3307 	if (pmap_pagezero_mitigation == 0) {
3308 		lpmap->pagezero_accessible = FALSE;
3309 		return;
3310 	}
3311 #endif
3312 	lpmap->pagezero_accessible = ((pmap_smap_enabled == FALSE) && (low_bound < 0x1000));
3313 	if (lpmap == current_pmap()) {
3314 		mp_disable_preemption();
3315 		current_cpu_datap()->cpu_pagezero_mapped = lpmap->pagezero_accessible;
3316 		mp_enable_preemption();
3317 	}
3318 }
3319 
3320 uintptr_t
3321 pmap_verify_noncacheable(uintptr_t vaddr)
3322 {
3323 	pt_entry_t *ptep = NULL;
3324 	ptep = pmap_pte(kernel_pmap, vaddr);
3325 	if (ptep == NULL) {
3326 		panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr);
3327 	}
3328 	/* Non-cacheable OK */
3329 	if (*ptep & (INTEL_PTE_NCACHE)) {
3330 		return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK);
3331 	}
3332 	/* Write-combined OK */
3333 	if (*ptep & (INTEL_PTE_PAT)) {
3334 		return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK);
3335 	}
3336 	panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep);
3337 	/*NOTREACHED*/
3338 	return 0;
3339 }
3340 
3341 void
3342 trust_cache_init(void)
3343 {
3344 	// Unsupported on this architecture.
3345 }
3346 
3347 kern_return_t
3348 pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache,
3349     const vm_size_t __unused trust_cache_len)
3350 {
3351 	// Unsupported on this architecture.
3352 	return KERN_NOT_SUPPORTED;
3353 }
3354 
3355 pmap_tc_ret_t
3356 pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache,
3357     const vm_size_t __unused trust_cache_len,
3358     uint8_t const * __unused img4_manifest,
3359     const vm_size_t __unused img4_manifest_buffer_len,
3360     const vm_size_t __unused img4_manifest_actual_len,
3361     bool __unused dry_run)
3362 {
3363 	// Unsupported on this architecture.
3364 	return PMAP_TC_UNKNOWN_FORMAT;
3365 }
3366 
3367 
3368 bool
3369 pmap_is_trust_cache_loaded(const uuid_t __unused uuid)
3370 {
3371 	// Unsupported on this architecture.
3372 	return false;
3373 }
3374 
3375 bool
3376 pmap_lookup_in_loaded_trust_caches(const uint8_t __unused cdhash[20])
3377 {
3378 	// Unsupported on this architecture.
3379 	return false;
3380 }
3381 
3382 uint32_t
3383 pmap_lookup_in_static_trust_cache(const uint8_t __unused cdhash[20])
3384 {
3385 	// Unsupported on this architecture.
3386 	return false;
3387 }
3388 
3389 int
3390 pmap_cs_configuration(void)
3391 {
3392 	// Unsupported on this architecture.
3393 	return 0;
3394 }
3395 
3396 SIMPLE_LOCK_DECLARE(pmap_compilation_service_cdhash_lock, 0);
3397 uint8_t pmap_compilation_service_cdhash[CS_CDHASH_LEN] = { 0 };
3398 
3399 void
3400 pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])
3401 {
3402 	simple_lock(&pmap_compilation_service_cdhash_lock, LCK_GRP_NULL);
3403 	memcpy(pmap_compilation_service_cdhash, cdhash, CS_CDHASH_LEN);
3404 	simple_unlock(&pmap_compilation_service_cdhash_lock);
3405 
3406 #if DEVELOPMENT || DEBUG
3407 	printf("Added Compilation Service CDHash through the PMAP: 0x%02X 0x%02X 0x%02X 0x%02X\n", cdhash[0], cdhash[1], cdhash[2], cdhash[4]);
3408 #endif
3409 }
3410 
3411 bool
3412 pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])
3413 {
3414 	bool match = false;
3415 
3416 	simple_lock(&pmap_compilation_service_cdhash_lock, LCK_GRP_NULL);
3417 	if (bcmp(pmap_compilation_service_cdhash, cdhash, CS_CDHASH_LEN) == 0) {
3418 		match = true;
3419 	}
3420 	simple_unlock(&pmap_compilation_service_cdhash_lock);
3421 
3422 #if DEVELOPMENT || DEBUG
3423 	if (match) {
3424 		printf("Matched Compilation Service CDHash through the PMAP\n");
3425 	}
3426 #endif
3427 
3428 	return match;
3429 }
3430 
3431 static bool pmap_local_signing_public_key_set = false;
3432 static uint8_t pmap_local_signing_public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE] = { 0 };
3433 
3434 static bool
3435 pmap_local_signing_public_key_is_set(void)
3436 {
3437 	return os_atomic_load(&pmap_local_signing_public_key_set, relaxed);
3438 }
3439 
3440 void
3441 pmap_set_local_signing_public_key(const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE])
3442 {
3443 	bool key_set = false;
3444 
3445 	/*
3446 	 * os_atomic_cmpxchg returns true in case the exchange was successful. For us,
3447 	 * a successful exchange means that the local signing public key has _not_ been
3448 	 * set. In case the key has been set, we panic as we would never expect the
3449 	 * kernel to attempt to set the key more than once.
3450 	 */
3451 	key_set = !os_atomic_cmpxchg(&pmap_local_signing_public_key_set, false, true, relaxed);
3452 
3453 	if (key_set) {
3454 		panic("attempted to set the local signing public key multiple times");
3455 	}
3456 
3457 	memcpy(pmap_local_signing_public_key, public_key, PMAP_ECC_P384_PUBLIC_KEY_SIZE);
3458 
3459 #if DEVELOPMENT || DEBUG
3460 	printf("Set local signing public key\n");
3461 #endif
3462 }
3463 
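/*
 * Editor's illustrative sketch, not part of pmap.c: the "set exactly once"
 * pattern used above, reduced to its core.  os_atomic_cmpxchg() returns true
 * only for the single caller that transitions the flag from false to true;
 * any later caller sees the exchange fail and panics.  The identifiers
 * one_shot_set/one_shot_value/set_one_shot_value are hypothetical.
 *
 *	static bool one_shot_set = false;
 *	static uint64_t one_shot_value = 0;
 *
 *	void
 *	set_one_shot_value(uint64_t value)
 *	{
 *		if (!os_atomic_cmpxchg(&one_shot_set, false, true, relaxed)) {
 *			panic("attempted to set the value multiple times");
 *		}
 *		one_shot_value = value;
 *	}
 */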
3464 uint8_t*
3465 pmap_get_local_signing_public_key(void)
3466 {
3467 	if (pmap_local_signing_public_key_is_set()) {
3468 		return pmap_local_signing_public_key;
3469 	}
3470 	return NULL;
3471 }
3472 
3473 void
3474 pmap_unrestrict_local_signing(
3475 	__unused const uint8_t cdhash[CS_CDHASH_LEN])
3476 {
3477 	// TODO: Once all changes across XNU and AMFI have been submitted, panic.
3478 }
3479 
3480 bool
3481 pmap_query_entitlements(
3482 	__unused pmap_t pmap,
3483 	__unused CEQuery_t query,
3484 	__unused size_t queryLength,
3485 	__unused CEQueryContext_t finalContext)
3486 {
3487 #if !PMAP_SUPPORTS_ENTITLEMENT_CHECKS
3488 	panic("PMAP_CS: do not use this API without checking for \'#if PMAP_SUPPORTS_ENTITLEMENT_CHECKS\'");
3489 #endif
3490 
3491 	panic("PMAP_SUPPORTS_ENTITLEMENT_CHECKS should not be defined on this platform");
3492 }
3493 
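/*
 * Editor's illustrative sketch, not part of pmap.c: the panic above documents
 * the expected calling convention, namely that callers compile the query out
 * entirely on platforms where the pmap cannot evaluate entitlements.  A
 * hypothetical caller (pmap, query, and query_len assumed to be in scope)
 * would look like:
 *
 *	#if PMAP_SUPPORTS_ENTITLEMENT_CHECKS
 *	bool entitled = pmap_query_entitlements(pmap, query, query_len, NULL);
 *	#else
 *	bool entitled = false;   // fall back to a non-pmap entitlement check
 *	#endif
 */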
3494 bool
3495 pmap_cs_enabled(void)
3496 {
3497 	return false;
3498 }
3499 
3500 bool
3501 pmap_in_ppl(void)
3502 {
3503 	// Nonexistent on this architecture.
3504 	return false;
3505 }
3506 
3507 bool
3508 pmap_has_ppl(void)
3509 {
3510 	// Not supported on this architecture.
3511 	return false;
3512 }
3513 
3514 void* __attribute__((noreturn))
3515 pmap_image4_pmap_data(
3516 	__unused size_t *allocated_size)
3517 {
3518 	panic("PMAP_IMG4: image4 data not available on this architecture");
3519 }
3520 
3521 void __attribute__((noreturn))
3522 pmap_image4_set_nonce(
3523 	__unused const img4_nonce_domain_index_t ndi,
3524 	__unused const img4_nonce_t *nonce)
3525 {
3526 	panic("PMAP_IMG4: set nonce API not supported on this architecture");
3527 }
3528 
3529 void __attribute__((noreturn))
3530 pmap_image4_roll_nonce(
3531 	__unused const img4_nonce_domain_index_t ndi)
3532 {
3533 	panic("PMAP_IMG4: roll nonce API not supported on this architecture");
3534 }
3535 
3536 errno_t __attribute__((noreturn))
3537 pmap_image4_copy_nonce(
3538 	__unused const img4_nonce_domain_index_t ndi,
3539 	__unused img4_nonce_t *nonce_out
3540 	)
3541 {
3542 	panic("PMAP_IMG4: copy nonce API not supported on this architecture");
3543 }
3544 
3545 errno_t __attribute__((noreturn))
3546 pmap_image4_execute_object(
3547 	__unused img4_runtime_object_spec_index_t obj_spec_index,
3548 	__unused const img4_buff_t *payload,
3549 	__unused const img4_buff_t *_Nullable manifest)
3550 {
3551 	panic("PMAP_IMG4: execute object API not supported on this architecture");
3552 }
3553 
3554 errno_t __attribute__((noreturn))
3555 pmap_image4_copy_object(
3556 	__unused img4_runtime_object_spec_index_t obj_spec_index,
3557 	__unused vm_address_t object_out,
3558 	__unused size_t *object_length)
3559 {
3560 	panic("PMAP_IMG4: copy object API not supported on this architecture");
3561 }
3562 
3563 void
3564 pmap_lockdown_image4_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags)
3565 {
3566 	// Unsupported on this architecture.
3567 }
3568 
3569 void
3570 pmap_lockdown_image4_late_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags)
3571 {
3572 	// Unsupported on this architecture.
3573 }
3574 
3575 kern_return_t
3576 pmap_cs_allow_invalid(__unused pmap_t pmap)
3577 {
3578 	// Unsupported on this architecture.
3579 	return KERN_SUCCESS;
3580 }
3581 
3582 void *
3583 pmap_claim_reserved_ppl_page(void)
3584 {
3585 	// Unsupported on this architecture.
3586 	return NULL;
3587 }
3588 
3589 void
3590 pmap_free_reserved_ppl_page(void __unused *kva)
3591 {
3592 	// Unsupported on this architecture.
3593 }
3594 
3595 kern_return_t
3596 pmap_cs_fork_prepare(__unused pmap_t old_pmap, __unused pmap_t new_pmap)
3597 {
3598 	// PMAP_CS isn't enabled for x86_64.
3599 	return KERN_SUCCESS;
3600 }
3601 
3602 #if DEVELOPMENT || DEBUG
3603 /*
3604  * Used for unit testing recovery from text corruptions.
3605  */
3606 kern_return_t
3607 pmap_test_text_corruption(pmap_paddr_t pa)
3608 {
3609 	int pai;
3610 	uint8_t *va;
3611 
3612 	pai = ppn_to_pai(atop(pa));
3613 	if (!IS_MANAGED_PAGE(pai)) {
3614 		return KERN_FAILURE;
3615 	}
3616 
3617 	va = (uint8_t *)PHYSMAP_PTOV(pa);
3618 	va[0] = 0x0f; /* opcode for UD2 */
3619 	va[1] = 0x0b;
3620 
3621 	return KERN_SUCCESS;
3622 }
3623 #endif /* DEVELOPMENT || DEBUG */
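/*
 * Editor's illustrative sketch, not part of pmap.c: a DEVELOPMENT/DEBUG test
 * could use pmap_test_text_corruption() above to plant a UD2 (0x0f 0x0b) in a
 * managed text page and then exercise the kernel's text-corruption recovery
 * path.  kvtophys() as the source of the physical address and the surrounding
 * test logic are assumptions for illustration.
 *
 *	pmap_paddr_t pa = kvtophys(some_kernel_text_va);   // hypothetical text VA
 *	if (pmap_test_text_corruption(pa) == KERN_SUCCESS) {
 *		// Executing code in that page should now raise an invalid-opcode
 *		// fault, which the recovery test expects to be handled.
 *	}
 */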
3624