xref: /xnu-8020.101.4/osfmk/vm/vm_shared_region.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. Please obtain a copy of the License at
10  * http://www.opensource.apple.com/apsl/ and read it before using this
11  * file.
12  *
13  * The Original Code and all software distributed under the License are
14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18  * Please see the License for the specific language governing rights and
19  * limitations under the License.
20  *
21  * @APPLE_LICENSE_HEADER_END@
22  */
23 
24 /*
25  * Shared region (... and comm page)
26  *
27  * This file handles the VM shared region and comm page.
28  *
29  */
30 /*
31  * SHARED REGIONS
32  * --------------
33  *
34  * A shared region is a submap that contains the most common system shared
35  * libraries for a given environment which is defined by:
36  * - cpu-type
37  * - 64-bitness
38  * - root directory
39  * - Team ID (when pointer authentication is in use).
40  *
41  * The point of a shared region is to reduce the setup overhead when exec'ing
42  * a new process. A shared region uses a shared VM submap that gets mapped
43  * automatically at exec() time, see vm_map_exec().  The first process of a given
44  * environment sets up the shared region and all further processes in that
45  * environment can re-use that shared region without having to re-create
46  * the same mappings in their VM map.  All they need is contained in the shared
47  * region.
48  *
49  * The region can also share a pmap (mostly for read-only parts but also for the
50  * initial version of some writable parts), which gets "nested" into the
51  * process's pmap.  This reduces the number of soft faults:  once one process
52  * brings in a page in the shared region, all the other processes can access
53  * it without having to enter it in their own pmap.
54  *
55  * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
56  * to map the appropriate shared region in the process's address space.
57  * We look up the appropriate shared region for the process's environment.
58  * If we can't find one, we create a new (empty) one and add it to the list.
59  * Otherwise, we just take an extra reference on the shared region we found.
60  *
61  * The "dyld" runtime, mapped into the process's address space at exec() time,
62  * will then use the shared_region_check_np() and shared_region_map_and_slide_np()
63  * system calls to validate and/or populate the shared region with the
64  * appropriate dyld_shared_cache file.
65  *
66  * The shared region is inherited on fork() and the child simply takes an
67  * extra reference on its parent's shared region.
68  *
69  * When the task terminates, we release the reference on its shared region.
70  * When the last reference is released, we destroy the shared region.
71  *
72  * After a chroot(), the calling process keeps using its original shared region,
73  * since that's what was mapped when it was started.  But its children
74  * will use a different shared region, because they need to use the shared
75  * cache that's relative to the new root directory.
76  */
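/*
 * Illustrative sketch (not from this file): roughly how a dyld-like runtime
 * would drive the system calls named above.  The wrapper name
 * __shared_region_check_np() and the simplified control flow are assumptions
 * for illustration, not the actual dyld implementation.
 */
#if 0 /* user-space sketch, never compiled as part of this file */
#include <stdint.h>

extern int __shared_region_check_np(uint64_t *start_address);

static void
attach_to_shared_cache(void)
{
	uint64_t cache_base = 0;

	if (__shared_region_check_np(&cache_base) == 0) {
		/* a populated shared region is already mapped at cache_base:
		 * validate its UUID and re-use it */
	} else {
		/* first process in this environment: populate the region
		 * via shared_region_map_and_slide_np() (or the _2_np variant) */
	}
}
#endif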
77 
78 /*
79  * COMM PAGE
80  *
81  * A "comm page" is an area of memory that is populated by the kernel with
82  * the appropriate platform-specific version of some commonly used code.
83  * There is one "comm page" per platform (cpu-type, 64-bitness) but only
84  * for the native cpu-type.  No need to overly optimize translated code
85  * for hardware that is not really there!
86  *
87  * The comm pages are created and populated at boot time.
88  *
89  * The appropriate comm page is mapped into a process's address space
90  * at exec() time, in vm_map_exec(). It is then inherited on fork().
91  *
92  * The comm page is shared between the kernel and all applications of
93  * a given platform. Only the kernel can modify it.
94  *
95  * Applications just branch to fixed addresses in the comm page and find
96  * the right version of the code for the platform.  There is also some
97  * data provided and updated by the kernel for processes to retrieve easily
98  * without having to do a system call.
99  */
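/*
 * Illustrative sketch (not from this file): user space reads kernel-updated
 * values straight out of the comm page, with no system call.  The
 * _COMM_PAGE_NCPUS symbol comes from <machine/cpu_capabilities.h>; treat the
 * exact field and its width as an assumption for this example.
 */
#if 0 /* user-space sketch, never compiled as part of this file */
#include <machine/cpu_capabilities.h>
#include <stdint.h>

static uint8_t
commpage_ncpus(void)
{
	/* plain load from a fixed, kernel-populated address */
	return *(volatile uint8_t *)_COMM_PAGE_NCPUS;
}
#endif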
100 
101 #include <debug.h>
102 
103 #include <kern/ipc_tt.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread_call.h>
106 
107 #include <mach/mach_vm.h>
108 #include <mach/machine.h>
109 
110 #include <vm/vm_map.h>
111 #include <vm/vm_shared_region.h>
112 
113 #include <vm/vm_protos.h>
114 
115 #include <machine/commpage.h>
116 #include <machine/cpu_capabilities.h>
117 #include <sys/random.h>
118 
119 #if defined (__arm__) || defined(__arm64__)
120 #include <arm/cpu_data_internal.h>
121 #include <arm/misc_protos.h>
122 #endif
123 
124 /*
125  * the following codes are used in the subclass
126  * of the DBG_MACH_SHAREDREGION class
127  */
128 #define PROCESS_SHARED_CACHE_LAYOUT 0x00
129 
130 #if __has_feature(ptrauth_calls)
131 #include <ptrauth.h>
132 #endif /* __has_feature(ptrauth_calls) */
133 
134 /* "dyld" uses this to figure out what the kernel supports */
135 int shared_region_version = 3;
136 
137 /* trace level, output is sent to the system log file */
138 int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;
139 
140 /* should local (non-chroot) shared regions persist when no task uses them? */
141 int shared_region_persistence = 0;      /* no by default */
142 
143 
144 /* delay in seconds before reclaiming an unused shared region */
145 TUNABLE_WRITEABLE(int, shared_region_destroy_delay, "vm_shared_region_destroy_delay", 120);
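/*
 * Since TUNABLE_WRITEABLE() hooks the variable up to a boot-arg of the given
 * name, the delay can presumably be overridden at boot, e.g. with
 * "vm_shared_region_destroy_delay=240" in the boot-args (example value).
 */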
146 
147 /*
148  * Cached pointer to the most recently mapped shared region from PID 1, which should
149  * be the most commonly mapped shared region in the system.  There are many processes
150  * which do not use this, for a variety of reasons.
151  *
152  * The main consumer of this is stackshot.
153  */
154 struct vm_shared_region *primary_system_shared_region = NULL;
155 
156 #if XNU_TARGET_OS_OSX
157 /*
158  * Only one cache gets to slide on Desktop, since we can't
159  * tear down slide info properly today and the desktop actually
160  * produces lots of shared caches.
161  */
162 boolean_t shared_region_completed_slide = FALSE;
163 #endif /* XNU_TARGET_OS_OSX */
164 
165 /* this lock protects all the shared region data structures */
166 static LCK_GRP_DECLARE(vm_shared_region_lck_grp, "vm shared region");
167 static LCK_MTX_DECLARE(vm_shared_region_lock, &vm_shared_region_lck_grp);
168 
169 #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
170 #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
171 #define vm_shared_region_sleep(event, interruptible)                    \
172 	lck_mtx_sleep(&vm_shared_region_lock,                           \
173 	              LCK_SLEEP_DEFAULT,                                \
174 	              (event_t) (event),                                \
175 	              (interruptible))
176 
177 /* the list of currently available shared regions (one per environment) */
178 queue_head_t    vm_shared_region_queue = QUEUE_HEAD_INITIALIZER(vm_shared_region_queue);
179 int             vm_shared_region_count = 0;
180 int             vm_shared_region_peak = 0;
181 
182 /*
183  * the number of times an event has forced the recalculation of the reslide
184  * shared region slide.
185  */
186 #if __has_feature(ptrauth_calls)
187 int                             vm_shared_region_reslide_count = 0;
188 #endif /* __has_feature(ptrauth_calls) */
189 
190 static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
191 static vm_shared_region_t vm_shared_region_create(
192 	void                    *root_dir,
193 	cpu_type_t              cputype,
194 	cpu_subtype_t           cpu_subtype,
195 	boolean_t               is_64bit,
196 	boolean_t               reslide,
197 	boolean_t               is_driverkit);
198 static void vm_shared_region_destroy(vm_shared_region_t shared_region);
199 
200 static kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_slide_info_entry_t entry, mach_vm_size_t size);
201 static void vm_shared_region_timeout(thread_call_param_t param0,
202     thread_call_param_t param1);
203 static kern_return_t vm_shared_region_slide_mapping(
204 	vm_shared_region_t sr,
205 	user_addr_t        slide_info_addr,
206 	mach_vm_size_t     slide_info_size,
207 	mach_vm_offset_t   start,
208 	mach_vm_size_t     size,
209 	mach_vm_offset_t   slid_mapping,
210 	uint32_t           slide,
211 	memory_object_control_t,
212 	vm_prot_t          prot); /* forward */
213 
214 static int __commpage_setup = 0;
215 #if XNU_TARGET_OS_OSX
216 static int __system_power_source = 1;   /* init to external power source */
217 static void post_sys_powersource_internal(int i, int internal);
218 #endif /* XNU_TARGET_OS_OSX */
219 
220 extern u_int32_t random(void);
221 
222 /*
223  * Retrieve a task's shared region and grab an extra reference to
224  * make sure it doesn't disappear while the caller is using it.
225  * The caller is responsible for consuming that extra reference if
226  * necessary.
227  */
228 vm_shared_region_t
229 vm_shared_region_get(
230 	task_t          task)
231 {
232 	vm_shared_region_t      shared_region;
233 
234 	SHARED_REGION_TRACE_DEBUG(
235 		("shared_region: -> get(%p)\n",
236 		(void *)VM_KERNEL_ADDRPERM(task)));
237 
238 	task_lock(task);
239 	vm_shared_region_lock();
240 	shared_region = task->shared_region;
241 	if (shared_region) {
242 		assert(shared_region->sr_ref_count > 0);
243 		vm_shared_region_reference_locked(shared_region);
244 	}
245 	vm_shared_region_unlock();
246 	task_unlock(task);
247 
248 	SHARED_REGION_TRACE_DEBUG(
249 		("shared_region: get(%p) <- %p\n",
250 		(void *)VM_KERNEL_ADDRPERM(task),
251 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
252 
253 	return shared_region;
254 }
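/*
 * Hypothetical usage sketch for the get/deallocate pairing described above:
 * the extra reference keeps the region alive until the caller consumes it.
 */
#if 0 /* illustrative only */
	vm_shared_region_t sr;

	sr = vm_shared_region_get(task);         /* takes an extra reference */
	if (sr != NULL) {
		vm_map_t sr_map = vm_shared_region_vm_map(sr);
		/* ... use sr_map / other fields of sr ... */
		vm_shared_region_deallocate(sr); /* consume that reference */
	}
#endif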
255 
256 vm_map_t
257 vm_shared_region_vm_map(
258 	vm_shared_region_t      shared_region)
259 {
260 	ipc_port_t              sr_handle;
261 	vm_named_entry_t        sr_mem_entry;
262 	vm_map_t                sr_map;
263 
264 	SHARED_REGION_TRACE_DEBUG(
265 		("shared_region: -> vm_map(%p)\n",
266 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
267 	assert(shared_region->sr_ref_count > 0);
268 
269 	sr_handle = shared_region->sr_mem_entry;
270 	sr_mem_entry = mach_memory_entry_from_port(sr_handle);
271 	sr_map = sr_mem_entry->backing.map;
272 	assert(sr_mem_entry->is_sub_map);
273 
274 	SHARED_REGION_TRACE_DEBUG(
275 		("shared_region: vm_map(%p) <- %p\n",
276 		(void *)VM_KERNEL_ADDRPERM(shared_region),
277 		(void *)VM_KERNEL_ADDRPERM(sr_map)));
278 	return sr_map;
279 }
280 
281 /*
282  * Set the shared region the process should use.
283  * A NULL new shared region means that we just want to release the old
284  * shared region.
285  * The caller should already have an extra reference on the new shared region
286  * (if any).  We release a reference on the old shared region (if any).
287  */
288 void
289 vm_shared_region_set(
290 	task_t                  task,
291 	vm_shared_region_t      new_shared_region)
292 {
293 	vm_shared_region_t      old_shared_region;
294 
295 	SHARED_REGION_TRACE_DEBUG(
296 		("shared_region: -> set(%p, %p)\n",
297 		(void *)VM_KERNEL_ADDRPERM(task),
298 		(void *)VM_KERNEL_ADDRPERM(new_shared_region)));
299 
300 	task_lock(task);
301 	vm_shared_region_lock();
302 
303 	old_shared_region = task->shared_region;
304 	if (new_shared_region) {
305 		assert(new_shared_region->sr_ref_count > 0);
306 	}
307 
308 	task->shared_region = new_shared_region;
309 
310 	vm_shared_region_unlock();
311 	task_unlock(task);
312 
313 	if (old_shared_region) {
314 		assert(old_shared_region->sr_ref_count > 0);
315 		vm_shared_region_deallocate(old_shared_region);
316 	}
317 
318 	SHARED_REGION_TRACE_DEBUG(
319 		("shared_region: set(%p) <- old=%p new=%p\n",
320 		(void *)VM_KERNEL_ADDRPERM(task),
321 		(void *)VM_KERNEL_ADDRPERM(old_shared_region),
322 		(void *)VM_KERNEL_ADDRPERM(new_shared_region)));
323 }
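/*
 * Hypothetical exec-time sketch: vm_shared_region_lookup() returns with an
 * extra reference, which vm_shared_region_set() hands over to the task; the
 * old region's reference (if any) is released inside set().
 */
#if 0 /* illustrative only */
	vm_shared_region_t new_sr;

	new_sr = vm_shared_region_lookup(root_dir, cputype, cpu_subtype,
	    is_64bit, reslide, is_driverkit);
	vm_shared_region_set(task, new_sr);      /* task now owns that reference */
#endif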
324 
325 /*
326  * New arm64 shared regions match with an existing arm64e region.
327  * They just get a private non-authenticating pager.
328  */
329 static inline bool
330 match_subtype(cpu_type_t cputype, cpu_subtype_t exist, cpu_subtype_t new)
331 {
332 	if (exist == new) {
333 		return true;
334 	}
335 	if (cputype == CPU_TYPE_ARM64 &&
336 	    exist == CPU_SUBTYPE_ARM64E &&
337 	    new == CPU_SUBTYPE_ARM64_ALL) {
338 		return true;
339 	}
340 	return false;
341 }
342 
343 
344 /*
345  * Look up the shared region for the desired environment.
346  * If none is found, create a new (empty) one.
347  * Grab an extra reference on the returned shared region, to make sure
348  * it doesn't get destroyed before the caller is done with it.  The caller
349  * is responsible for consuming that extra reference if necessary.
350  */
351 vm_shared_region_t
352 vm_shared_region_lookup(
353 	void            *root_dir,
354 	cpu_type_t      cputype,
355 	cpu_subtype_t   cpu_subtype,
356 	boolean_t       is_64bit,
357 	boolean_t       reslide,
358 	boolean_t       is_driverkit)
359 {
360 	vm_shared_region_t      shared_region;
361 	vm_shared_region_t      new_shared_region;
362 
363 	SHARED_REGION_TRACE_DEBUG(
364 		("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,driverkit=%d)\n",
365 		(void *)VM_KERNEL_ADDRPERM(root_dir),
366 		cputype, cpu_subtype, is_64bit, reslide, is_driverkit));
367 
368 	shared_region = NULL;
369 	new_shared_region = NULL;
370 
371 	vm_shared_region_lock();
372 	for (;;) {
373 		queue_iterate(&vm_shared_region_queue,
374 		    shared_region,
375 		    vm_shared_region_t,
376 		    sr_q) {
377 			assert(shared_region->sr_ref_count > 0);
378 			if (shared_region->sr_cpu_type == cputype &&
379 			    match_subtype(cputype, shared_region->sr_cpu_subtype, cpu_subtype) &&
380 			    shared_region->sr_root_dir == root_dir &&
381 			    shared_region->sr_64bit == is_64bit &&
382 #if __has_feature(ptrauth_calls)
383 			    shared_region->sr_reslide == reslide &&
384 #endif /* __has_feature(ptrauth_calls) */
385 			    shared_region->sr_driverkit == is_driverkit &&
386 			    !shared_region->sr_stale) {
387 				/* found a match ! */
388 				vm_shared_region_reference_locked(shared_region);
389 				goto done;
390 			}
391 		}
392 		if (new_shared_region == NULL) {
393 			/* no match: create a new one */
394 			vm_shared_region_unlock();
395 			new_shared_region = vm_shared_region_create(root_dir,
396 			    cputype,
397 			    cpu_subtype,
398 			    is_64bit,
399 			    reslide,
400 			    is_driverkit);
401 			/* do the lookup again, in case we lost a race */
402 			vm_shared_region_lock();
403 			continue;
404 		}
405 		/* still no match: use our new one */
406 		shared_region = new_shared_region;
407 		new_shared_region = NULL;
408 		queue_enter(&vm_shared_region_queue,
409 		    shared_region,
410 		    vm_shared_region_t,
411 		    sr_q);
412 		vm_shared_region_count++;
413 		if (vm_shared_region_count > vm_shared_region_peak) {
414 			vm_shared_region_peak = vm_shared_region_count;
415 		}
416 		break;
417 	}
418 
419 done:
420 	vm_shared_region_unlock();
421 
422 	if (new_shared_region) {
423 		/*
424 		 * We lost a race with someone else to create a new shared
425 		 * region for that environment. Get rid of our unused one.
426 		 */
427 		assert(new_shared_region->sr_ref_count == 1);
428 		new_shared_region->sr_ref_count--;
429 		vm_shared_region_destroy(new_shared_region);
430 		new_shared_region = NULL;
431 	}
432 
433 	SHARED_REGION_TRACE_DEBUG(
434 		("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,driverkit=%d) <- %p\n",
435 		(void *)VM_KERNEL_ADDRPERM(root_dir),
436 		cputype, cpu_subtype, is_64bit, reslide, is_driverkit,
437 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
438 
439 	assert(shared_region->sr_ref_count > 0);
440 	return shared_region;
441 }
442 
443 /*
444  * Take an extra reference on a shared region.
445  * The vm_shared_region_lock should already be held by the caller.
446  */
447 static void
448 vm_shared_region_reference_locked(
449 	vm_shared_region_t      shared_region)
450 {
451 	LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);
452 
453 	SHARED_REGION_TRACE_DEBUG(
454 		("shared_region: -> reference_locked(%p)\n",
455 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
456 	assert(shared_region->sr_ref_count > 0);
457 	shared_region->sr_ref_count++;
458 	assert(shared_region->sr_ref_count != 0);
459 
460 	if (shared_region->sr_timer_call != NULL) {
461 		boolean_t cancelled;
462 
463 		/* cancel and free any pending timeout */
464 		cancelled = thread_call_cancel(shared_region->sr_timer_call);
465 		if (cancelled) {
466 			thread_call_free(shared_region->sr_timer_call);
467 			shared_region->sr_timer_call = NULL;
468 			/* release the reference held by the cancelled timer */
469 			shared_region->sr_ref_count--;
470 		} else {
471 			/* the timer will drop the reference and free itself */
472 		}
473 	}
474 
475 	SHARED_REGION_TRACE_DEBUG(
476 		("shared_region: reference_locked(%p) <- %d\n",
477 		(void *)VM_KERNEL_ADDRPERM(shared_region),
478 		shared_region->sr_ref_count));
479 }
480 
481 /*
482  * Take a reference on a shared region.
483  */
484 void
485 vm_shared_region_reference(vm_shared_region_t shared_region)
486 {
487 	SHARED_REGION_TRACE_DEBUG(
488 		("shared_region: -> reference(%p)\n",
489 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
490 
491 	vm_shared_region_lock();
492 	vm_shared_region_reference_locked(shared_region);
493 	vm_shared_region_unlock();
494 
495 	SHARED_REGION_TRACE_DEBUG(
496 		("shared_region: reference(%p) <- %d\n",
497 		(void *)VM_KERNEL_ADDRPERM(shared_region),
498 		shared_region->sr_ref_count));
499 }
500 
501 /*
502  * Release a reference on the shared region.
503  * Destroy it if there are no references left.
504  */
505 void
506 vm_shared_region_deallocate(
507 	vm_shared_region_t      shared_region)
508 {
509 	SHARED_REGION_TRACE_DEBUG(
510 		("shared_region: -> deallocate(%p)\n",
511 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
512 
513 	vm_shared_region_lock();
514 
515 	assert(shared_region->sr_ref_count > 0);
516 
517 	if (shared_region->sr_root_dir == NULL) {
518 		/*
519 		 * Local (i.e. based on the boot volume) shared regions
520 		 * can persist or not based on the "shared_region_persistence"
521 		 * sysctl.
522 		 * Make sure that this one complies.
523 		 *
524 		 * See comments in vm_shared_region_slide() for notes about
525 		 * shared regions we have slid (which are not torn down currently).
526 		 */
527 		if (shared_region_persistence &&
528 		    !shared_region->sr_persists) {
529 			/* make this one persistent */
530 			shared_region->sr_ref_count++;
531 			shared_region->sr_persists = TRUE;
532 		} else if (!shared_region_persistence &&
533 		    shared_region->sr_persists) {
534 			/* make this one no longer persistent */
535 			assert(shared_region->sr_ref_count > 1);
536 			shared_region->sr_ref_count--;
537 			shared_region->sr_persists = FALSE;
538 		}
539 	}
540 
541 	assert(shared_region->sr_ref_count > 0);
542 	shared_region->sr_ref_count--;
543 	SHARED_REGION_TRACE_DEBUG(
544 		("shared_region: deallocate(%p): ref now %d\n",
545 		(void *)VM_KERNEL_ADDRPERM(shared_region),
546 		shared_region->sr_ref_count));
547 
548 	if (shared_region->sr_ref_count == 0) {
549 		uint64_t deadline;
550 
551 		/*
552 		 * Even though a shared region is unused, delay a while before
553 		 * tearing it down, in case a new app launch can use it.
554 		 */
555 		if (shared_region->sr_timer_call == NULL &&
556 		    shared_region_destroy_delay != 0 &&
557 		    !shared_region->sr_stale) {
558 			/* hold one reference for the timer */
559 			assert(!shared_region->sr_mapping_in_progress);
560 			shared_region->sr_ref_count++;
561 
562 			/* set up the timer */
563 			shared_region->sr_timer_call = thread_call_allocate(
564 				(thread_call_func_t) vm_shared_region_timeout,
565 				(thread_call_param_t) shared_region);
566 
567 			/* schedule the timer */
568 			clock_interval_to_deadline(shared_region_destroy_delay,
569 			    NSEC_PER_SEC,
570 			    &deadline);
571 			thread_call_enter_delayed(shared_region->sr_timer_call,
572 			    deadline);
573 
574 			SHARED_REGION_TRACE_DEBUG(
575 				("shared_region: deallocate(%p): armed timer\n",
576 				(void *)VM_KERNEL_ADDRPERM(shared_region)));
577 
578 			vm_shared_region_unlock();
579 		} else {
580 			/* timer expired: let go of this shared region */
581 
582 			/* Make sure there's no cached pointer to the region. */
583 			if (primary_system_shared_region == shared_region) {
584 				primary_system_shared_region = NULL;
585 			}
586 
587 			/*
588 			 * Remove it from the queue first, so no one can find
589 			 * it...
590 			 */
591 			queue_remove(&vm_shared_region_queue,
592 			    shared_region,
593 			    vm_shared_region_t,
594 			    sr_q);
595 			vm_shared_region_count--;
596 			vm_shared_region_unlock();
597 
598 			/* ... and destroy it */
599 			vm_shared_region_destroy(shared_region);
600 			shared_region = NULL;
601 		}
602 	} else {
603 		vm_shared_region_unlock();
604 	}
605 
606 	SHARED_REGION_TRACE_DEBUG(
607 		("shared_region: deallocate(%p) <-\n",
608 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
609 }
610 
611 void
612 vm_shared_region_timeout(
613 	thread_call_param_t     param0,
614 	__unused thread_call_param_t    param1)
615 {
616 	vm_shared_region_t      shared_region;
617 
618 	shared_region = (vm_shared_region_t) param0;
619 
620 	vm_shared_region_deallocate(shared_region);
621 }
622 
623 
624 /*
625  * Create a new (empty) shared region for a new environment.
626  */
627 static vm_shared_region_t
628 vm_shared_region_create(
629 	void                    *root_dir,
630 	cpu_type_t              cputype,
631 	cpu_subtype_t           cpu_subtype,
632 	boolean_t               is_64bit,
633 #if !__has_feature(ptrauth_calls)
634 	__unused
635 #endif /* !__has_feature(ptrauth_calls) */
636 	boolean_t               reslide,
637 	boolean_t               is_driverkit)
638 {
639 	vm_named_entry_t        mem_entry;
640 	ipc_port_t              mem_entry_port;
641 	vm_shared_region_t      shared_region;
642 	vm_map_t                sub_map;
643 	mach_vm_offset_t        base_address, pmap_nesting_start;
644 	mach_vm_size_t          size, pmap_nesting_size;
645 
646 	SHARED_REGION_TRACE_INFO(
647 		("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,driverkit=%d)\n",
648 		(void *)VM_KERNEL_ADDRPERM(root_dir),
649 		cputype, cpu_subtype, is_64bit, reslide, is_driverkit));
650 
651 	base_address = 0;
652 	size = 0;
653 	mem_entry = NULL;
654 	mem_entry_port = IPC_PORT_NULL;
655 	sub_map = VM_MAP_NULL;
656 
657 	/* create a new shared region structure... */
658 	shared_region = kalloc_type(struct vm_shared_region,
659 	    Z_WAITOK | Z_NOFAIL);
660 
661 	/* figure out the correct settings for the desired environment */
662 	if (is_64bit) {
663 		switch (cputype) {
664 #if defined(__arm64__)
665 		case CPU_TYPE_ARM64:
666 			base_address = SHARED_REGION_BASE_ARM64;
667 			size = SHARED_REGION_SIZE_ARM64;
668 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
669 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
670 			break;
671 #elif !defined(__arm__)
672 		case CPU_TYPE_I386:
673 			base_address = SHARED_REGION_BASE_X86_64;
674 			size = SHARED_REGION_SIZE_X86_64;
675 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
676 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
677 			break;
678 		case CPU_TYPE_POWERPC:
679 			base_address = SHARED_REGION_BASE_PPC64;
680 			size = SHARED_REGION_SIZE_PPC64;
681 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
682 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
683 			break;
684 #endif
685 		default:
686 			SHARED_REGION_TRACE_ERROR(
687 				("shared_region: create: unknown cpu type %d\n",
688 				cputype));
689 			kfree_type(struct vm_shared_region, shared_region);
690 			shared_region = NULL;
691 			goto done;
692 		}
693 	} else {
694 		switch (cputype) {
695 #if defined(__arm__) || defined(__arm64__)
696 		case CPU_TYPE_ARM:
697 			base_address = SHARED_REGION_BASE_ARM;
698 			size = SHARED_REGION_SIZE_ARM;
699 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
700 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
701 			break;
702 #else
703 		case CPU_TYPE_I386:
704 			base_address = SHARED_REGION_BASE_I386;
705 			size = SHARED_REGION_SIZE_I386;
706 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
707 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
708 			break;
709 		case CPU_TYPE_POWERPC:
710 			base_address = SHARED_REGION_BASE_PPC;
711 			size = SHARED_REGION_SIZE_PPC;
712 			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
713 			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
714 			break;
715 #endif
716 		default:
717 			SHARED_REGION_TRACE_ERROR(
718 				("shared_region: create: unknown cpu type %d\n",
719 				cputype));
720 			kfree_type(struct vm_shared_region, shared_region);
721 			shared_region = NULL;
722 			goto done;
723 		}
724 	}
725 
726 	/* create a memory entry structure and a Mach port handle */
727 	mem_entry = mach_memory_entry_allocate(&mem_entry_port);
728 
729 #if     defined(__arm__) || defined(__arm64__)
730 	{
731 		struct pmap *pmap_nested;
732 		int pmap_flags = 0;
733 		pmap_flags |= is_64bit ? PMAP_CREATE_64BIT : 0;
734 
735 
736 		pmap_nested = pmap_create_options(NULL, 0, pmap_flags);
737 		if (pmap_nested != PMAP_NULL) {
738 			pmap_set_nested(pmap_nested);
739 			sub_map = vm_map_create_options(pmap_nested, 0,
740 			    (vm_map_offset_t)size, VM_MAP_CREATE_PAGEABLE);
741 #if defined(__arm64__)
742 			if (is_64bit ||
743 			    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
744 				/* enforce 16KB alignment of VM map entries */
745 				vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
746 			}
747 
748 #elif (__ARM_ARCH_7K__ >= 2)
749 			/* enforce 16KB alignment for watch targets with new ABI */
750 			vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
751 #endif /* __arm64__ */
752 		} else {
753 			sub_map = VM_MAP_NULL;
754 		}
755 	}
756 #else /* defined(__arm__) || defined(__arm64__) */
757 	{
758 		/* create a VM sub map and its pmap */
759 		pmap_t pmap = pmap_create_options(NULL, 0, is_64bit);
760 		if (pmap != NULL) {
761 			sub_map = vm_map_create_options(pmap, 0,
762 			    (vm_map_offset_t)size, VM_MAP_CREATE_PAGEABLE);
763 		} else {
764 			sub_map = VM_MAP_NULL;
765 		}
766 	}
767 #endif /* defined(__arm__) || defined(__arm64__) */
768 	if (sub_map == VM_MAP_NULL) {
769 		ipc_port_release_send(mem_entry_port);
770 		kfree_type(struct vm_shared_region, shared_region);
771 		shared_region = NULL;
772 		SHARED_REGION_TRACE_ERROR(("shared_region: create: couldn't allocate map\n"));
773 		goto done;
774 	}
775 
776 	/* shared regions should always enforce code-signing */
777 	vm_map_cs_enforcement_set(sub_map, true);
778 	assert(vm_map_cs_enforcement(sub_map));
779 	assert(pmap_get_vm_map_cs_enforced(vm_map_pmap(sub_map)));
780 
781 	assert(!sub_map->disable_vmentry_reuse);
782 	sub_map->is_nested_map = TRUE;
783 
784 	/* make the memory entry point to the VM sub map */
785 	mem_entry->is_sub_map = TRUE;
786 	mem_entry->backing.map = sub_map;
787 	mem_entry->size = size;
788 	mem_entry->protection = VM_PROT_ALL;
789 
790 	/* make the shared region point at the memory entry */
791 	shared_region->sr_mem_entry = mem_entry_port;
792 
793 	/* fill in the shared region's environment and settings */
794 	shared_region->sr_base_address = base_address;
795 	shared_region->sr_size = size;
796 	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
797 	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
798 	shared_region->sr_cpu_type = cputype;
799 	shared_region->sr_cpu_subtype = cpu_subtype;
800 	shared_region->sr_64bit = (uint8_t)is_64bit;
801 	shared_region->sr_driverkit = (uint8_t)is_driverkit;
802 	shared_region->sr_root_dir = root_dir;
803 
804 	queue_init(&shared_region->sr_q);
805 	shared_region->sr_mapping_in_progress = FALSE;
806 	shared_region->sr_slide_in_progress = FALSE;
807 	shared_region->sr_persists = FALSE;
808 	shared_region->sr_stale = FALSE;
809 	shared_region->sr_timer_call = NULL;
810 	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
811 
812 	/* grab a reference for the caller */
813 	shared_region->sr_ref_count = 1;
814 
815 	shared_region->sr_slide = 0; /* not slid yet */
816 
817 	/* Initialize UUID and other metadata */
818 	memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
819 	shared_region->sr_uuid_copied = FALSE;
820 	shared_region->sr_images_count = 0;
821 	shared_region->sr_images = NULL;
822 #if __has_feature(ptrauth_calls)
823 	shared_region->sr_reslide = reslide;
824 	shared_region->sr_num_auth_section = 0;
825 	for (uint_t i = 0; i < NUM_SR_AUTH_SECTIONS; ++i) {
826 		shared_region->sr_auth_section[i] = NULL;
827 	}
829 #endif /* __has_feature(ptrauth_calls) */
830 
831 done:
832 	if (shared_region) {
833 		SHARED_REGION_TRACE_INFO(
834 			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,driverkit=%d,"
835 			"base=0x%llx,size=0x%llx) <- "
836 			"%p mem=(%p,%p) map=%p pmap=%p\n",
837 			(void *)VM_KERNEL_ADDRPERM(root_dir),
838 			cputype, cpu_subtype, is_64bit, reslide, is_driverkit,
839 			(long long)base_address,
840 			(long long)size,
841 			(void *)VM_KERNEL_ADDRPERM(shared_region),
842 			(void *)VM_KERNEL_ADDRPERM(mem_entry_port),
843 			(void *)VM_KERNEL_ADDRPERM(mem_entry),
844 			(void *)VM_KERNEL_ADDRPERM(sub_map),
845 			(void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
846 	} else {
847 		SHARED_REGION_TRACE_INFO(
848 			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,driverkit=%d,"
849 			"base=0x%llx,size=0x%llx) <- NULL",
850 			(void *)VM_KERNEL_ADDRPERM(root_dir),
851 			cputype, cpu_subtype, is_64bit, is_driverkit,
852 			(long long)base_address,
853 			(long long)size));
854 	}
855 	return shared_region;
856 }
857 
858 /*
859  * Destroy a now-unused shared region.
860  * The shared region is no longer in the queue and cannot be looked up.
861  */
862 static void
863 vm_shared_region_destroy(
864 	vm_shared_region_t      shared_region)
865 {
866 	vm_named_entry_t        mem_entry;
867 	vm_map_t                map;
868 
869 	SHARED_REGION_TRACE_INFO(
870 		("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d,driverkit=%d)\n",
871 		(void *)VM_KERNEL_ADDRPERM(shared_region),
872 		(void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
873 		shared_region->sr_cpu_type,
874 		shared_region->sr_cpu_subtype,
875 		shared_region->sr_64bit,
876 		shared_region->sr_driverkit));
877 
878 	assert(shared_region->sr_ref_count == 0);
879 	assert(!shared_region->sr_persists);
880 
881 	mem_entry = mach_memory_entry_from_port(shared_region->sr_mem_entry);
882 	assert(mem_entry->is_sub_map);
883 	assert(!mem_entry->internal);
884 	assert(!mem_entry->is_copy);
885 	map = mem_entry->backing.map;
886 
887 	/*
888 	 * Clean up the pmap first.  The virtual addresses that were
889 	 * entered in this possibly "nested" pmap may have different values
890 	 * than the VM map's min and max offsets, if the VM sub map was
891 	 * mapped at a non-zero offset in the processes' main VM maps, which
892 	 * is usually the case, so the clean-up we do in vm_map_destroy() would
893 	 * not be enough.
894 	 */
895 	if (map->pmap) {
896 		pmap_remove(map->pmap,
897 		    (vm_map_offset_t)shared_region->sr_base_address,
898 		    (vm_map_offset_t)(shared_region->sr_base_address + shared_region->sr_size));
899 	}
900 
901 	/*
902 	 * Release our (one and only) handle on the memory entry.
903 	 * This will generate a no-senders notification, which will be processed
904 	 * by ipc_kobject_notify_no_senders(), which will release the one and only
905 	 * reference on the memory entry and cause it to be destroyed, along
906 	 * with the VM sub map and its pmap.
907 	 */
908 	mach_memory_entry_port_release(shared_region->sr_mem_entry);
909 	mem_entry = NULL;
910 	shared_region->sr_mem_entry = IPC_PORT_NULL;
911 
912 	if (shared_region->sr_timer_call) {
913 		thread_call_free(shared_region->sr_timer_call);
914 	}
915 
916 #if __has_feature(ptrauth_calls)
917 	/*
918 	 * Free the cached copies of slide_info for the AUTH regions.
919 	 */
920 	for (uint_t i = 0; i < shared_region->sr_num_auth_section; ++i) {
921 		vm_shared_region_slide_info_t si = shared_region->sr_auth_section[i];
922 		if (si != NULL) {
923 			vm_object_deallocate(si->si_slide_object);
924 			kfree_data(si->si_slide_info_entry,
925 			    si->si_slide_info_size);
926 			kfree_type(struct vm_shared_region_slide_info, si);
927 			shared_region->sr_auth_section[i] = NULL;
928 		}
929 	}
930 	shared_region->sr_num_auth_section = 0;
931 #endif /* __has_feature(ptrauth_calls) */
932 
933 	/* release the shared region structure... */
934 	kfree_type(struct vm_shared_region, shared_region);
935 
936 	SHARED_REGION_TRACE_DEBUG(
937 		("shared_region: destroy(%p) <-\n",
938 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
939 	shared_region = NULL;
940 }
941 
942 /*
943  * Gets the address of the first (in time) mapping in the shared region.
944  * If used during initial task setup by dyld, task should be non-NULL.
945  */
946 kern_return_t
947 vm_shared_region_start_address(
948 	vm_shared_region_t      shared_region,
949 	mach_vm_offset_t        *start_address,
950 	task_t                  task)
951 {
952 	kern_return_t           kr;
953 	mach_vm_offset_t        sr_base_address;
954 	mach_vm_offset_t        sr_first_mapping;
955 
956 	SHARED_REGION_TRACE_DEBUG(
957 		("shared_region: -> start_address(%p)\n",
958 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
959 
960 	vm_shared_region_lock();
961 
962 	/*
963 	 * Wait if there's another thread establishing a mapping
964 	 * in this shared region right when we're looking at it.
965 	 * We want a consistent view of the map...
966 	 */
967 	while (shared_region->sr_mapping_in_progress) {
968 		/* wait for our turn... */
969 		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
970 		    THREAD_UNINT);
971 	}
972 	assert(!shared_region->sr_mapping_in_progress);
973 	assert(shared_region->sr_ref_count > 0);
974 
975 	sr_base_address = shared_region->sr_base_address;
976 	sr_first_mapping = shared_region->sr_first_mapping;
977 
978 	if (sr_first_mapping == (mach_vm_offset_t) -1) {
979 		/* shared region is empty */
980 		kr = KERN_INVALID_ADDRESS;
981 	} else {
982 		kr = KERN_SUCCESS;
983 		*start_address = sr_base_address + sr_first_mapping;
984 	}
985 
986 
987 	uint32_t slide = shared_region->sr_slide;
988 
989 	vm_shared_region_unlock();
990 
991 	/*
992 	 * Cache shared region info in the task for telemetry gathering, if we're
993  * passed in the task. No task lock here as we're still in initial task setup.
994 	 */
995 	if (kr == KERN_SUCCESS && task != NULL && task->task_shared_region_slide == -1) {
996 		uint_t sc_header_uuid_offset = offsetof(struct _dyld_cache_header, uuid);
997 		if (copyin((user_addr_t)(*start_address + sc_header_uuid_offset),
998 		    (char *)&task->task_shared_region_uuid,
999 		    sizeof(task->task_shared_region_uuid)) == 0) {
1000 			task->task_shared_region_slide = slide;
1001 		}
1002 	}
1003 
1004 	SHARED_REGION_TRACE_DEBUG(
1005 		("shared_region: start_address(%p) <- 0x%llx\n",
1006 		(void *)VM_KERNEL_ADDRPERM(shared_region),
1007 		(long long)shared_region->sr_base_address));
1008 
1009 	return kr;
1010 }
1011 
1012 /*
1013  * Look up a pre-existing mapping in shared region, for replacement.
1014  * Takes an extra object reference if found.
1015  */
1016 static kern_return_t
1017 find_mapping_to_slide(vm_map_t map, vm_map_address_t addr, vm_map_entry_t entry)
1018 {
1019 	vm_map_entry_t found;
1020 
1021 	/* find the shared region's map entry to slide */
1022 	vm_map_lock_read(map);
1023 	if (!vm_map_lookup_entry_allow_pgz(map, addr, &found)) {
1024 		/* no mapping there */
1025 		vm_map_unlock_read(map);
1026 		return KERN_INVALID_ARGUMENT;
1027 	}
1028 
1029 	*entry = *found;
1030 	/* extra ref to keep object alive while map is unlocked */
1031 	vm_object_reference(VME_OBJECT(found));
1032 	vm_map_unlock_read(map);
1033 	return KERN_SUCCESS;
1034 }
1035 
1036 #if __has_feature(ptrauth_calls)
1037 
1038 /*
1039  * Determine if this task is actually using pointer signing.
1040  */
1041 static boolean_t
1042 task_sign_pointers(task_t task)
1043 {
1044 	if (task->map &&
1045 	    task->map->pmap &&
1046 	    !task->map->pmap->disable_jop) {
1047 		return TRUE;
1048 	}
1049 	return FALSE;
1050 }
1051 
1052 /*
1053  * If the shared region contains mappings that are authenticated, then
1054  * remap them into the task private map.
1055  *
1056  * Failures are possible in this routine when jetsam kills a process
1057  * just as dyld is trying to set it up. The vm_map and task shared region
1058  * info get torn down w/o waiting for this thread to finish up.
1059  */
1060 __attribute__((noinline))
1061 kern_return_t
1062 vm_shared_region_auth_remap(vm_shared_region_t sr)
1063 {
1064 	memory_object_t               sr_pager = MEMORY_OBJECT_NULL;
1065 	task_t                        task = current_task();
1066 	vm_shared_region_slide_info_t si;
1067 	uint_t                        i;
1068 	vm_object_t                   object;
1069 	vm_map_t                      sr_map;
1070 	struct vm_map_entry           tmp_entry_store = {0};
1071 	vm_map_entry_t                tmp_entry = NULL;
1072 	int                           vm_flags;
1073 	vm_map_kernel_flags_t         vmk_flags;
1074 	vm_map_offset_t               map_addr;
1075 	kern_return_t                 kr = KERN_SUCCESS;
1076 	boolean_t                     use_ptr_auth = task_sign_pointers(task);
1077 
1078 	/*
1079 	 * Don't do this more than once and avoid any race conditions in finishing it.
1080 	 */
1081 	vm_shared_region_lock();
1082 	while (sr->sr_mapping_in_progress) {
1083 		/* wait for our turn... */
1084 		vm_shared_region_sleep(&sr->sr_mapping_in_progress, THREAD_UNINT);
1085 	}
1086 	assert(!sr->sr_mapping_in_progress);
1087 	assert(sr->sr_ref_count > 0);
1088 
1089 	/* Just return if already done. */
1090 	if (task->shared_region_auth_remapped) {
1091 		vm_shared_region_unlock();
1092 		return KERN_SUCCESS;
1093 	}
1094 
1095 	/* let others know to wait while we're working in this shared region */
1096 	sr->sr_mapping_in_progress = TRUE;
1097 	vm_shared_region_unlock();
1098 
1099 	/*
1100 	 * Remap any sections with pointer authentication into the private map.
1101 	 */
1102 	for (i = 0; i < sr->sr_num_auth_section; ++i) {
1103 		si = sr->sr_auth_section[i];
1104 		assert(si != NULL);
1105 		assert(si->si_ptrauth);
1106 
1107 		/*
1108 		 * We have a mapping that needs to be private.
1109 		 * Look for an existing slid mapping's pager with matching
1110 		 * object, offset, slide info and shared_region_id to reuse.
1111 		 */
1112 		object = si->si_slide_object;
1113 		sr_pager = shared_region_pager_match(object, si->si_start, si,
1114 		    use_ptr_auth ? task->jop_pid : 0);
1115 		if (sr_pager == MEMORY_OBJECT_NULL) {
1116 			printf("%s(): shared_region_pager_match() failed\n", __func__);
1117 			kr = KERN_FAILURE;
1118 			goto done;
1119 		}
1120 
1121 		/*
1122 		 * verify matching jop_pid for this task and this pager
1123 		 */
1124 		if (use_ptr_auth) {
1125 			shared_region_pager_match_task_key(sr_pager, task);
1126 		}
1127 
1128 		sr_map = vm_shared_region_vm_map(sr);
1129 		tmp_entry = NULL;
1130 
1131 		kr = find_mapping_to_slide(sr_map, si->si_slid_address - sr->sr_base_address, &tmp_entry_store);
1132 		if (kr != KERN_SUCCESS) {
1133 			printf("%s(): find_mapping_to_slide() failed\n", __func__);
1134 			goto done;
1135 		}
1136 		tmp_entry = &tmp_entry_store;
1137 
1138 		/*
1139 		 * Check that the object exactly covers the region to slide.
1140 		 */
1141 		if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {
1142 			printf("%s(): doesn't fully cover\n", __func__);
1143 			kr = KERN_FAILURE;
1144 			goto done;
1145 		}
1146 
1147 		/*
1148 		 * map the pager over the portion of the mapping that needs sliding
1149 		 */
1150 		vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
1151 		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1152 		vmk_flags.vmkf_overwrite_immutable = TRUE;
1153 		map_addr = si->si_slid_address;
1154 		kr = vm_map_enter_mem_object(task->map,
1155 		    &map_addr,
1156 		    si->si_end - si->si_start,
1157 		    (mach_vm_offset_t) 0,
1158 		    vm_flags,
1159 		    vmk_flags,
1160 		    VM_KERN_MEMORY_NONE,
1161 		    (ipc_port_t)(uintptr_t) sr_pager,
1162 		    0,
1163 		    TRUE,
1164 		    tmp_entry->protection,
1165 		    tmp_entry->max_protection,
1166 		    tmp_entry->inheritance);
1167 		memory_object_deallocate(sr_pager);
1168 		sr_pager = MEMORY_OBJECT_NULL;
1169 		if (kr != KERN_SUCCESS) {
1170 			printf("%s(): vm_map_enter_mem_object() failed\n", __func__);
1171 			goto done;
1172 		}
1173 		assertf(map_addr == si->si_slid_address,
1174 		    "map_addr=0x%llx si_slid_address=0x%llx tmp_entry=%p\n",
1175 		    (uint64_t)map_addr,
1176 		    (uint64_t)si->si_slid_address,
1177 		    tmp_entry);
1178 
1179 		/* Drop the ref count grabbed by find_mapping_to_slide */
1180 		vm_object_deallocate(VME_OBJECT(tmp_entry));
1181 		tmp_entry = NULL;
1182 	}
1183 
1184 done:
1185 	if (tmp_entry) {
1186 		/* Drop the ref count grabbed by find_mapping_to_slide */
1187 		vm_object_deallocate(VME_OBJECT(tmp_entry));
1188 		tmp_entry = NULL;
1189 	}
1190 
1191 	/*
1192 	 * Drop any extra reference to the pager in case we're quitting due to an error above.
1193 	 */
1194 	if (sr_pager != MEMORY_OBJECT_NULL) {
1195 		memory_object_deallocate(sr_pager);
1196 	}
1197 
1198 	/*
1199 	 * Mark the region as having its auth sections remapped.
1200 	 */
1201 	vm_shared_region_lock();
1202 	task->shared_region_auth_remapped = TRUE;
1203 	sr->sr_mapping_in_progress = FALSE;
1204 	thread_wakeup((event_t)&sr->sr_mapping_in_progress);
1205 	vm_shared_region_unlock();
1206 	return kr;
1207 }
1208 #endif /* __has_feature(ptrauth_calls) */
1209 
1210 void
1211 vm_shared_region_undo_mappings(
1212 	vm_map_t                 sr_map,
1213 	mach_vm_offset_t         sr_base_address,
1214 	struct _sr_file_mappings *srf_mappings,
1215 	struct _sr_file_mappings *srf_mappings_current,
1216 	unsigned int             srf_current_mappings_count)
1217 {
1218 	unsigned int             j = 0;
1219 	vm_shared_region_t       shared_region = NULL;
1220 	boolean_t                reset_shared_region_state = FALSE;
1221 	struct _sr_file_mappings *srfmp;
1222 	unsigned int             mappings_count;
1223 	struct shared_file_mapping_slide_np *mappings;
1224 
1225 	shared_region = vm_shared_region_get(current_task());
1226 	if (shared_region == NULL) {
1227 		printf("Failed to undo mappings because of NULL shared region.\n");
1228 		return;
1229 	}
1230 
1231 	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
1232 
1233 	if (sr_map == NULL) {
1234 		ipc_port_t              sr_handle;
1235 		vm_named_entry_t        sr_mem_entry;
1236 
1237 		vm_shared_region_lock();
1238 		assert(shared_region->sr_ref_count > 0);
1239 
1240 		while (shared_region->sr_mapping_in_progress) {
1241 			/* wait for our turn... */
1242 			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
1243 			    THREAD_UNINT);
1244 		}
1245 		assert(!shared_region->sr_mapping_in_progress);
1246 		assert(shared_region->sr_ref_count > 0);
1247 		/* let others know we're working in this shared region */
1248 		shared_region->sr_mapping_in_progress = TRUE;
1249 
1250 		vm_shared_region_unlock();
1251 
1252 		reset_shared_region_state = TRUE;
1253 
1254 		/* no need to lock because this data is never modified... */
1255 		sr_handle = shared_region->sr_mem_entry;
1256 		sr_mem_entry = mach_memory_entry_from_port(sr_handle);
1257 		sr_map = sr_mem_entry->backing.map;
1258 		sr_base_address = shared_region->sr_base_address;
1259 	}
1260 	/*
1261 	 * Undo the mappings we've established so far.
1262 	 */
1263 	for (srfmp = &srf_mappings[0];
1264 	    srfmp <= srf_mappings_current;
1265 	    srfmp++) {
1266 		mappings = srfmp->mappings;
1267 		mappings_count = srfmp->mappings_count;
1268 		if (srfmp == srf_mappings_current) {
1269 			mappings_count = srf_current_mappings_count;
1270 		}
1271 
1272 		for (j = 0; j < mappings_count; j++) {
1273 			kern_return_t kr2;
1274 
1275 			if (mappings[j].sms_size == 0) {
1276 				/*
1277 				 * We didn't establish this
1278 				 * mapping, so nothing to undo.
1279 				 */
1280 				continue;
1281 			}
1282 			SHARED_REGION_TRACE_INFO(
1283 				("shared_region: mapping[%d]: "
1284 				"address:0x%016llx "
1285 				"size:0x%016llx "
1286 				"offset:0x%016llx "
1287 				"maxprot:0x%x prot:0x%x: "
1288 				"undoing...\n",
1289 				j,
1290 				(long long)mappings[j].sms_address,
1291 				(long long)mappings[j].sms_size,
1292 				(long long)mappings[j].sms_file_offset,
1293 				mappings[j].sms_max_prot,
1294 				mappings[j].sms_init_prot));
1295 			kr2 = mach_vm_deallocate(
1296 				sr_map,
1297 				(mappings[j].sms_address -
1298 				sr_base_address),
1299 				mappings[j].sms_size);
1300 			assert(kr2 == KERN_SUCCESS);
1301 		}
1302 	}
1303 
1304 	if (reset_shared_region_state) {
1305 		vm_shared_region_lock();
1306 		assert(shared_region->sr_ref_count > 0);
1307 		assert(shared_region->sr_mapping_in_progress);
1308 		/* we're done working on that shared region */
1309 		shared_region->sr_mapping_in_progress = FALSE;
1310 		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1311 		vm_shared_region_unlock();
1312 		reset_shared_region_state = FALSE;
1313 	}
1314 
1315 	vm_shared_region_deallocate(shared_region);
1316 }
1317 
1318 /*
1319  * First part of vm_shared_region_map_file(). Split out to
1320  * avoid kernel stack overflow.
1321  */
1322 __attribute__((noinline))
1323 static kern_return_t
1324 vm_shared_region_map_file_setup(
1325 	vm_shared_region_t              shared_region,
1326 	int                             sr_file_mappings_count,
1327 	struct _sr_file_mappings        *sr_file_mappings,
1328 	unsigned int                    *mappings_to_slide_cnt,
1329 	struct shared_file_mapping_slide_np **mappings_to_slide,
1330 	mach_vm_offset_t                *slid_mappings,
1331 	memory_object_control_t         *slid_file_controls,
1332 	mach_vm_offset_t                *sfm_min_address,
1333 	mach_vm_offset_t                *sfm_max_address,
1334 	vm_map_t                        *sr_map_ptr,
1335 	vm_map_offset_t                 *lowest_unnestable_addr_ptr,
1336 	unsigned int                    vmsr_num_slides)
1337 {
1338 	kern_return_t           kr = KERN_SUCCESS;
1339 	memory_object_control_t file_control;
1340 	vm_object_t             file_object;
1341 	ipc_port_t              sr_handle;
1342 	vm_named_entry_t        sr_mem_entry;
1343 	vm_map_t                sr_map;
1344 	mach_vm_offset_t        sr_base_address;
1345 	unsigned int            i = 0;
1346 	mach_port_t             map_port;
1347 	vm_map_offset_t         target_address;
1348 	vm_object_t             object;
1349 	vm_object_size_t        obj_size;
1350 	vm_map_offset_t         lowest_unnestable_addr = 0;
1351 	vm_map_kernel_flags_t   vmk_flags;
1352 	mach_vm_offset_t        sfm_end;
1353 	uint32_t                mappings_count;
1354 	struct shared_file_mapping_slide_np *mappings;
1355 	struct _sr_file_mappings *srfmp;
1356 
1357 	vm_shared_region_lock();
1358 	assert(shared_region->sr_ref_count > 0);
1359 
1360 	/*
1361 	 * Make sure we handle only one mapping at a time in a given
1362 	 * shared region, to avoid race conditions.  This should not
1363 	 * happen frequently...
1364 	 */
1365 	while (shared_region->sr_mapping_in_progress) {
1366 		/* wait for our turn... */
1367 		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
1368 		    THREAD_UNINT);
1369 	}
1370 	assert(!shared_region->sr_mapping_in_progress);
1371 	assert(shared_region->sr_ref_count > 0);
1372 
1373 
1374 	/* let others know we're working in this shared region */
1375 	shared_region->sr_mapping_in_progress = TRUE;
1376 
1377 	/*
1378 	 * Did someone race in and map this shared region already?
1379 	 */
1380 	if (shared_region->sr_first_mapping != (mach_vm_offset_t) -1) {
1381 		vm_shared_region_unlock();
1382 #if DEVELOPMENT || DEBUG
1383 		printf("shared_region: caught race in map and slide\n");
1384 #endif /* DEVELOPMENT || DEBUG */
1385 		return KERN_FAILURE;
1386 	}
1387 
1388 	vm_shared_region_unlock();
1389 
1390 	/* no need to lock because this data is never modified... */
1391 	sr_handle = shared_region->sr_mem_entry;
1392 	sr_mem_entry = mach_memory_entry_from_port(sr_handle);
1393 	sr_map = sr_mem_entry->backing.map;
1394 	sr_base_address = shared_region->sr_base_address;
1395 
1396 	SHARED_REGION_TRACE_DEBUG(
1397 		("shared_region: -> map(%p)\n",
1398 		(void *)VM_KERNEL_ADDRPERM(shared_region)));
1399 
1400 	mappings_count = 0;
1401 	mappings = NULL;
1402 	srfmp = NULL;
1403 
1404 	/* process all the files to be mapped */
1405 	for (srfmp = &sr_file_mappings[0];
1406 	    srfmp < &sr_file_mappings[sr_file_mappings_count];
1407 	    srfmp++) {
1408 		mappings_count = srfmp->mappings_count;
1409 		mappings = srfmp->mappings;
1410 		file_control = srfmp->file_control;
1411 
1412 		if (mappings_count == 0) {
1413 			/* no mappings here... */
1414 			continue;
1415 		}
1416 
1417 		/*
1418 		 * The code below can only correctly "slide" (perform relocations) for one
1419 		 * value of the slide amount. So if a file has a non-zero slide, it has to
1420 		 * match any previous value. A zero slide value is ok for things that are
1421 		 * just directly mapped.
1422 		 */
1423 		if (shared_region->sr_slide == 0 && srfmp->slide != 0) {
1424 			shared_region->sr_slide = srfmp->slide;
1425 		} else if (shared_region->sr_slide != 0 &&
1426 		    srfmp->slide != 0 &&
1427 		    shared_region->sr_slide != srfmp->slide) {
1428 			SHARED_REGION_TRACE_ERROR(
1429 				("shared_region: more than 1 non-zero slide value amount "
1430 				"slide 1:0x%x slide 2:0x%x\n ",
1431 				shared_region->sr_slide, srfmp->slide));
1432 			kr = KERN_INVALID_ARGUMENT;
1433 			break;
1434 		}
1435 
1436 #if __arm64__
1437 		if ((shared_region->sr_64bit ||
1438 		    page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
1439 		    ((srfmp->slide & SIXTEENK_PAGE_MASK) != 0)) {
1440 			printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
1441 			    __FUNCTION__, srfmp->slide);
1442 			kr = KERN_INVALID_ARGUMENT;
1443 			break;
1444 		}
1445 #endif /* __arm64__ */
1446 
1447 		/* get the VM object associated with the file to be mapped */
1448 		file_object = memory_object_control_to_vm_object(file_control);
1449 		assert(file_object);
1450 
1451 #if CONFIG_SECLUDED_MEMORY
1452 		/*
1453 		 * Camera will need the shared cache, so don't put the pages
1454 		 * on the secluded queue, assume that's the primary region.
1455 		 * Also keep DEXT shared cache pages off secluded.
1456 		 */
1457 		if (primary_system_shared_region == NULL ||
1458 		    primary_system_shared_region == shared_region ||
1459 		    shared_region->sr_driverkit) {
1460 			memory_object_mark_eligible_for_secluded(file_control, FALSE);
1461 		}
1462 #endif /* CONFIG_SECLUDED_MEMORY */
1463 
1464 		/* establish the mappings for that file */
1465 		for (i = 0; i < mappings_count; i++) {
1466 			SHARED_REGION_TRACE_INFO(
1467 				("shared_region: mapping[%d]: "
1468 				"address:0x%016llx size:0x%016llx offset:0x%016llx "
1469 				"maxprot:0x%x prot:0x%x\n",
1470 				i,
1471 				(long long)mappings[i].sms_address,
1472 				(long long)mappings[i].sms_size,
1473 				(long long)mappings[i].sms_file_offset,
1474 				mappings[i].sms_max_prot,
1475 				mappings[i].sms_init_prot));
1476 
1477 			if (mappings[i].sms_address < *sfm_min_address) {
1478 				*sfm_min_address = mappings[i].sms_address;
1479 			}
1480 
1481 			if (os_add_overflow(mappings[i].sms_address,
1482 			    mappings[i].sms_size,
1483 			    &sfm_end) ||
1484 			    (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) <
1485 			    mappings[i].sms_address)) {
1486 				/* overflow */
1487 				kr = KERN_INVALID_ARGUMENT;
1488 				break;
1489 			}
1490 			if (sfm_end > *sfm_max_address) {
1491 				*sfm_max_address = sfm_end;
1492 			}
1493 
1494 			if (mappings[i].sms_init_prot & VM_PROT_ZF) {
1495 				/* zero-filled memory */
1496 				map_port = MACH_PORT_NULL;
1497 			} else {
1498 				/* file-backed memory */
1499 				__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
1500 			}
1501 
1502 			/*
1503 			 * Remember which mappings need sliding.
1504 			 */
1505 			if (mappings[i].sms_max_prot & VM_PROT_SLIDE) {
1506 				if (*mappings_to_slide_cnt == vmsr_num_slides) {
1507 					SHARED_REGION_TRACE_INFO(
1508 						("shared_region: mapping[%d]: "
1509 						"address:0x%016llx size:0x%016llx "
1510 						"offset:0x%016llx "
1511 						"maxprot:0x%x prot:0x%x "
1512 						"too many mappings to slide...\n",
1513 						i,
1514 						(long long)mappings[i].sms_address,
1515 						(long long)mappings[i].sms_size,
1516 						(long long)mappings[i].sms_file_offset,
1517 						mappings[i].sms_max_prot,
1518 						mappings[i].sms_init_prot));
1519 				} else {
1520 					mappings_to_slide[*mappings_to_slide_cnt] = &mappings[i];
1521 					*mappings_to_slide_cnt += 1;
1522 				}
1523 			}
1524 
1525 			/* mapping's address is relative to the shared region base */
1526 			target_address = (vm_map_offset_t)(mappings[i].sms_address - sr_base_address);
1527 
1528 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1529 			vmk_flags.vmkf_already = TRUE;
1530 			/* no copy-on-read for mapped binaries */
1531 			vmk_flags.vmkf_no_copy_on_read = 1;
1532 
1533 
1534 			/* establish that mapping, OK if it's "already" there */
1535 			if (map_port == MACH_PORT_NULL) {
1536 				/*
1537 				 * We want to map some anonymous memory in a shared region.
1538 				 * We have to create the VM object now, so that it can be mapped "copy-on-write".
1539 				 */
1540 				obj_size = vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map));
1541 				object = vm_object_allocate(obj_size);
1542 				if (object == VM_OBJECT_NULL) {
1543 					kr = KERN_RESOURCE_SHORTAGE;
1544 				} else {
1545 					kr = vm_map_enter(
1546 						sr_map,
1547 						&target_address,
1548 						vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map)),
1549 						0,
1550 						VM_FLAGS_FIXED,
1551 						vmk_flags,
1552 						VM_KERN_MEMORY_NONE,
1553 						object,
1554 						0,
1555 						TRUE,
1556 						mappings[i].sms_init_prot & VM_PROT_ALL,
1557 						mappings[i].sms_max_prot & VM_PROT_ALL,
1558 						VM_INHERIT_DEFAULT);
1559 				}
1560 			} else {
1561 				object = VM_OBJECT_NULL; /* no anonymous memory here */
1562 				kr = vm_map_enter_mem_object(
1563 					sr_map,
1564 					&target_address,
1565 					vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map)),
1566 					0,
1567 					VM_FLAGS_FIXED,
1568 					vmk_flags,
1569 					VM_KERN_MEMORY_NONE,
1570 					map_port,
1571 					mappings[i].sms_file_offset,
1572 					TRUE,
1573 					mappings[i].sms_init_prot & VM_PROT_ALL,
1574 					mappings[i].sms_max_prot & VM_PROT_ALL,
1575 					VM_INHERIT_DEFAULT);
1576 			}
1577 
1578 			if (kr == KERN_SUCCESS) {
1579 				/*
1580 				 * Record the first successful mapping(s) in the shared
1581 				 * region by file. We're protected by "sr_mapping_in_progress"
1582 				 * here, so no need to lock "shared_region".
1583 				 *
1584 				 * Note that if we have an AOT shared cache (ARM) for a
1585 				 * translated task, then it's always the first file.
1586 				 * The original "native" (i.e. x86) shared cache is the
1587 				 * second file.
1588 				 */
1589 
1590 				if (shared_region->sr_first_mapping == (mach_vm_offset_t)-1) {
1591 					shared_region->sr_first_mapping = target_address;
1592 				}
1593 
1594 				if (*mappings_to_slide_cnt > 0 &&
1595 				    mappings_to_slide[*mappings_to_slide_cnt - 1] == &mappings[i]) {
1596 					slid_mappings[*mappings_to_slide_cnt - 1] = target_address;
1597 					slid_file_controls[*mappings_to_slide_cnt - 1] = file_control;
1598 				}
1599 
1600 				/*
1601 				 * Record the lowest writable address in this
1602 				 * sub map, to log any unexpected unnesting below
1603 				 * that address (see log_unnest_badness()).
1604 				 */
1605 				if ((mappings[i].sms_init_prot & VM_PROT_WRITE) &&
1606 				    sr_map->is_nested_map &&
1607 				    (lowest_unnestable_addr == 0 ||
1608 				    (target_address < lowest_unnestable_addr))) {
1609 					lowest_unnestable_addr = target_address;
1610 				}
1611 			} else {
1612 				if (map_port == MACH_PORT_NULL) {
1613 					/*
1614 					 * Get rid of the VM object we just created
1615 					 * but failed to map.
1616 					 */
1617 					vm_object_deallocate(object);
1618 					object = VM_OBJECT_NULL;
1619 				}
1620 				if (kr == KERN_MEMORY_PRESENT) {
1621 					/*
1622 					 * This exact mapping was already there:
1623 					 * that's fine.
1624 					 */
1625 					SHARED_REGION_TRACE_INFO(
1626 						("shared_region: mapping[%d]: "
1627 						"address:0x%016llx size:0x%016llx "
1628 						"offset:0x%016llx "
1629 						"maxprot:0x%x prot:0x%x "
1630 						"already mapped...\n",
1631 						i,
1632 						(long long)mappings[i].sms_address,
1633 						(long long)mappings[i].sms_size,
1634 						(long long)mappings[i].sms_file_offset,
1635 						mappings[i].sms_max_prot,
1636 						mappings[i].sms_init_prot));
1637 					/*
1638 					 * We didn't establish this mapping ourselves;
1639 					 * let's reset its size, so that we do not
1640 					 * attempt to undo it if an error occurs later.
1641 					 */
1642 					mappings[i].sms_size = 0;
1643 					kr = KERN_SUCCESS;
1644 				} else {
1645 					break;
1646 				}
1647 			}
1648 		}
1649 
1650 		if (kr != KERN_SUCCESS) {
1651 			break;
1652 		}
1653 	}
1654 
1655 	if (kr != KERN_SUCCESS) {
1656 		/* the last mapping we tried (mappings[i]) failed ! */
1657 		assert(i < mappings_count);
1658 		SHARED_REGION_TRACE_ERROR(
1659 			("shared_region: mapping[%d]: "
1660 			"address:0x%016llx size:0x%016llx "
1661 			"offset:0x%016llx "
1662 			"maxprot:0x%x prot:0x%x failed 0x%x\n",
1663 			i,
1664 			(long long)mappings[i].sms_address,
1665 			(long long)mappings[i].sms_size,
1666 			(long long)mappings[i].sms_file_offset,
1667 			mappings[i].sms_max_prot,
1668 			mappings[i].sms_init_prot,
1669 			kr));
1670 
1671 		/*
1672 		 * Respect the design of vm_shared_region_undo_mappings():
1673 		 * since we hold sr_mapping_in_progress == true here, sr_map
1674 		 * must not be NULL, otherwise vm_shared_region_undo_mappings()
1675 		 * would block waiting for sr_mapping_in_progress to become false.
1676 		 */
1677 		assert(sr_map != NULL);
1678 		/* undo all the previous mappings */
1679 		vm_shared_region_undo_mappings(sr_map, sr_base_address, sr_file_mappings, srfmp, i);
1680 		return kr;
1681 	}
1682 
1683 	*lowest_unnestable_addr_ptr = lowest_unnestable_addr;
1684 	*sr_map_ptr = sr_map;
1685 	return KERN_SUCCESS;
1686 }
1687 
1688 /* forward declaration */
1689 __attribute__((noinline))
1690 static void
1691 vm_shared_region_map_file_final(
1692 	vm_shared_region_t shared_region,
1693 	vm_map_t           sr_map,
1694 	mach_vm_offset_t   sfm_min_address,
1695 	mach_vm_offset_t   sfm_max_address);
1696 
1697 /*
1698  * Establish some mappings of a file in the shared region.
1699  * This is used by "dyld" via the shared_region_map_np() system call
1700  * to populate the shared region with the appropriate shared cache.
1701  *
1702  * One could also call it several times to incrementally load several
1703  * libraries, as long as they do not overlap.
1704  * It will return KERN_SUCCESS if the mappings were successfully established
1705  * or if they were already established identically by another process.
1706  */
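/*
 * Overall flow (sketch of the body below): the setup phase maps each
 * file directly and records which mappings were marked VM_PROT_SLIDE;
 * the slide loop then replaces those mappings with a shared-region
 * pager via vm_shared_region_slide(); the final phase copies in the
 * cache header and trims the pmap.  On ptrauth-enabled systems,
 * vm_shared_region_auth_remap() then remaps the authenticated portions
 * privately for the calling task.
 */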
1707 __attribute__((noinline))
1708 kern_return_t
1709 vm_shared_region_map_file(
1710 	vm_shared_region_t       shared_region,
1711 	int                      sr_file_mappings_count,
1712 	struct _sr_file_mappings *sr_file_mappings)
1713 {
1714 	kern_return_t           kr = KERN_SUCCESS;
1715 	unsigned int            i;
1716 	unsigned int            mappings_to_slide_cnt = 0;
1717 	mach_vm_offset_t        sfm_min_address = (mach_vm_offset_t)-1;
1718 	mach_vm_offset_t        sfm_max_address = 0;
1719 	vm_map_t                sr_map = NULL;
1720 	vm_map_offset_t         lowest_unnestable_addr = 0;
1721 	unsigned int            vmsr_num_slides = 0;
1722 	mach_vm_offset_t        *slid_mappings = NULL;                  /* [0..vmsr_num_slides] */
1723 	memory_object_control_t *slid_file_controls = NULL;             /* [0..vmsr_num_slides] */
1724 	struct shared_file_mapping_slide_np **mappings_to_slide = NULL; /* [0..vmsr_num_slides] */
1725 	struct _sr_file_mappings *srfmp;
1726 
1727 	/*
1728 	 * Figure out how many of the mappings have slides.
1729 	 */
1730 	for (srfmp = &sr_file_mappings[0];
1731 	    srfmp < &sr_file_mappings[sr_file_mappings_count];
1732 	    srfmp++) {
1733 		for (i = 0; i < srfmp->mappings_count; ++i) {
1734 			if (srfmp->mappings[i].sms_max_prot & VM_PROT_SLIDE) {
1735 				++vmsr_num_slides;
1736 			}
1737 		}
1738 	}
1739 
1740 	/* Allocate per slide data structures */
1741 	if (vmsr_num_slides > 0) {
1742 		slid_mappings =
1743 		    kalloc_data(vmsr_num_slides * sizeof(*slid_mappings), Z_WAITOK);
1744 		slid_file_controls =
1745 		    kalloc_type(memory_object_control_t, vmsr_num_slides, Z_WAITOK);
1746 		mappings_to_slide =
1747 		    kalloc_type(struct shared_file_mapping_slide_np *, vmsr_num_slides, Z_WAITOK | Z_ZERO);
1748 	}
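	/*
	 * The three arrays above are parallel, indexed by slide ordinal
	 * [0..vmsr_num_slides-1]: the setup call below fills them in and
	 * the slide loop consumes them; all three are freed on the way out.
	 */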
1749 
1750 	kr = vm_shared_region_map_file_setup(shared_region, sr_file_mappings_count, sr_file_mappings,
1751 	    &mappings_to_slide_cnt, mappings_to_slide, slid_mappings, slid_file_controls,
1752 	    &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr, vmsr_num_slides);
1753 	if (kr != KERN_SUCCESS) {
1754 		vm_shared_region_lock();
1755 		goto done;
1756 	}
1757 	assert(vmsr_num_slides == mappings_to_slide_cnt);
1758 
1759 	/*
1760 	 * The call above installed direct mappings to the shared cache file.
1761 	 * Now we go back and overwrite the mappings that need relocation
1762 	 * with a special shared region pager.
1763 	 */
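	/*
	 * Note: the overwrite happens with VM_FLAGS_OVERWRITE (see
	 * vm_shared_region_slide_mapping()), so each file-backed entry is
	 * replaced in place rather than unmapped first.
	 */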
1764 	for (i = 0; i < mappings_to_slide_cnt; ++i) {
1765 		kr = vm_shared_region_slide(shared_region->sr_slide,
1766 		    mappings_to_slide[i]->sms_file_offset,
1767 		    mappings_to_slide[i]->sms_size,
1768 		    mappings_to_slide[i]->sms_slide_start,
1769 		    mappings_to_slide[i]->sms_slide_size,
1770 		    slid_mappings[i],
1771 		    slid_file_controls[i],
1772 		    mappings_to_slide[i]->sms_max_prot);
1773 		if (kr != KERN_SUCCESS) {
1774 			SHARED_REGION_TRACE_ERROR(
1775 				("shared_region: region_slide("
1776 				"slide:0x%x start:0x%016llx "
1777 				"size:0x%016llx) failed 0x%x\n",
1778 				shared_region->sr_slide,
1779 				(long long)mappings_to_slide[i]->sms_slide_start,
1780 				(long long)mappings_to_slide[i]->sms_slide_size,
1781 				kr));
1782 			vm_shared_region_lock();
1783 			goto done;
1784 		}
1785 	}
1786 
1787 	assert(kr == KERN_SUCCESS);
1788 
1789 	/* adjust the map's "lowest_unnestable_start" */
1790 	lowest_unnestable_addr &= ~(pmap_shared_region_size_min(sr_map->pmap) - 1);
1791 	if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) {
1792 		vm_map_lock(sr_map);
1793 		sr_map->lowest_unnestable_start = lowest_unnestable_addr;
1794 		vm_map_unlock(sr_map);
1795 	}
1796 
1797 	vm_shared_region_lock();
1798 	assert(shared_region->sr_ref_count > 0);
1799 	assert(shared_region->sr_mapping_in_progress);
1800 
1801 	vm_shared_region_map_file_final(shared_region, sr_map, sfm_min_address, sfm_max_address);
1802 
1803 done:
1804 	/*
1805 	 * We're done working on that shared region.
1806 	 * Wake up any waiting threads.
1807 	 */
1808 	shared_region->sr_mapping_in_progress = FALSE;
1809 	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1810 	vm_shared_region_unlock();
1811 
1812 #if __has_feature(ptrauth_calls)
1813 	if (kr == KERN_SUCCESS) {
1814 		/*
1815 		 * Since authenticated mappings were just added to the shared region,
1816 		 * go back and remap them into private mappings for this task.
1817 		 */
1818 		kr = vm_shared_region_auth_remap(shared_region);
1819 	}
1820 #endif /* __has_feature(ptrauth_calls) */
1821 
1822 	/* Cache shared region info needed for telemetry in the task */
1823 	task_t task;
1824 	if (kr == KERN_SUCCESS && (task = current_task())->task_shared_region_slide == -1) {
1825 		mach_vm_offset_t start_address;
1826 		(void)vm_shared_region_start_address(shared_region, &start_address, task);
1827 	}
1828 
1829 	SHARED_REGION_TRACE_DEBUG(
1830 		("shared_region: map(%p) <- 0x%x \n",
1831 		(void *)VM_KERNEL_ADDRPERM(shared_region), kr));
1832 	if (vmsr_num_slides > 0) {
1833 		kfree_data(slid_mappings, vmsr_num_slides * sizeof(*slid_mappings));
1834 		kfree_type(memory_object_control_t, vmsr_num_slides, slid_file_controls);
1835 		kfree_type(struct shared_file_mapping_slide_np *, vmsr_num_slides,
1836 		    mappings_to_slide);
1837 	}
1838 	return kr;
1839 }
1840 
1841 /*
1842  * Final part of vm_shared_region_map_file().
1843  * Kept in a separate function to avoid blowing out the stack.
1844  */
1845 __attribute__((noinline))
1846 static void
1847 vm_shared_region_map_file_final(
1848 	vm_shared_region_t        shared_region,
1849 	vm_map_t                  sr_map,
1850 	mach_vm_offset_t          sfm_min_address,
1851 	mach_vm_offset_t          sfm_max_address)
1852 {
1853 	struct _dyld_cache_header sr_cache_header;
1854 	int                       error;
1855 	size_t                    image_array_length;
1856 	struct _dyld_cache_image_text_info *sr_image_layout;
1857 	boolean_t                 locally_built = FALSE;
1858 
1859 
1860 	/*
1861 	 * copy in the shared region UUID to the shared region structure.
1862 	 * we do this indirectly by first copying in the shared cache header
1863 	 * and then copying the UUID from there because we'll need to look
1864 	 * at other content from the shared cache header.
1865 	 */
1866 	if (!shared_region->sr_uuid_copied) {
1867 		error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping),
1868 		    (char *)&sr_cache_header,
1869 		    sizeof(sr_cache_header));
1870 		if (error == 0) {
1871 			memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
1872 			shared_region->sr_uuid_copied = TRUE;
1873 			locally_built = sr_cache_header.locallyBuiltCache;
1874 		} else {
1875 #if DEVELOPMENT || DEBUG
1876 			panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1877 			    "offset:0 size:0x%016llx) failed with %d\n",
1878 			    (long long)shared_region->sr_base_address,
1879 			    (long long)shared_region->sr_first_mapping,
1880 			    (long long)sizeof(sr_cache_header),
1881 			    error);
1882 #endif /* DEVELOPMENT || DEBUG */
1883 			shared_region->sr_uuid_copied = FALSE;
1884 		}
1885 	}
1886 
1887 	/*
1888 	 * We save a pointer to the shared cache mapped by the "init task", i.e. launchd.  This is used by
1889 	 * the stackshot code to reduce output size in the common case that everything maps the same shared cache.
1890 	 * One gotcha is that "userspace reboots" can occur which can cause a new shared region to be the primary
1891 	 * region.  In that case, launchd re-exec's itself, so we may go through this path multiple times.  We
1892 	 * let the most recent one win.
1893 	 *
1894 	 * Check whether the shared cache is a custom built one and copy in the shared cache layout accordingly.
1895 	 */
1896 	bool is_init_task = (task_pid(current_task()) == 1);
1897 	if (shared_region->sr_uuid_copied && is_init_task) {
1898 		/* Copy in the shared cache layout if we're running with a locally built shared cache */
1899 		if (locally_built) {
1900 			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
1901 			image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
1902 			sr_image_layout = kalloc_data(image_array_length, Z_WAITOK);
1903 			error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping +
1904 			    sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
1905 			if (error == 0) {
1906 				if (sr_cache_header.imagesTextCount >= UINT32_MAX) {
1907 					panic("shared_region: sr_cache_header.imagesTextCount >= UINT32_MAX");
1908 				}
1909 				shared_region->sr_images = kalloc_data((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)), Z_WAITOK);
1910 				for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
1911 					memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
1912 					    sizeof(shared_region->sr_images[index].imageUUID));
1913 					shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
1914 				}
1915 
1916 				shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
1917 			} else {
1918 #if DEVELOPMENT || DEBUG
1919 				panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1920 				    "offset:0x%016llx size:0x%016llx) failed with %d\n",
1921 				    (long long)shared_region->sr_base_address,
1922 				    (long long)shared_region->sr_first_mapping,
1923 				    (long long)sr_cache_header.imagesTextOffset,
1924 				    (long long)image_array_length,
1925 				    error);
1926 #endif /* DEVELOPMENT || DEBUG */
1927 			}
1928 			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
1929 			kfree_data(sr_image_layout, image_array_length);
1930 			sr_image_layout = NULL;
1931 		}
1932 		primary_system_shared_region = shared_region;
1933 	}
1934 
1935 	/*
1936 	 * If we succeeded, we know the bounds of the shared region.
1937 	 * Trim our pmaps to only cover this range (if applicable to
1938 	 * this platform).
1939 	 */
1940 	if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) {
1941 		pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address);
1942 	}
1943 }
1944 
1945 /*
1946  * Retrieve a task's shared region and grab an extra reference to
1947  * make sure it doesn't disappear while the caller is using it.
1948  * The caller is responsible for consuming that extra reference if
1949  * necessary.
1950  *
1951  * This also tries to trim the pmap for the shared region.
1952  */
1953 vm_shared_region_t
1954 vm_shared_region_trim_and_get(task_t task)
1955 {
1956 	vm_shared_region_t shared_region;
1957 	ipc_port_t sr_handle;
1958 	vm_named_entry_t sr_mem_entry;
1959 	vm_map_t sr_map;
1960 
1961 	/* Get the shared region and the map. */
1962 	shared_region = vm_shared_region_get(task);
1963 	if (shared_region == NULL) {
1964 		return NULL;
1965 	}
1966 
1967 	sr_handle = shared_region->sr_mem_entry;
1968 	sr_mem_entry = mach_memory_entry_from_port(sr_handle);
1969 	sr_map = sr_mem_entry->backing.map;
1970 
1971 	/* Trim the pmap if possible. */
1972 	if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(sr_map)) {
1973 		pmap_trim(task->map->pmap, sr_map->pmap, 0, 0);
1974 	}
1975 
1976 	return shared_region;
1977 }
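/*
 * Typical caller pattern (illustrative): obtain the region with
 * vm_shared_region_trim_and_get(), use it, then drop the extra
 * reference with vm_shared_region_deallocate() when done.
 */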
1978 
1979 /*
1980  * Enter the appropriate shared region into "map" for "task".
1981  * This involves looking up the shared region (and possibly creating a new
1982  * one) for the desired environment, then mapping the VM sub map into the
1983  * task's VM "map", with the appropriate level of pmap-nesting.
1984  */
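/*
 * Illustrative layout of the (up to) three ranges mapped below; not to
 * scale, and on most architectures the nested range covers the whole
 * shared region:
 *
 *  sr_address                                        sr_address + sr_size
 *  |-- unnested head --|======== pmap-nested ========|-- unnested tail --|
 *                      ^ sr_pmap_nesting_start
 *                      <---- sr_pmap_nesting_size ---->
 */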
1985 kern_return_t
1986 vm_shared_region_enter(
1987 	struct _vm_map          *map,
1988 	struct task             *task,
1989 	boolean_t               is_64bit,
1990 	void                    *fsroot,
1991 	cpu_type_t              cpu,
1992 	cpu_subtype_t           cpu_subtype,
1993 	boolean_t               reslide,
1994 	boolean_t               is_driverkit)
1995 {
1996 	kern_return_t           kr;
1997 	vm_shared_region_t      shared_region;
1998 	vm_map_offset_t         sr_address, sr_offset, target_address;
1999 	vm_map_size_t           sr_size, mapping_size;
2000 	vm_map_offset_t         sr_pmap_nesting_start;
2001 	vm_map_size_t           sr_pmap_nesting_size;
2002 	ipc_port_t              sr_handle;
2003 	vm_prot_t               cur_prot, max_prot;
2004 
2005 	SHARED_REGION_TRACE_DEBUG(
2006 		("shared_region: -> "
2007 		"enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,driverkit=%d)\n",
2008 		(void *)VM_KERNEL_ADDRPERM(map),
2009 		(void *)VM_KERNEL_ADDRPERM(task),
2010 		(void *)VM_KERNEL_ADDRPERM(fsroot),
2011 		cpu, cpu_subtype, is_64bit, is_driverkit));
2012 
2013 	/* lookup (create if needed) the shared region for this environment */
2014 	shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide, is_driverkit);
2015 	if (shared_region == NULL) {
2016 		/* this should not happen ! */
2017 		SHARED_REGION_TRACE_ERROR(
2018 			("shared_region: -> "
2019 			"enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,driverkit=%d): "
2020 			"lookup failed !\n",
2021 			(void *)VM_KERNEL_ADDRPERM(map),
2022 			(void *)VM_KERNEL_ADDRPERM(task),
2023 			(void *)VM_KERNEL_ADDRPERM(fsroot),
2024 			cpu, cpu_subtype, is_64bit, reslide, is_driverkit));
2025 		//panic("shared_region_enter: lookup failed");
2026 		return KERN_FAILURE;
2027 	}
2028 
2029 	kr = KERN_SUCCESS;
2030 	/* no need to lock since this data is never modified */
2031 	sr_address = (vm_map_offset_t)shared_region->sr_base_address;
2032 	sr_size = (vm_map_size_t)shared_region->sr_size;
2033 	sr_handle = shared_region->sr_mem_entry;
2034 	sr_pmap_nesting_start = (vm_map_offset_t)shared_region->sr_pmap_nesting_start;
2035 	sr_pmap_nesting_size = (vm_map_size_t)shared_region->sr_pmap_nesting_size;
2036 
2037 	cur_prot = VM_PROT_READ;
2038 	if (VM_MAP_POLICY_WRITABLE_SHARED_REGION(map)) {
2039 		/*
2040 		 * XXX BINARY COMPATIBILITY
2041 		 * java6 apparently needs to modify some code in the
2042 		 * dyld shared cache and needs to be allowed to add
2043 		 * write access...
2044 		 */
2045 		max_prot = VM_PROT_ALL;
2046 	} else {
2047 		max_prot = VM_PROT_READ;
2048 	}
2049 
2050 	/*
2051 	 * Start mapping the shared region's VM sub map into the task's VM map.
2052 	 */
2053 	sr_offset = 0;
2054 
2055 	if (sr_pmap_nesting_start > sr_address) {
2056 		/* we need to map a range without pmap-nesting first */
2057 		target_address = sr_address;
2058 		mapping_size = sr_pmap_nesting_start - sr_address;
2059 		kr = vm_map_enter_mem_object(
2060 			map,
2061 			&target_address,
2062 			mapping_size,
2063 			0,
2064 			VM_FLAGS_FIXED,
2065 			VM_MAP_KERNEL_FLAGS_NONE,
2066 			VM_KERN_MEMORY_NONE,
2067 			sr_handle,
2068 			sr_offset,
2069 			TRUE,
2070 			cur_prot,
2071 			max_prot,
2072 			VM_INHERIT_SHARE);
2073 		if (kr != KERN_SUCCESS) {
2074 			SHARED_REGION_TRACE_ERROR(
2075 				("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2076 				"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2077 				(void *)VM_KERNEL_ADDRPERM(map),
2078 				(void *)VM_KERNEL_ADDRPERM(task),
2079 				(void *)VM_KERNEL_ADDRPERM(fsroot),
2080 				cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2081 				(long long)target_address,
2082 				(long long)mapping_size,
2083 				(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2084 			goto done;
2085 		}
2086 		SHARED_REGION_TRACE_DEBUG(
2087 			("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2088 			"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2089 			(void *)VM_KERNEL_ADDRPERM(map),
2090 			(void *)VM_KERNEL_ADDRPERM(task),
2091 			(void *)VM_KERNEL_ADDRPERM(fsroot),
2092 			cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2093 			(long long)target_address, (long long)mapping_size,
2094 			(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2095 		sr_offset += mapping_size;
2096 		sr_size -= mapping_size;
2097 	}
2098 
2099 	/* The pmap-nesting is triggered by the "vmkf_nested_pmap" flag. */
2100 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
2101 	vmk_flags.vmkf_nested_pmap = TRUE;
2102 
2103 	/*
2104 	 * Use pmap-nesting to map the majority of the shared region into the task's
2105 	 * VM space. Very rarely will architectures have a shared region that isn't
2106 	 * the same size as the pmap-nesting region, or start at a different address
2107 	 * than the pmap-nesting region, so this code will map the entirety of the
2108 	 * shared region for most architectures.
2109 	 */
2110 	assert((sr_address + sr_offset) == sr_pmap_nesting_start);
2111 	target_address = sr_pmap_nesting_start;
2112 	kr = vm_map_enter_mem_object(
2113 		map,
2114 		&target_address,
2115 		sr_pmap_nesting_size,
2116 		0,
2117 		VM_FLAGS_FIXED,
2118 		vmk_flags,
2119 		VM_MEMORY_SHARED_PMAP,
2120 		sr_handle,
2121 		sr_offset,
2122 		TRUE,
2123 		cur_prot,
2124 		max_prot,
2125 		VM_INHERIT_SHARE);
2126 	if (kr != KERN_SUCCESS) {
2127 		SHARED_REGION_TRACE_ERROR(
2128 			("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2129 			"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2130 			(void *)VM_KERNEL_ADDRPERM(map),
2131 			(void *)VM_KERNEL_ADDRPERM(task),
2132 			(void *)VM_KERNEL_ADDRPERM(fsroot),
2133 			cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2134 			(long long)target_address,
2135 			(long long)sr_pmap_nesting_size,
2136 			(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2137 		goto done;
2138 	}
2139 	SHARED_REGION_TRACE_DEBUG(
2140 		("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2141 		"nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2142 		(void *)VM_KERNEL_ADDRPERM(map),
2143 		(void *)VM_KERNEL_ADDRPERM(task),
2144 		(void *)VM_KERNEL_ADDRPERM(fsroot),
2145 		cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2146 		(long long)target_address, (long long)sr_pmap_nesting_size,
2147 		(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2148 
2149 	sr_offset += sr_pmap_nesting_size;
2150 	sr_size -= sr_pmap_nesting_size;
2151 
2152 	if (sr_size > 0) {
2153 		/* and there's some left to be mapped without pmap-nesting */
2154 		target_address = sr_address + sr_offset;
2155 		mapping_size = sr_size;
2156 		kr = vm_map_enter_mem_object(
2157 			map,
2158 			&target_address,
2159 			mapping_size,
2160 			0,
2161 			VM_FLAGS_FIXED,
2162 			VM_MAP_KERNEL_FLAGS_NONE,
2163 			VM_KERN_MEMORY_NONE,
2164 			sr_handle,
2165 			sr_offset,
2166 			TRUE,
2167 			cur_prot,
2168 			max_prot,
2169 			VM_INHERIT_SHARE);
2170 		if (kr != KERN_SUCCESS) {
2171 			SHARED_REGION_TRACE_ERROR(
2172 				("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2173 				"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2174 				(void *)VM_KERNEL_ADDRPERM(map),
2175 				(void *)VM_KERNEL_ADDRPERM(task),
2176 				(void *)VM_KERNEL_ADDRPERM(fsroot),
2177 				cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2178 				(long long)target_address,
2179 				(long long)mapping_size,
2180 				(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2181 			goto done;
2182 		}
2183 		SHARED_REGION_TRACE_DEBUG(
2184 			("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d): "
2185 			"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
2186 			(void *)VM_KERNEL_ADDRPERM(map),
2187 			(void *)VM_KERNEL_ADDRPERM(task),
2188 			(void *)VM_KERNEL_ADDRPERM(fsroot),
2189 			cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2190 			(long long)target_address, (long long)mapping_size,
2191 			(void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
2192 		sr_offset += mapping_size;
2193 		sr_size -= mapping_size;
2194 	}
2195 	assert(sr_size == 0);
2196 
2197 done:
2198 	if (kr == KERN_SUCCESS) {
2199 		/* let the task use that shared region */
2200 		vm_shared_region_set(task, shared_region);
2201 	} else {
2202 		/* drop our reference since we're not using it */
2203 		vm_shared_region_deallocate(shared_region);
2204 		vm_shared_region_set(task, NULL);
2205 	}
2206 
2207 	SHARED_REGION_TRACE_DEBUG(
2208 		("shared_region: enter(%p,%p,%p,%d,%d,%d,%d,%d) <- 0x%x\n",
2209 		(void *)VM_KERNEL_ADDRPERM(map),
2210 		(void *)VM_KERNEL_ADDRPERM(task),
2211 		(void *)VM_KERNEL_ADDRPERM(fsroot),
2212 		cpu, cpu_subtype, is_64bit, reslide, is_driverkit,
2213 		kr));
2214 	return kr;
2215 }
2216 
2217 #define SANE_SLIDE_INFO_SIZE            (2560*1024) /* Can be changed if needed */
2218 struct vm_shared_region_slide_info      slide_info;
2219 
2220 kern_return_t
2221 vm_shared_region_sliding_valid(uint32_t slide)
2222 {
2223 	kern_return_t kr = KERN_SUCCESS;
2224 	vm_shared_region_t sr = vm_shared_region_get(current_task());
2225 
2226 	/* No region yet? we're fine. */
2227 	if (sr == NULL) {
2228 		return kr;
2229 	}
2230 
2231 	if (sr->sr_slide != 0 && slide != 0) {
2232 		if (slide == sr->sr_slide) {
2233 			/*
2234 			 * A request to slide when we've
2235 			 * already slid with exactly the
2236 			 * same slide value before.
2237 			 * Technically this isn't wrong, but
2238 			 * we don't want to slide again, so
2239 			 * we return KERN_INVALID_ARGUMENT.
2240 			 */
2241 			kr = KERN_INVALID_ARGUMENT;
2242 		} else {
2243 			printf("Mismatched shared region slide\n");
2244 			kr = KERN_FAILURE;
2245 		}
2246 	}
2247 	vm_shared_region_deallocate(sr);
2248 	return kr;
2249 }
2250 
2251 /*
2252  * Actually create (really overwrite) the mapping to part of the shared cache which
2253  * undergoes relocation.  This routine reads in the relocation info from dyld and
2254  * verifies it. It then creates a (or finds a matching) shared region pager which
2255  * handles the actual modification of the page contents and installs the mapping
2256  * using that pager.
2257  */
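/*
 * In order (sketch of the body below): copy in and sanity-check the
 * slide info, find the existing map entry covering the range, set up
 * (or reuse) a shared_region pager for it, then map that pager over
 * the entry with VM_FLAGS_OVERWRITE.
 */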
2258 kern_return_t
2259 vm_shared_region_slide_mapping(
2260 	vm_shared_region_t      sr,
2261 	user_addr_t             slide_info_addr,
2262 	mach_vm_size_t          slide_info_size,
2263 	mach_vm_offset_t        start,
2264 	mach_vm_size_t          size,
2265 	mach_vm_offset_t        slid_mapping,
2266 	uint32_t                slide,
2267 	memory_object_control_t sr_file_control,
2268 	vm_prot_t               prot)
2269 {
2270 	kern_return_t           kr;
2271 	vm_object_t             object = VM_OBJECT_NULL;
2272 	vm_shared_region_slide_info_t si = NULL;
2273 	vm_map_entry_t          tmp_entry = VM_MAP_ENTRY_NULL;
2274 	struct vm_map_entry     tmp_entry_store;
2275 	memory_object_t         sr_pager = MEMORY_OBJECT_NULL;
2276 	vm_map_t                sr_map;
2277 	int                     vm_flags;
2278 	vm_map_kernel_flags_t   vmk_flags;
2279 	vm_map_offset_t         map_addr;
2280 	void                    *slide_info_entry = NULL;
2281 	int                     error;
2282 
2283 	assert(sr->sr_slide_in_progress);
2284 
2285 	if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
2286 		return KERN_INVALID_ARGUMENT;
2287 	}
2288 
2289 	/*
2290 	 * Copy in and verify the relocation information.
2291 	 */
2292 	if (slide_info_size < MIN_SLIDE_INFO_SIZE) {
2293 		printf("Slide_info_size too small: %lx\n", (uintptr_t)slide_info_size);
2294 		return KERN_FAILURE;
2295 	}
2296 	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
2297 		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
2298 		return KERN_FAILURE;
2299 	}
2300 
2301 	slide_info_entry = kalloc_data((vm_size_t)slide_info_size, Z_WAITOK);
2302 	if (slide_info_entry == NULL) {
2303 		return KERN_RESOURCE_SHORTAGE;
2304 	}
2305 	error = copyin(slide_info_addr, slide_info_entry, (size_t)slide_info_size);
2306 	if (error) {
2307 		printf("copyin of slide_info failed\n");
2308 		kr = KERN_INVALID_ADDRESS;
2309 		goto done;
2310 	}
2311 
2312 	if ((kr = vm_shared_region_slide_sanity_check(slide_info_entry, slide_info_size)) != KERN_SUCCESS) {
2313 		printf("Sanity Check failed for slide_info\n");
2314 		goto done;
2315 	}
2316 
2317 	/*
2318 	 * Allocate and fill in a vm_shared_region_slide_info.
2319 	 * This will either be used by a new pager, or used to find
2320 	 * a pre-existing matching pager.
2321 	 */
2322 	object = memory_object_control_to_vm_object(sr_file_control);
2323 	if (object == VM_OBJECT_NULL || object->internal) {
2324 		object = VM_OBJECT_NULL;
2325 		kr = KERN_INVALID_ADDRESS;
2326 		goto done;
2327 	}
2328 
2329 	si = kalloc_type(struct vm_shared_region_slide_info,
2330 	    Z_WAITOK | Z_NOFAIL);
2331 	vm_object_lock(object);
2332 
2333 	vm_object_reference_locked(object);     /* for si->slide_object */
2334 	object->object_is_shared_cache = TRUE;
2335 	vm_object_unlock(object);
2336 
2337 	si->si_slide_info_entry = slide_info_entry;
2338 	si->si_slide_info_size = slide_info_size;
2339 
2340 	assert(slid_mapping != (mach_vm_offset_t) -1);
2341 	si->si_slid_address = slid_mapping + sr->sr_base_address;
2342 	si->si_slide_object = object;
2343 	si->si_start = start;
2344 	si->si_end = si->si_start + size;
2345 	si->si_slide = slide;
2346 #if __has_feature(ptrauth_calls)
2347 	/*
2348 	 * If there is authenticated pointer data in this slid mapping,
2349 	 * then just add the information needed to create new pagers for
2350 	 * different shared_region_id's later.
2351 	 */
2352 	if (sr->sr_cpu_type == CPU_TYPE_ARM64 &&
2353 	    sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E &&
2354 	    !(prot & VM_PROT_NOAUTH)) {
2355 		if (sr->sr_num_auth_section == NUM_SR_AUTH_SECTIONS) {
2356 			printf("Too many auth/private sections for shared region!!\n");
2357 			kr = KERN_INVALID_ARGUMENT;
2358 			goto done;
2359 		}
2360 		si->si_ptrauth = TRUE;
2361 		sr->sr_auth_section[sr->sr_num_auth_section++] = si;
2362 		/*
2363 		 * Remember the shared region, since that's where we'll
2364 		 * stash this info for all auth pagers to share. Each pager
2365 		 * will need to take a reference to it.
2366 		 */
2367 		si->si_shared_region = sr;
2368 		kr = KERN_SUCCESS;
2369 		goto done;
2370 	}
2371 	si->si_shared_region = NULL;
2372 	si->si_ptrauth = FALSE;
2373 #else /* __has_feature(ptrauth_calls) */
2374 	(void)prot;     /* silence unused warning */
2375 #endif /* __has_feature(ptrauth_calls) */
2376 
2377 	/*
2378 	 * find the pre-existing shared region's map entry to slide
2379 	 */
2380 	sr_map = vm_shared_region_vm_map(sr);
2381 	kr = find_mapping_to_slide(sr_map, (vm_map_address_t)slid_mapping, &tmp_entry_store);
2382 	if (kr != KERN_SUCCESS) {
2383 		goto done;
2384 	}
2385 	tmp_entry = &tmp_entry_store;
2386 
2387 	/*
2388 	 * The object must exactly cover the region to slide.
2389 	 */
2390 	assert(VME_OFFSET(tmp_entry) == start);
2391 	assert(tmp_entry->vme_end - tmp_entry->vme_start == size);
2392 
2393 	/* create a "shared_region" sliding pager */
2394 	sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), VME_OFFSET(tmp_entry), si, 0);
2395 	if (sr_pager == MEMORY_OBJECT_NULL) {
2396 		kr = KERN_RESOURCE_SHORTAGE;
2397 		goto done;
2398 	}
2399 
2400 #if CONFIG_SECLUDED_MEMORY
2401 	/*
2402 	 * Shared region pagers that may back the camera's (primary) region
2403 	 * or a DEXT's shared cache should keep their pages off the secluded queue.
2404 	 */
2405 	if (primary_system_shared_region == NULL ||
2406 	    primary_system_shared_region == sr ||
2407 	    sr->sr_driverkit) {
2408 		memory_object_mark_eligible_for_secluded(sr_pager->mo_control, FALSE);
2409 	}
2410 #endif /* CONFIG_SECLUDED_MEMORY */
2411 
2412 	/* map that pager over the portion of the mapping that needs sliding */
2413 	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
2414 	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
2415 	vmk_flags.vmkf_overwrite_immutable = TRUE;
2416 	map_addr = tmp_entry->vme_start;
2417 	kr = vm_map_enter_mem_object(sr_map,
2418 	    &map_addr,
2419 	    (tmp_entry->vme_end - tmp_entry->vme_start),
2420 	    (mach_vm_offset_t) 0,
2421 	    vm_flags,
2422 	    vmk_flags,
2423 	    VM_KERN_MEMORY_NONE,
2424 	    (ipc_port_t)(uintptr_t) sr_pager,
2425 	    0,
2426 	    TRUE,
2427 	    tmp_entry->protection,
2428 	    tmp_entry->max_protection,
2429 	    tmp_entry->inheritance);
2430 	assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
2431 	assertf(map_addr == tmp_entry->vme_start,
2432 	    "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
2433 	    (uint64_t)map_addr,
2434 	    (uint64_t) tmp_entry->vme_start,
2435 	    tmp_entry);
2436 
2437 	/* success! */
2438 	kr = KERN_SUCCESS;
2439 
2440 done:
2441 	if (sr_pager != NULL) {
2442 		/*
2443 		 * Release the sr_pager reference obtained by shared_region_pager_setup().
2444 		 * The mapping, if it succeeded, is now holding a reference on the memory object.
2445 		 */
2446 		memory_object_deallocate(sr_pager);
2447 		sr_pager = MEMORY_OBJECT_NULL;
2448 	}
2449 	if (tmp_entry != NULL) {
2450 		/* release extra ref on tmp_entry's VM object */
2451 		vm_object_deallocate(VME_OBJECT(tmp_entry));
2452 		tmp_entry = VM_MAP_ENTRY_NULL;
2453 	}
2454 
2455 	if (kr != KERN_SUCCESS) {
2456 		/* cleanup */
2457 		if (si != NULL) {
2458 			if (si->si_slide_object) {
2459 				vm_object_deallocate(si->si_slide_object);
2460 				si->si_slide_object = VM_OBJECT_NULL;
2461 			}
2462 			kfree_type(struct vm_shared_region_slide_info, si);
2463 			si = NULL;
2464 		}
2465 		if (slide_info_entry != NULL) {
2466 			kfree_data(slide_info_entry, (vm_size_t)slide_info_size);
2467 			slide_info_entry = NULL;
2468 		}
2469 	}
2470 	return kr;
2471 }
2472 
2473 static kern_return_t
2474 vm_shared_region_slide_sanity_check_v2(
2475 	vm_shared_region_slide_info_entry_v2_t s_info,
2476 	mach_vm_size_t slide_info_size)
2477 {
2478 	if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v2)) {
2479 		printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
2480 		return KERN_FAILURE;
2481 	}
2482 	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2483 		return KERN_FAILURE;
2484 	}
2485 
2486 	/* Ensure that the slide info doesn't reference any data outside of its bounds. */
2487 
2488 	uint32_t page_starts_count = s_info->page_starts_count;
2489 	uint32_t page_extras_count = s_info->page_extras_count;
2490 	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
2491 	if (num_trailing_entries < page_starts_count) {
2492 		return KERN_FAILURE;
2493 	}
2494 
2495 	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
2496 	mach_vm_size_t trailing_size = num_trailing_entries << 1;
2497 	if (trailing_size >> 1 != num_trailing_entries) {
2498 		return KERN_FAILURE;
2499 	}
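	/*
	 * Example of the round-trip check above (illustrative value): if
	 * num_trailing_entries were 0x8000000000000000, the left shift
	 * would wrap to 0 and shifting back right would not reproduce the
	 * original value, so the request is rejected.
	 */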
2500 
2501 	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2502 	if (required_size < sizeof(*s_info)) {
2503 		return KERN_FAILURE;
2504 	}
2505 
2506 	if (required_size > slide_info_size) {
2507 		return KERN_FAILURE;
2508 	}
2509 
2510 	return KERN_SUCCESS;
2511 }
2512 
2513 static kern_return_t
2514 vm_shared_region_slide_sanity_check_v3(
2515 	vm_shared_region_slide_info_entry_v3_t s_info,
2516 	mach_vm_size_t slide_info_size)
2517 {
2518 	if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v3)) {
2519 		printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
2520 		return KERN_FAILURE;
2521 	}
2522 	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2523 		printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
2524 		return KERN_FAILURE;
2525 	}
2526 
2527 	uint32_t page_starts_count = s_info->page_starts_count;
2528 	mach_vm_size_t num_trailing_entries = page_starts_count;
2529 	mach_vm_size_t trailing_size = num_trailing_entries << 1;
2530 	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2531 	if (required_size < sizeof(*s_info)) {
2532 		printf("vm_shared_region_slide_sanity_check_v3: required_size < sizeof(*s_info) 0x%llx < 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
2533 		return KERN_FAILURE;
2534 	}
2535 
2536 	if (required_size > slide_info_size) {
2537 		printf("vm_shared_region_slide_sanity_check_v3: required_size > slide_info_size 0x%llx > 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
2538 		return KERN_FAILURE;
2539 	}
2540 
2541 	return KERN_SUCCESS;
2542 }
2543 
2544 static kern_return_t
2545 vm_shared_region_slide_sanity_check_v4(
2546 	vm_shared_region_slide_info_entry_v4_t s_info,
2547 	mach_vm_size_t slide_info_size)
2548 {
2549 	if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v4)) {
2550 		printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
2551 		return KERN_FAILURE;
2552 	}
2553 	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2554 		return KERN_FAILURE;
2555 	}
2556 
2557 	/* Ensure that the slide info doesn't reference any data outside of its bounds. */
2558 
2559 	uint32_t page_starts_count = s_info->page_starts_count;
2560 	uint32_t page_extras_count = s_info->page_extras_count;
2561 	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
2562 	if (num_trailing_entries < page_starts_count) {
2563 		return KERN_FAILURE;
2564 	}
2565 
2566 	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
2567 	mach_vm_size_t trailing_size = num_trailing_entries << 1;
2568 	if (trailing_size >> 1 != num_trailing_entries) {
2569 		return KERN_FAILURE;
2570 	}
2571 
2572 	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2573 	if (required_size < sizeof(*s_info)) {
2574 		return KERN_FAILURE;
2575 	}
2576 
2577 	if (required_size > slide_info_size) {
2578 		return KERN_FAILURE;
2579 	}
2580 
2581 	return KERN_SUCCESS;
2582 }
2583 
2584 
2585 static kern_return_t
2586 vm_shared_region_slide_sanity_check(
2587 	vm_shared_region_slide_info_entry_t s_info,
2588 	mach_vm_size_t s_info_size)
2589 {
2590 	kern_return_t kr;
2591 
2592 	switch (s_info->version) {
2593 	case 2:
2594 		kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, s_info_size);
2595 		break;
2596 	case 3:
2597 		kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, s_info_size);
2598 		break;
2599 	case 4:
2600 		kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, s_info_size);
2601 		break;
2602 	default:
2603 		kr = KERN_FAILURE;
2604 	}
2605 	return kr;
2606 }
2607 
2608 static kern_return_t
2609 rebase_chain_32(
2610 	uint8_t *page_content,
2611 	uint16_t start_offset,
2612 	uint32_t slide_amount,
2613 	vm_shared_region_slide_info_entry_v2_t s_info)
2614 {
2615 	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
2616 
2617 	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
2618 	const uint32_t value_mask = ~delta_mask;
2619 	const uint32_t value_add = (uint32_t)(s_info->value_add);
2620 	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
2621 
2622 	uint32_t page_offset = start_offset;
2623 	uint32_t delta = 1;
2624 
2625 	while (delta != 0 && page_offset <= last_page_offset) {
2626 		uint8_t *loc;
2627 		uint32_t value;
2628 
2629 		loc = page_content + page_offset;
2630 		memcpy(&value, loc, sizeof(value));
2631 		delta = (value & delta_mask) >> delta_shift;
2632 		value &= value_mask;
2633 
2634 		if (value != 0) {
2635 			value += value_add;
2636 			value += slide_amount;
2637 		}
2638 		memcpy(loc, &value, sizeof(value));
2639 		page_offset += delta;
2640 	}
2641 
2642 	/* If the offset went past the end of the page, then the slide data is invalid. */
2643 	if (page_offset > last_page_offset) {
2644 		return KERN_FAILURE;
2645 	}
2646 	return KERN_SUCCESS;
2647 }
2648 
2649 static kern_return_t
2650 rebase_chain_64(
2651 	uint8_t *page_content,
2652 	uint16_t start_offset,
2653 	uint32_t slide_amount,
2654 	vm_shared_region_slide_info_entry_v2_t s_info)
2655 {
2656 	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);
2657 
2658 	const uint64_t delta_mask = s_info->delta_mask;
2659 	const uint64_t value_mask = ~delta_mask;
2660 	const uint64_t value_add = s_info->value_add;
2661 	const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;
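	/*
	 * Worked example (illustrative mask, not from a real cache): with
	 * delta_mask == 0x00FFFF0000000000ULL, delta_shift is
	 * __builtin_ctzll(delta_mask) - 2 == 38; shifting right by two
	 * less than the field's bit position scales the 4-byte-unit delta
	 * straight to a byte offset.
	 */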
2662 
2663 	uint32_t page_offset = start_offset;
2664 	uint32_t delta = 1;
2665 
2666 	while (delta != 0 && page_offset <= last_page_offset) {
2667 		uint8_t *loc;
2668 		uint64_t value;
2669 
2670 		loc = page_content + page_offset;
2671 		memcpy(&value, loc, sizeof(value));
2672 		delta = (uint32_t)((value & delta_mask) >> delta_shift);
2673 		value &= value_mask;
2674 
2675 		if (value != 0) {
2676 			value += value_add;
2677 			value += slide_amount;
2678 		}
2679 		memcpy(loc, &value, sizeof(value));
2680 		page_offset += delta;
2681 	}
2682 
2683 	if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
2684 		/* If a pointer straddling the page boundary needs to be adjusted, then
2685 		 * add the slide to the lower half. The encoding guarantees that the upper
2686 		 * half on the next page will need no masking.
2687 		 *
2688 		 * This assumes a little-endian machine and that the region being slid
2689 		 * never crosses a 4 GB boundary. */
2690 
2691 		uint8_t *loc = page_content + page_offset;
2692 		uint32_t value;
2693 
2694 		memcpy(&value, loc, sizeof(value));
2695 		value += slide_amount;
2696 		memcpy(loc, &value, sizeof(value));
2697 	} else if (page_offset > last_page_offset) {
2698 		return KERN_FAILURE;
2699 	}
2700 
2701 	return KERN_SUCCESS;
2702 }
2703 
2704 static kern_return_t
2705 rebase_chain(
2706 	boolean_t is_64,
2707 	uint32_t pageIndex,
2708 	uint8_t *page_content,
2709 	uint16_t start_offset,
2710 	uint32_t slide_amount,
2711 	vm_shared_region_slide_info_entry_v2_t s_info)
2712 {
2713 	kern_return_t kr;
2714 	if (is_64) {
2715 		kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
2716 	} else {
2717 		kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
2718 	}
2719 
2720 	if (kr != KERN_SUCCESS) {
2721 		printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
2722 		    pageIndex, start_offset, slide_amount);
2723 	}
2724 	return kr;
2725 }
2726 
2727 static kern_return_t
2728 vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
2729 {
2730 	vm_shared_region_slide_info_entry_v2_t s_info = &si->si_slide_info_entry->v2;
2731 	const uint32_t slide_amount = si->si_slide;
2732 
2733 	/* The high bits of the delta_mask field are nonzero precisely when the shared
2734 	 * cache is 64-bit. */
2735 	const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;
2736 
2737 	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
2738 	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
2739 
2740 	uint8_t *page_content = (uint8_t *)vaddr;
2741 	uint16_t page_entry;
2742 
2743 	if (pageIndex >= s_info->page_starts_count) {
2744 		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2745 		    pageIndex, s_info->page_starts_count);
2746 		return KERN_FAILURE;
2747 	}
2748 	page_entry = page_starts[pageIndex];
2749 
2750 	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
2751 		return KERN_SUCCESS;
2752 	}
2753 
2754 	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
2755 		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
2756 		uint16_t info;
2757 
2758 		do {
2759 			uint16_t page_start_offset;
2760 			kern_return_t kr;
2761 
2762 			if (chain_index >= s_info->page_extras_count) {
2763 				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
2764 				    chain_index, s_info->page_extras_count);
2765 				return KERN_FAILURE;
2766 			}
2767 			info = page_extras[chain_index];
2768 			page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
2769 
2770 			kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
2771 			if (kr != KERN_SUCCESS) {
2772 				return KERN_FAILURE;
2773 			}
2774 
2775 			chain_index++;
2776 		} while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
2777 	} else {
2778 		const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
2779 		kern_return_t kr;
2780 
2781 		kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
2782 		if (kr != KERN_SUCCESS) {
2783 			return KERN_FAILURE;
2784 		}
2785 	}
2786 
2787 	return KERN_SUCCESS;
2788 }
2789 
2790 
2791 static kern_return_t
2792 vm_shared_region_slide_page_v3(
2793 	vm_shared_region_slide_info_t si,
2794 	vm_offset_t vaddr,
2795 	__unused mach_vm_offset_t uservaddr,
2796 	uint32_t pageIndex,
2797 #if !__has_feature(ptrauth_calls)
2798 	__unused
2799 #endif /* !__has_feature(ptrauth_calls) */
2800 	uint64_t jop_key)
2801 {
2802 	vm_shared_region_slide_info_entry_v3_t s_info = &si->si_slide_info_entry->v3;
2803 	const uint32_t slide_amount = si->si_slide;
2804 
2805 	uint8_t *page_content = (uint8_t *)vaddr;
2806 	uint16_t page_entry;
2807 
2808 	if (pageIndex >= s_info->page_starts_count) {
2809 		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2810 		    pageIndex, s_info->page_starts_count);
2811 		return KERN_FAILURE;
2812 	}
2813 	page_entry = s_info->page_starts[pageIndex];
2814 
2815 	if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
2816 		return KERN_SUCCESS;
2817 	}
2818 
2819 	uint8_t* rebaseLocation = page_content;
2820 	uint64_t delta = page_entry;
2821 	do {
2822 		rebaseLocation += delta;
2823 		uint64_t value;
2824 		memcpy(&value, rebaseLocation, sizeof(value));
2825 		delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);
2826 
2827 		// A pointer is one of :
2828 		// {
2829 		//	 uint64_t pointerValue : 51;
2830 		//	 uint64_t offsetToNextPointer : 11;
2831 		//	 uint64_t isBind : 1 = 0;
2832 		//	 uint64_t authenticated : 1 = 0;
2833 		// }
2834 		// {
2835 		//	 uint32_t offsetFromSharedCacheBase;
2836 		//	 uint16_t diversityData;
2837 		//	 uint16_t hasAddressDiversity : 1;
2838 		//	 uint16_t hasDKey : 1;
2839 		//	 uint16_t hasBKey : 1;
2840 		//	 uint16_t offsetToNextPointer : 11;
2841 		//	 uint16_t isBind : 1;
2842 		//	 uint16_t authenticated : 1 = 1;
2843 		// }
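		// Worked example (illustrative value): a non-authenticated
		// 0x0008000000001000 has offsetToNextPointer == 1, so the next
		// pointer lies 8 bytes later; its top8Bits/bottom43Bits
		// reassemble to 0x1000, and 0x1000 + slide_amount is stored back.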
2844 
2845 		bool isBind = (value & (1ULL << 62)) != 0; /* bit 62 set => bind */
2846 		if (isBind) {
2847 			return KERN_FAILURE;
2848 		}
2849 
2850 #if __has_feature(ptrauth_calls)
2851 		uint16_t diversity_data = (uint16_t)(value >> 32);
2852 		bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
2853 		ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
2854 #endif /* __has_feature(ptrauth_calls) */
2855 		bool isAuthenticated = (value & (1ULL << 63)) != 0;
2856 
2857 		if (isAuthenticated) {
2858 			// The new value for a rebase is the low 32-bits of the threaded value plus the slide.
2859 			value = (value & 0xFFFFFFFF) + slide_amount;
2860 			// Add in the offset from the mach_header
2861 			const uint64_t value_add = s_info->value_add;
2862 			value += value_add;
2863 
2864 #if __has_feature(ptrauth_calls)
2865 			uint64_t discriminator = diversity_data;
2866 			if (hasAddressDiversity) {
2867 				// First calculate a new discriminator using the address of where we are trying to store the value
2868 				uintptr_t pageOffset = rebaseLocation - page_content;
2869 				discriminator = __builtin_ptrauth_blend_discriminator((void*)(((uintptr_t)uservaddr) + pageOffset), discriminator);
2870 			}
2871 
2872 			if (jop_key != 0 && si->si_ptrauth && !arm_user_jop_disabled()) {
2873 				/*
2874 				 * these pointers are used in user mode. disable the kernel key diversification
2875 				 * so we can sign them for use in user mode.
2876 				 */
2877 				value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator, jop_key);
2878 			}
2879 #endif /* __has_feature(ptrauth_calls) */
2880 		} else {
2881 			// The new value for a rebase is the low 51-bits of the threaded value plus the slide.
2882 			// Regular pointer which needs to fit in 51-bits of value.
2883 			// C++ RTTI uses the top bit, so we'll allow the whole top-byte
2884 			// and the bottom 43 bits to fit into the 51 bits.
2885 			uint64_t top8Bits = value & 0x0007F80000000000ULL;
2886 			uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
2887 			uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
2888 			value = targetValue + slide_amount;
2889 		}
2890 
2891 		memcpy(rebaseLocation, &value, sizeof(value));
2892 	} while (delta != 0);
2893 
2894 	return KERN_SUCCESS;
2895 }
2896 
2897 static kern_return_t
2898 rebase_chainv4(
2899 	uint8_t *page_content,
2900 	uint16_t start_offset,
2901 	uint32_t slide_amount,
2902 	vm_shared_region_slide_info_entry_v4_t s_info)
2903 {
2904 	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
2905 
2906 	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
2907 	const uint32_t value_mask = ~delta_mask;
2908 	const uint32_t value_add = (uint32_t)(s_info->value_add);
2909 	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
2910 
2911 	uint32_t page_offset = start_offset;
2912 	uint32_t delta = 1;
2913 
2914 	while (delta != 0 && page_offset <= last_page_offset) {
2915 		uint8_t *loc;
2916 		uint32_t value;
2917 
2918 		loc = page_content + page_offset;
2919 		memcpy(&value, loc, sizeof(value));
2920 		delta = (value & delta_mask) >> delta_shift;
2921 		value &= value_mask;
2922 
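		/*
		 * Decode examples (illustrative values): 0x00004000 is kept
		 * as-is (small positive non-pointer); 0x3FFF8005 becomes
		 * 0xFFFF8005 (small negative, sign-extended); anything else
		 * is a pointer and gets value_add plus slide_amount added.
		 */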
2923 		if ((value & 0xFFFF8000) == 0) {
2924 			// small positive non-pointer, use as-is
2925 		} else if ((value & 0x3FFF8000) == 0x3FFF8000) {
2926 			// small negative non-pointer
2927 			value |= 0xC0000000;
2928 		} else {
2929 			// pointer that needs rebasing
2930 			value += value_add;
2931 			value += slide_amount;
2932 		}
2933 		memcpy(loc, &value, sizeof(value));
2934 		page_offset += delta;
2935 	}
2936 
2937 	/* If the offset went past the end of the page, then the slide data is invalid. */
2938 	if (page_offset > last_page_offset) {
2939 		return KERN_FAILURE;
2940 	}
2941 	return KERN_SUCCESS;
2942 }
2943 
2944 static kern_return_t
2945 vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
2946 {
2947 	vm_shared_region_slide_info_entry_v4_t s_info = &si->si_slide_info_entry->v4;
2948 	const uint32_t slide_amount = si->si_slide;
2949 
2950 	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
2951 	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
2952 
2953 	uint8_t *page_content = (uint8_t *)vaddr;
2954 	uint16_t page_entry;
2955 
2956 	if (pageIndex >= s_info->page_starts_count) {
2957 		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2958 		    pageIndex, s_info->page_starts_count);
2959 		return KERN_FAILURE;
2960 	}
2961 	page_entry = page_starts[pageIndex];
2962 
2963 	if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
2964 		return KERN_SUCCESS;
2965 	}
2966 
2967 	if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
2968 		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
2969 		uint16_t info;
2970 
2971 		do {
2972 			uint16_t page_start_offset;
2973 			kern_return_t kr;
2974 
2975 			if (chain_index >= s_info->page_extras_count) {
2976 				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
2977 				    chain_index, s_info->page_extras_count);
2978 				return KERN_FAILURE;
2979 			}
2980 			info = page_extras[chain_index];
2981 			page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
2982 
2983 			kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
2984 			if (kr != KERN_SUCCESS) {
2985 				return KERN_FAILURE;
2986 			}
2987 
2988 			chain_index++;
2989 		} while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
2990 	} else {
2991 		const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
2992 		kern_return_t kr;
2993 
2994 		kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
2995 		if (kr != KERN_SUCCESS) {
2996 			return KERN_FAILURE;
2997 		}
2998 	}
2999 
3000 	return KERN_SUCCESS;
3001 }
3002 
3003 
3004 
3005 kern_return_t
3006 vm_shared_region_slide_page(
3007 	vm_shared_region_slide_info_t si,
3008 	vm_offset_t vaddr,
3009 	mach_vm_offset_t uservaddr,
3010 	uint32_t pageIndex,
3011 	uint64_t jop_key)
3012 {
3013 	switch (si->si_slide_info_entry->version) {
3014 	case 2:
3015 		return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
3016 	case 3:
3017 		return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex, jop_key);
3018 	case 4:
3019 		return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
3020 	default:
3021 		return KERN_FAILURE;
3022 	}
3023 }
3024 
3025 /******************************************************************************/
3026 /* Comm page support                                                          */
3027 /******************************************************************************/
3028 
3029 SECURITY_READ_ONLY_LATE(ipc_port_t) commpage32_handle = IPC_PORT_NULL;
3030 SECURITY_READ_ONLY_LATE(ipc_port_t) commpage64_handle = IPC_PORT_NULL;
3031 SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage32_entry = NULL;
3032 SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage64_entry = NULL;
3033 SECURITY_READ_ONLY_LATE(vm_map_t) commpage32_map = VM_MAP_NULL;
3034 SECURITY_READ_ONLY_LATE(vm_map_t) commpage64_map = VM_MAP_NULL;
3035 
3036 SECURITY_READ_ONLY_LATE(ipc_port_t) commpage_text32_handle = IPC_PORT_NULL;
3037 SECURITY_READ_ONLY_LATE(ipc_port_t) commpage_text64_handle = IPC_PORT_NULL;
3038 SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage_text32_entry = NULL;
3039 SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage_text64_entry = NULL;
3040 SECURITY_READ_ONLY_LATE(vm_map_t) commpage_text32_map = VM_MAP_NULL;
3041 SECURITY_READ_ONLY_LATE(vm_map_t) commpage_text64_map = VM_MAP_NULL;
3042 
3043 SECURITY_READ_ONLY_LATE(user32_addr_t) commpage_text32_location = 0;
3044 SECURITY_READ_ONLY_LATE(user64_addr_t) commpage_text64_location = 0;
3045 
3046 #if defined(__i386__) || defined(__x86_64__)
3047 /*
3048  * Create a memory entry, VM submap and pmap for one commpage.
3049  */
3050 static void
3051 _vm_commpage_init(
3052 	ipc_port_t      *handlep,
3053 	vm_map_size_t   size)
3054 {
3055 	vm_named_entry_t        mem_entry;
3056 	vm_map_t                new_map;
3057 
3058 	SHARED_REGION_TRACE_DEBUG(
3059 		("commpage: -> _init(0x%llx)\n",
3060 		(long long)size));
3061 
3062 	pmap_t new_pmap = pmap_create_options(NULL, 0, 0);
3063 	if (new_pmap == NULL) {
3064 		panic("_vm_commpage_init: could not allocate pmap");
3065 	}
3066 	new_map = vm_map_create_options(new_pmap, 0, size, VM_MAP_CREATE_DEFAULT);
3067 
3068 	mem_entry = mach_memory_entry_allocate(handlep);
3069 	mem_entry->backing.map = new_map;
3070 	mem_entry->internal = TRUE;
3071 	mem_entry->is_sub_map = TRUE;
3072 	mem_entry->offset = 0;
3073 	mem_entry->protection = VM_PROT_ALL;
3074 	mem_entry->size = size;
3075 
3076 	SHARED_REGION_TRACE_DEBUG(
3077 		("commpage: _init(0x%llx) <- %p\n",
3078 		(long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
3079 }
3080 #endif
3081 
3082 
3083 /*
3084  * Initialize the comm text pages at boot time.
3085  */
3086 void
3087 vm_commpage_text_init(void)
3088 {
3089 	SHARED_REGION_TRACE_DEBUG(
3090 		("commpage text: ->init()\n"));
3091 #if defined(__i386__) || defined(__x86_64__)
3092 	/* create the 32-bit comm text page */
3093 	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* page-aligned slide, kept below the 32-bit max minus 2 pages */
3094 	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
3095 	commpage_text32_entry = mach_memory_entry_from_port(commpage_text32_handle);
3096 	commpage_text32_map = commpage_text32_entry->backing.map;
3097 	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
3098 	/* XXX if (cpu_is_64bit_capable()) ? */
3099 	/* create the 64-bit comm text page */
3100 	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* page-aligned slide within a 2MB range */
3101 	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
3102 	commpage_text64_entry = mach_memory_entry_from_port(commpage_text64_handle);
3103 	commpage_text64_map = commpage_text64_entry->backing.map;
3104 	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);
3105 #endif
3106 
3107 	/* populate the routines in the comm text pages */
3108 	commpage_text_populate();
3109 
3110 	SHARED_REGION_TRACE_DEBUG(
3111 		("commpage text: init() <-\n"));
3112 }
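
/*
 * Worked example with assumed numbers: with 4K pages (PAGE_SHIFT == 12)
 * and a hypothetical slide range of 14 pages, the computation above,
 *
 *	offset = (random() % 14) << 12;
 *
 * yields one of 0x0000, 0x1000, ..., 0xD000, so the randomized text page
 * base stays page-aligned and within the allowed slide window.
 */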
3113 
3114 /*
3115  * Initialize the comm pages at boot time.
3116  */
3117 void
3118 vm_commpage_init(void)
3119 {
3120 	SHARED_REGION_TRACE_DEBUG(
3121 		("commpage: -> init()\n"));
3122 
3123 #if defined(__i386__) || defined(__x86_64__)
3124 	/* create the 32-bit comm page */
3125 	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
3126 	commpage32_entry = mach_memory_entry_from_port(commpage32_handle);
3127 	commpage32_map = commpage32_entry->backing.map;
3128 
3129 	/* XXX if (cpu_is_64bit_capable()) ? */
3130 	/* create the 64-bit comm page */
3131 	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
3132 	commpage64_entry = mach_memory_entry_from_port(commpage64_handle);
3133 	commpage64_map = commpage64_entry->backing.map;
3134 
3135 #endif /* __i386__ || __x86_64__ */
3136 
3137 	/* populate them according to this specific platform */
3138 	commpage_populate();
3139 	__commpage_setup = 1;
3140 #if XNU_TARGET_OS_OSX
3141 	if (__system_power_source == 0) {
3142 		post_sys_powersource_internal(0, 1);
3143 	}
3144 #endif /* XNU_TARGET_OS_OSX */
3145 
3146 	SHARED_REGION_TRACE_DEBUG(
3147 		("commpage: init() <-\n"));
3148 }
3149 
3150 /*
3151  * Enter the appropriate comm page into the task's address space.
3152  * This is called at exec() time via vm_map_exec().
3153  */
3154 kern_return_t
3155 vm_commpage_enter(
3156 	vm_map_t        map,
3157 	task_t          task,
3158 	boolean_t       is64bit)
3159 {
3160 #if     defined(__arm__)
3161 #pragma unused(is64bit)
3162 	(void)task;
3163 	(void)map;
3164 	return KERN_SUCCESS;
3165 #elif   defined(__arm64__)
3166 #pragma unused(is64bit)
3167 	(void)task;
3168 	(void)map;
3169 	pmap_insert_sharedpage(vm_map_pmap(map));
3170 	return KERN_SUCCESS;
3171 #else
3172 	ipc_port_t              commpage_handle, commpage_text_handle;
3173 	vm_map_offset_t         commpage_address, objc_address, commpage_text_address;
3174 	vm_map_size_t           commpage_size, objc_size, commpage_text_size;
3175 	int                     vm_flags;
3176 	vm_map_kernel_flags_t   vmk_flags;
3177 	kern_return_t           kr;
3178 
3179 	SHARED_REGION_TRACE_DEBUG(
3180 		("commpage: -> enter(%p,%p)\n",
3181 		(void *)VM_KERNEL_ADDRPERM(map),
3182 		(void *)VM_KERNEL_ADDRPERM(task)));
3183 
3184 	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
3185 	/* the comm page is likely to be beyond the actual end of the VM map */
3186 	vm_flags = VM_FLAGS_FIXED;
3187 	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
3188 	vmk_flags.vmkf_beyond_max = TRUE;
3189 
3190 	/* select the appropriate comm page for this task */
3191 	assert(!(is64bit ^ vm_map_is_64bit(map)));
3192 	if (is64bit) {
3193 		commpage_handle = commpage64_handle;
3194 		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
3195 		commpage_size = _COMM_PAGE64_AREA_LENGTH;
3196 		objc_size = _COMM_PAGE64_OBJC_SIZE;
3197 		objc_address = _COMM_PAGE64_OBJC_BASE;
3198 		commpage_text_handle = commpage_text64_handle;
3199 		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
3200 	} else {
3201 		commpage_handle = commpage32_handle;
3202 		commpage_address =
3203 		    (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
3204 		commpage_size = _COMM_PAGE32_AREA_LENGTH;
3205 		objc_size = _COMM_PAGE32_OBJC_SIZE;
3206 		objc_address = _COMM_PAGE32_OBJC_BASE;
3207 		commpage_text_handle = commpage_text32_handle;
3208 		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
3209 	}
3210 
3211 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
3212 	if ((commpage_address & (pmap_commpage_size_min(map->pmap) - 1)) == 0 &&
3213 	    (commpage_size & (pmap_commpage_size_min(map->pmap) - 1)) == 0) {
3214 		/* the commpage is properly aligned and sized for pmap-nesting */
3215 		tag = VM_MEMORY_SHARED_PMAP;
3216 		vmk_flags.vmkf_nested_pmap = TRUE;
3217 	}
3218 	/* map the comm page in the task's address space */
3219 	assert(commpage_handle != IPC_PORT_NULL);
3220 	kr = vm_map_enter_mem_object(
3221 		map,
3222 		&commpage_address,
3223 		commpage_size,
3224 		0,
3225 		vm_flags,
3226 		vmk_flags,
3227 		tag,
3228 		commpage_handle,
3229 		0,
3230 		FALSE,
3231 		VM_PROT_READ,
3232 		VM_PROT_READ,
3233 		VM_INHERIT_SHARE);
3234 	if (kr != KERN_SUCCESS) {
3235 		SHARED_REGION_TRACE_ERROR(
3236 			("commpage: enter(%p,0x%llx,0x%llx) "
3237 			"commpage %p mapping failed 0x%x\n",
3238 			(void *)VM_KERNEL_ADDRPERM(map),
3239 			(long long)commpage_address,
3240 			(long long)commpage_size,
3241 			(void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
3242 	}
3243 
3244 	/* map the comm text page in the task's address space */
3245 	assert(commpage_text_handle != IPC_PORT_NULL);
3246 	kr = vm_map_enter_mem_object(
3247 		map,
3248 		&commpage_text_address,
3249 		commpage_text_size,
3250 		0,
3251 		vm_flags,
3252 		vmk_flags,
3253 		tag,
3254 		commpage_text_handle,
3255 		0,
3256 		FALSE,
3257 		VM_PROT_READ | VM_PROT_EXECUTE,
3258 		VM_PROT_READ | VM_PROT_EXECUTE,
3259 		VM_INHERIT_SHARE);
3260 	if (kr != KERN_SUCCESS) {
3261 		SHARED_REGION_TRACE_ERROR(
3262 			("commpage text: enter(%p,0x%llx,0x%llx) "
3263 			"commpage text %p mapping failed 0x%x\n",
3264 			(void *)VM_KERNEL_ADDRPERM(map),
3265 			(long long)commpage_text_address,
3266 			(long long)commpage_text_size,
3267 			(void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
3268 	}
3269 
3270 	/*
3271 	 * Since we're here, we also pre-allocate some virtual space for the
3272 	 * Objective-C run-time, if needed...
3273 	 */
3274 	if (objc_size != 0) {
3275 		kr = vm_map_enter_mem_object(
3276 			map,
3277 			&objc_address,
3278 			objc_size,
3279 			0,
3280 			VM_FLAGS_FIXED,
3281 			vmk_flags,
3282 			tag,
3283 			IPC_PORT_NULL,
3284 			0,
3285 			FALSE,
3286 			VM_PROT_ALL,
3287 			VM_PROT_ALL,
3288 			VM_INHERIT_DEFAULT);
3289 		if (kr != KERN_SUCCESS) {
3290 			SHARED_REGION_TRACE_ERROR(
3291 				("commpage: enter(%p,0x%llx,0x%llx) "
3292 				"objc mapping failed 0x%x\n",
3293 				(void *)VM_KERNEL_ADDRPERM(map),
3294 				(long long)objc_address,
3295 				(long long)objc_size, kr));
3296 		}
3297 	}
3298 
3299 	SHARED_REGION_TRACE_DEBUG(
3300 		("commpage: enter(%p,%p) <- 0x%x\n",
3301 		(void *)VM_KERNEL_ADDRPERM(map),
3302 		(void *)VM_KERNEL_ADDRPERM(task), kr));
3303 	return kr;
3304 #endif
3305 }
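
/*
 * Sketch, not compiled: the nesting eligibility test above is the usual
 * power-of-two alignment trick: x is a multiple of a power-of-two granule
 * exactly when (x & (granule - 1)) == 0. The 32MB granule below is only an
 * assumption for illustration; the real value comes from
 * pmap_commpage_size_min().
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
toy_can_nest(uint64_t addr, uint64_t size)
{
	const uint64_t granule = 32ULL << 20;  /* hypothetical nesting granule */

	return ((addr & (granule - 1)) == 0) &&
	       ((size & (granule - 1)) == 0);
}
#endif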
3306 
3307 int
3308 vm_shared_region_slide(
3309 	uint32_t slide,
3310 	mach_vm_offset_t        entry_start_address,
3311 	mach_vm_size_t          entry_size,
3312 	mach_vm_offset_t        slide_start,
3313 	mach_vm_size_t          slide_size,
3314 	mach_vm_offset_t        slid_mapping,
3315 	memory_object_control_t sr_file_control,
3316 	vm_prot_t               prot)
3317 {
3318 	vm_shared_region_t      sr;
3319 	kern_return_t           error;
3320 
3321 	SHARED_REGION_TRACE_DEBUG(
3322 		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
3323 		slide, entry_start_address, entry_size, slide_start, slide_size));
3324 
3325 	sr = vm_shared_region_get(current_task());
3326 	if (sr == NULL) {
3327 		printf("%s: no shared region?\n", __FUNCTION__);
3328 		SHARED_REGION_TRACE_DEBUG(
3329 			("vm_shared_region_slide: <- %d (no shared region)\n",
3330 			KERN_FAILURE));
3331 		return KERN_FAILURE;
3332 	}
3333 
3334 	/*
3335 	 * Protect from concurrent access.
3336 	 */
3337 	vm_shared_region_lock();
3338 	while (sr->sr_slide_in_progress) {
3339 		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
3340 	}
3341 
3342 	sr->sr_slide_in_progress = TRUE;
3343 	vm_shared_region_unlock();
3344 
3345 	error = vm_shared_region_slide_mapping(sr,
3346 	    (user_addr_t)slide_start,
3347 	    slide_size,
3348 	    entry_start_address,
3349 	    entry_size,
3350 	    slid_mapping,
3351 	    slide,
3352 	    sr_file_control,
3353 	    prot);
3354 	if (error) {
3355 		printf("slide_info initialization failed with kr=%d\n", error);
3356 	}
3357 
3358 	vm_shared_region_lock();
3359 
3360 	assert(sr->sr_slide_in_progress);
3361 	sr->sr_slide_in_progress = FALSE;
3362 	thread_wakeup(&sr->sr_slide_in_progress);
3363 
3364 #if XNU_TARGET_OS_OSX
3365 	if (error == KERN_SUCCESS) {
3366 		shared_region_completed_slide = TRUE;
3367 	}
3368 #endif /* XNU_TARGET_OS_OSX */
3369 	vm_shared_region_unlock();
3370 
3371 	vm_shared_region_deallocate(sr);
3372 
3373 	SHARED_REGION_TRACE_DEBUG(
3374 		("vm_shared_region_slide: <- %d\n",
3375 		error));
3376 
3377 	return error;
3378 }
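
/*
 * The serialization above is the classic "mutex + event" pattern: a flag
 * guarded by the shared-region lock, with waiters sleeping on the flag's
 * address and woken when it clears. Schematically (toy pseudocode;
 * vm_shared_region_sleep drops and re-takes the lock internally):
 *
 *	lock();
 *	while (in_progress)
 *		sleep(&in_progress);	// wait for the current slide
 *	in_progress = TRUE;		// claim the slot
 *	unlock();
 *	... perform the slide without holding the lock ...
 *	lock();
 *	in_progress = FALSE;
 *	wakeup(&in_progress);		// release any waiters
 *	unlock();
 */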
3379 
3380 /*
3381  * Used during Authenticated Root Volume macOS boot.
3382  * Launchd re-execs itself and wants the new launchd to use
3383  * the shared cache from the new root volume. This call
3384  * makes all the existing shared caches stale to allow
3385  * that to happen.
3386  */
3387 void
3388 vm_shared_region_pivot(void)
3389 {
3390 	vm_shared_region_t      shared_region = NULL;
3391 
3392 	vm_shared_region_lock();
3393 
3394 	queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
3395 		assert(shared_region->sr_ref_count > 0);
3396 		shared_region->sr_stale = TRUE;
3397 		if (shared_region->sr_timer_call) {
3398 			/*
3399 			 * We have a shared region ready to be destroyed
3400 			 * and just waiting for a delayed timer to fire.
3401 			 * Marking it stale makes it ineligible for
3402 			 * reuse, so shorten the timer aggressively
3403 			 * down to 10 milliseconds and get rid of the
3404 			 * region. That is a single scheduling quantum;
3405 			 * there is no need to go shorter, but we do
3406 			 * want it short because an unmount of the
3407 			 * volume hosting this shared region could be
3408 			 * right behind us.
3409 			 */
3410 			uint64_t deadline;
3411 			assert(shared_region->sr_ref_count == 1);
3412 
3413 			/*
3414 			 * Take a reference; this also cancels and frees the old timer call.
3415 			 * If the old timer has already fired and is waiting for the
3416 			 * vm_shared_region lock, we simply return with an additional
3417 			 * ref_count, i.e. 2; the old timer will then fire and just drop
3418 			 * the ref count down to 1 with no other modifications.
3419 			 */
3420 			vm_shared_region_reference_locked(shared_region);
3421 
3422 			/* Set up the new timer; it keeps the reference taken above. */
3423 			shared_region->sr_timer_call = thread_call_allocate(
3424 				(thread_call_func_t) vm_shared_region_timeout,
3425 				(thread_call_param_t) shared_region);
3426 
3427 			/* schedule the timer */
3428 			clock_interval_to_deadline(10, /* 10 milliseconds */
3429 			    NSEC_PER_MSEC,
3430 			    &deadline);
3431 			thread_call_enter_delayed(shared_region->sr_timer_call,
3432 			    deadline);
3433 
3434 			SHARED_REGION_TRACE_DEBUG(
3435 				("shared_region: pivot(%p): armed timer\n",
3436 				(void *)VM_KERNEL_ADDRPERM(shared_region)));
3437 		}
3438 	}
3439 
3440 	vm_shared_region_unlock();
3441 }
3442 
3443 /*
3444  * Routine to mark any non-standard slide shared cache region as stale.
3445  * This causes the next "reslide" spawn to create a new shared region.
3446  */
3447 void
3448 vm_shared_region_reslide_stale(void)
3449 {
3450 #if __has_feature(ptrauth_calls)
3451 	vm_shared_region_t      shared_region = NULL;
3452 
3453 	vm_shared_region_lock();
3454 
3455 	queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
3456 		assert(shared_region->sr_ref_count > 0);
3457 		if (!shared_region->sr_stale && shared_region->sr_reslide) {
3458 			shared_region->sr_stale = TRUE;
3459 			vm_shared_region_reslide_count++;
3460 		}
3461 	}
3462 
3463 	vm_shared_region_unlock();
3464 #endif /* __has_feature(ptrauth_calls) */
3465 }
3466 
3467 /*
3468  * Report whether the task is using a reslide shared cache region.
3469  */
3470 bool
3471 vm_shared_region_is_reslide(__unused struct task *task)
3472 {
3473 	bool is_reslide = FALSE;
3474 #if __has_feature(ptrauth_calls)
3475 	vm_shared_region_t sr = vm_shared_region_get(task);
3476 
3477 	if (sr != NULL) {
3478 		is_reslide = sr->sr_reslide;
3479 		vm_shared_region_deallocate(sr);
3480 	}
3481 #endif /* __has_feature(ptrauth_calls) */
3482 	return is_reslide;
3483 }
3484 
3485 /*
3486  * This is called from power management code to let the kernel know the
3487  * current source of power: 0 if it is an external source (connected to
3488  * AC power), 1 if it is the internal power source, i.e. the battery.
3489  */
3490 void
3491 #if XNU_TARGET_OS_OSX
3492 post_sys_powersource(int i)
3493 #else /* XNU_TARGET_OS_OSX */
3494 post_sys_powersource(__unused int i)
3495 #endif /* XNU_TARGET_OS_OSX */
3496 {
3497 #if XNU_TARGET_OS_OSX
3498 	post_sys_powersource_internal(i, 0);
3499 #endif /* XNU_TARGET_OS_OSX */
3500 }
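
/*
 * Usage sketch (hypothetical call sites): power management would report a
 * switch to battery and back to AC power like so:
 *
 *	post_sys_powersource(1);	// now on internal power (battery)
 *	post_sys_powersource(0);	// back on external power
 */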
3501 
3502 
3503 #if XNU_TARGET_OS_OSX
3504 static void
3505 post_sys_powersource_internal(int i, int internal)
3506 {
3507 	if (internal == 0) {
3508 		__system_power_source = i;
3509 	}
3510 }
3511 #endif /* XNU_TARGET_OS_OSX */
3512 
3513 void *
3514 vm_shared_region_root_dir(
3515 	struct vm_shared_region *sr)
3516 {
3517 	void *vnode;
3518 
3519 	vm_shared_region_lock();
3520 	vnode = sr->sr_root_dir;
3521 	vm_shared_region_unlock();
3522 	return vnode;
3523 }
3524