xref: /xnu-8796.101.5/osfmk/kern/kext_alloc.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2008 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <kern/assert.h>
29 #include <kern/debug.h>
30 #include <kern/kext_alloc.h>
31 #include <kern/misc_protos.h>
32 
33 #include <mach/host_priv_server.h>
34 #include <mach/kern_return.h>
35 #include <mach/mach_vm.h>
36 #include <mach/vm_map.h>
37 #include <mach/vm_types.h>
38 
39 #include <mach-o/loader.h>
40 #include <libkern/kernel_mach_header.h>
41 #include <libkern/prelink.h>
42 #include <libkern/OSKextLibPrivate.h>
43 #include <san/kasan.h>
44 
/* Set to 1 to export the allocation bounds for KASLR/IORegistry debugging. */
#define KASLR_IOREG_DEBUG 0


/* Submap of kernel_map from which all kext allocations are served. */
SECURITY_READ_ONLY_LATE(vm_map_t) g_kext_map = 0;
#if KASLR_IOREG_DEBUG
/* Exported (non-static) for debug builds so tools can inspect the range. */
SECURITY_READ_ONLY_LATE(mach_vm_offset_t) kext_alloc_base = 0;
SECURITY_READ_ONLY_LATE(mach_vm_offset_t) kext_alloc_max = 0;
#else
/* Lowest and one-past-highest addresses usable for kext allocations. */
static SECURITY_READ_ONLY_LATE(mach_vm_offset_t) kext_alloc_base = 0;
static SECURITY_READ_ONLY_LATE(mach_vm_offset_t) kext_alloc_max = 0;
#if CONFIG_KEXT_BASEMENT
/* First address available for post-boot kext loads (past prelinked kexts). */
static SECURITY_READ_ONLY_LATE(mach_vm_offset_t) kext_post_boot_base = 0;
#endif
#endif
59 
/*
 * On x86_64 systems, kernel extension text must remain within 2GB of the
 * kernel's text segment.  To ensure this happens, we snag 2GB of kernel VM
 * as early as possible for kext allocations.
 */
__startup_func
void
kext_alloc_init(void)
{
#if CONFIG_KEXT_BASEMENT
	kernel_segment_command_t *text = NULL;
	kernel_segment_command_t *prelinkTextSegment = NULL;
	mach_vm_offset_t text_end, text_start;
	mach_vm_size_t text_size;
	mach_vm_size_t kext_alloc_size;

	/* Determine the start of the kernel's __TEXT segment and determine the
	 * lower bound of the allocated submap for kext allocations.
	 */

	text = getsegbyname(SEG_TEXT);
	text_start = vm_map_trunc_page(text->vmaddr,
	    VM_MAP_PAGE_MASK(kernel_map));
	/* Round the start down to a 512GB boundary. */
	text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1);
	text_end = vm_map_round_page(text->vmaddr + text->vmsize,
	    VM_MAP_PAGE_MASK(kernel_map));
	text_size = text_end - text_start;

	/* KEXT_ALLOC_BASE/SIZE place the basement immediately reachable from
	 * kernel text (within the +/-2GB branch range on x86_64).
	 */
	kext_alloc_base = KEXT_ALLOC_BASE(text_end);
	kext_alloc_size = KEXT_ALLOC_SIZE(text_size);
	kext_alloc_max = kext_alloc_base + kext_alloc_size;

	/* Post boot kext allocation will start after the prelinked kexts */
	prelinkTextSegment = getsegbyname("__PRELINK_TEXT");
	if (prelinkTextSegment) {
		/* use kext_post_boot_base to start allocations past all the prelinked
		 * kexts
		 */
		kext_post_boot_base =
		    vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize,
		    VM_MAP_PAGE_MASK(kernel_map));
	} else {
		kext_post_boot_base = kext_alloc_base;
	}

	/* Allocate the sub block of the kernel map */
	vm_map_will_allocate_early_map(&g_kext_map);
	/* NOTE: kmem_suballoc updates kext_alloc_base in place to the address
	 * actually reserved; KMS_NOFAIL means failure panics inside the call.
	 */
	g_kext_map = kmem_suballoc(kernel_map, &kext_alloc_base,
	    kext_alloc_size, VM_MAP_CREATE_PAGEABLE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    KMS_PERMANENT | KMS_NOFAIL, VM_KERN_MEMORY_KEXT).kmr_submap;

	/* Sanity-check that the reserved range still ends within the
	 * text-reachable window computed above.
	 */
	if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) {
		panic("kext_alloc_init: failed to get first 2GB");
	}

	/* Widen kernel_map downward if the basement sits below its floor. */
	if (kernel_map->min_offset > kext_alloc_base) {
		kernel_map->min_offset = kext_alloc_base;
	}

	printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n",
	    VM_KERNEL_UNSLIDE(kext_alloc_base),
	    VM_KERNEL_UNSLIDE(kext_alloc_max),
	    VM_KERNEL_UNSLIDE(text->vmaddr),
	    VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize));

#else
	/* No basement: kexts allocate anywhere in the kernel map. */
	g_kext_map = kernel_map;
	kext_alloc_base = VM_MIN_KERNEL_ADDRESS;
	kext_alloc_max = VM_MAX_KERNEL_ADDRESS;
#endif /* CONFIG_KEXT_BASEMENT */
}
132 
133 /*
134  * Get a vm addr in the kext submap where a kext
135  * collection of given size could be mapped.
136  */
137 vm_offset_t
get_address_from_kext_map(vm_size_t fsize)138 get_address_from_kext_map(vm_size_t fsize)
139 {
140 	vm_offset_t addr = 0;
141 	kern_return_t ret;
142 
143 	ret = kext_alloc(&addr, fsize, false);
144 	assert(ret == KERN_SUCCESS);
145 
146 	if (ret != KERN_SUCCESS) {
147 		return 0;
148 	}
149 
150 	kext_free(addr, fsize);
151 
152 	addr += VM_MAP_PAGE_SIZE(g_kext_map);
153 	addr = vm_map_trunc_page(addr,
154 	    VM_MAP_PAGE_MASK(g_kext_map));
155 	return addr;
156 }
157 
/*
 * Allocate pageable VM for a kext in the kext submap.
 *
 * _addr: in/out.  On input (when fixed), the requested address; on success,
 *        receives the allocated address.
 * size:  number of bytes to allocate.
 * fixed: if TRUE, allocate exactly at *_addr; otherwise search anywhere
 *        (starting past prelinked kexts when a basement is configured).
 *
 * Returns KERN_SUCCESS, a VM error from the underlying allocator, or
 * KERN_INVALID_ADDRESS if the allocation landed beyond the range reachable
 * from kernel text (in which case it is freed before returning).
 */
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
	kern_return_t rval = 0;
#if CONFIG_KEXT_BASEMENT
	/* Non-fixed requests start searching past the prelinked kexts. */
	mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base;
#else
	mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
#endif
	vm_map_kernel_flags_t vmk_flags = {
		.vmf_fixed = (fixed != 0),
		.vm_tag    = VM_KERN_MEMORY_KEXT,
	};

#if CONFIG_KEXT_BASEMENT
	kc_format_t kcformat;
	if (PE_get_primary_kc_format(&kcformat) && kcformat == KCFormatFileset) {
		/*
		 * There is no need for a kext basement when booting with the
		 * new MH_FILESET format kext collection.
		 */
		rval = mach_vm_allocate_kernel(g_kext_map, &addr, size,
		    vm_map_kernel_flags_vmflags(vmk_flags), vmk_flags.vm_tag);
		if (rval != KERN_SUCCESS) {
			printf("vm_allocate failed - %d\n", rval);
			goto finish;
		}
		goto check_reachable;
	}

	/* Allocate the kext virtual memory
	 * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocated past
	 * kext_post_boot_base (when possible).  mach_vm_allocate will always
	 * start at 0 into the map no matter what you pass in addr.  We want non
	 * fixed (post boot) kext allocations to start looking for free space
	 * just past where prelinked kexts have loaded.
	 */
	rval = mach_vm_map_kernel(g_kext_map,
	    &addr,
	    size,
	    0,                  /* mask: no alignment constraint */
	    vmk_flags,
	    MACH_PORT_NULL,     /* no backing memory object */
	    0,                  /* offset */
	    TRUE,               /* copy */
	    VM_PROT_DEFAULT,
	    VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	if (rval != KERN_SUCCESS) {
		printf("mach_vm_map failed - %d\n", rval);
		goto finish;
	}
check_reachable:
#else
	rval = mach_vm_allocate_kernel(g_kext_map, &addr, size,
	    vm_map_kernel_flags_vmflags(vmk_flags), vmk_flags.vm_tag);
	if (rval != KERN_SUCCESS) {
		printf("vm_allocate failed - %d\n", rval);
		goto finish;
	}
#endif

	/* Check that the memory is reachable by kernel text */
	if ((addr + size) > kext_alloc_max) {
		kext_free((vm_offset_t)addr, size);
		rval = KERN_INVALID_ADDRESS;
		goto finish;
	}

	*_addr = (vm_offset_t)addr;
	rval = KERN_SUCCESS;
#if KASAN
	/* Tell KASan the new range is valid kernel memory. */
	kasan_notify_address(addr, size);
#endif

finish:
	return rval;
}
236 
237 void
kext_free(vm_offset_t addr,vm_size_t size)238 kext_free(vm_offset_t addr, vm_size_t size)
239 {
240 	kern_return_t rval;
241 
242 	rval = mach_vm_deallocate(g_kext_map, addr, size);
243 	assert(rval == KERN_SUCCESS);
244 }
245 
246 kern_return_t
kext_receipt(void ** addrp,size_t * sizep)247 kext_receipt(void **addrp, size_t *sizep)
248 {
249 	kern_return_t ret = KERN_FAILURE;
250 	if (addrp == NULL || sizep == NULL) {
251 		goto finish;
252 	}
253 
254 	kernel_mach_header_t *kc = PE_get_kc_header(KCKindAuxiliary);
255 	if (kc == NULL) {
256 		ret = KERN_MISSING_KC;
257 		goto finish;
258 	}
259 
260 	/*
261 	 * This will be set in early boot once we've successfully checked that
262 	 * the AuxKC is properly linked against the BootKC. If this isn't set,
263 	 * and we have a valid AuxKC mach header, then the booter gave us a
264 	 * bad KC.
265 	 */
266 	if (auxkc_uuid_valid == FALSE) {
267 		ret = KERN_INVALID_KC;
268 		goto finish;
269 	}
270 
271 	size_t size;
272 	void *addr = getsectdatafromheader(kc,
273 	    kReceiptInfoSegment, kAuxKCReceiptSection, &size);
274 	if (addr == NULL) {
275 		ret = KERN_INVALID_KC;
276 		goto finish;
277 	}
278 
279 	*addrp = addr;
280 	*sizep = size;
281 	ret = KERN_SUCCESS;
282 
283 finish:
284 	/*
285 	 * If we do return success, we'll want to wait for the other side to
286 	 * call kext_receipt_set_queried themselves, so we can confirm that
287 	 * it made the roundtrip before allowing third party kexts to load.
288 	 */
289 	if (ret != KERN_SUCCESS) {
290 		kext_receipt_set_queried();
291 	}
292 	return ret;
293 }
294 
295 /*
296  * Returns KERN_FAILURE if the variable was already set.
297  */
298 kern_return_t
kext_receipt_set_queried()299 kext_receipt_set_queried()
300 {
301 	return OSKextSetReceiptQueried();
302 }
303