1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990 Carnegie Mellon University
33 * All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or [email protected]
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach/vm_param.h>
56 #include <vm/vm_kern_xnu.h>
57 #include <vm/vm_map_xnu.h>
58 #include <arm/pmap.h>
59 #include <san/kasan.h>
60
/* Next available kernel VA while the VM subsystem is still bootstrapping. */
extern vm_offset_t virtual_space_start;

/* Size of the VA window carved out of the kernel map for I/O mappings: 8 MiB. */
#define IO_MAP_SIZE (8ul << 20)

/* VA range reserved for the I/O submap; only referenced during startup. */
__startup_data static struct mach_vm_range io_range;
/* Submap serving non-unmappable io_map() allocations; written once in io_map_init(). */
static SECURITY_READ_ONLY_LATE(vm_map_t) io_submap;
/* Statically register the IO_MAP_SIZE range so kmem reserves it for io_submap. */
KMEM_RANGE_REGISTER_STATIC(io_submap, &io_range, IO_MAP_SIZE);
68
/*
 * Create the I/O submap by carving IO_MAP_SIZE bytes of VA out of the
 * kernel map, at the fixed range registered above via
 * KMEM_RANGE_REGISTER_STATIC.  Runs once, at the end of the KMEM
 * startup phase (see the STARTUP() registration below).
 */
__startup_func
static void
io_map_init(void)
{
	/* Announce the early map so the VM accepts allocating it this soon. */
	vm_map_will_allocate_early_map(&io_submap);
	/*
	 * KMS_PERMANENT | KMS_NOFAIL: the submap lives forever and creation
	 * cannot fail; VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE places it exactly
	 * at the pre-registered io_range address.
	 */
	io_submap = kmem_suballoc(kernel_map, &io_range.min_address, IO_MAP_SIZE,
	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_IOKIT).kmr_submap;
}
#ifndef __BUILDING_XNU_LIB_UNITTEST__ /* io map is not supported in unit-tests */
STARTUP(KMEM, STARTUP_RANK_LAST, io_map_init);
#endif /* __BUILDING_XNU_LIB_UNITTEST__ */
82
83 /*
84 * Allocate and map memory for devices that may need to be mapped before
85 * Mach VM is running. Allows caller to specify mapping protection
86 */
87 vm_offset_t
io_map(vm_map_offset_t phys_addr,vm_size_t size,unsigned int flags,vm_prot_t prot,bool unmappable)88 io_map(
89 vm_map_offset_t phys_addr,
90 vm_size_t size,
91 unsigned int flags,
92 vm_prot_t prot,
93 bool unmappable)
94 {
95 vm_offset_t start_offset = phys_addr - trunc_page(phys_addr);
96 vm_offset_t alloc_size = round_page(size + start_offset);
97 vm_offset_t start;
98
99 phys_addr = trunc_page(phys_addr);
100 if (startup_phase < STARTUP_SUB_KMEM) {
101 /*
102 * VM is not initialized. Grab memory.
103 *
104 * We need to steal address space, not physical memory. Force
105 * alignment of virtual_space_start to support this... but be
106 * aware that if it is not already aligned we waste any
107 * trailing memory in the last page that was stolen.
108 */
109 virtual_space_start = round_page(virtual_space_start);
110
111 start = virtual_space_start;
112 virtual_space_start += round_page(size);
113
114 assert(flags == VM_WIMG_WCOMB || flags == VM_WIMG_IO);
115
116 if (flags == VM_WIMG_WCOMB) {
117 pmap_map_bd_with_options(start, phys_addr,
118 phys_addr + alloc_size, prot, PMAP_MAP_BD_WCOMB);
119 } else {
120 pmap_map_bd(start, phys_addr, phys_addr + alloc_size, prot);
121 }
122 #if KASAN
123 kasan_notify_address(start + start_offset, size);
124 #endif
125 } else {
126 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PAGEABLE;
127
128 if (unmappable) {
129 kmaflags |= KMA_DATA_SHARED;
130 } else {
131 kmaflags |= KMA_PERMANENT;
132 }
133
134 kmem_alloc(unmappable ? kernel_map : io_submap,
135 &start, alloc_size, kmaflags, VM_KERN_MEMORY_IOKIT);
136 pmap_map(start, phys_addr, phys_addr + alloc_size, prot, flags);
137 }
138 return start + start_offset;
139 }
140