1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990 Carnegie Mellon University
33 * All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or [email protected]
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach/vm_param.h>
56 #include <vm/vm_kern_xnu.h>
57 #include <vm/vm_map_xnu.h>
58 #include <arm/pmap.h>
59 #include <san/kasan.h>
60
/* Next available kernel VA to steal from before the Mach VM is initialized. */
extern vm_offset_t virtual_space_start; /* Next available kernel VA */

/* Size of the dedicated I/O submap: 8 MiB of kernel virtual address space. */
#define IO_MAP_SIZE (8ul << 20)

/* VA range registered below and carved out of the kernel map at startup. */
__startup_data static struct mach_vm_range io_range;
/* Submap serving device mappings made by io_map(); set once in io_map_init(). */
static SECURITY_READ_ONLY_LATE(vm_map_t) io_submap;
KMEM_RANGE_REGISTER_STATIC(io_submap, &io_range, IO_MAP_SIZE);
68
/*
 * io_map_init:
 *
 * Startup hook, run at the last rank of the KMEM startup phase, that
 * carves the registered io_range out of the kernel map as a dedicated
 * submap used by io_map() for device mappings.
 */
__startup_func
static void
io_map_init(void)
{
	/* Announce the early map so the VM can account for it up front. */
	vm_map_will_allocate_early_map(&io_submap);
	/*
	 * NOTE(review): flags read as "submap is permanent, creation cannot
	 * fail, never faults, no hole list" — presumed from the flag names;
	 * confirm against kmem_suballoc()'s contract.
	 */
	io_submap = kmem_suballoc(kernel_map, &io_range.min_address, IO_MAP_SIZE,
	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_IOKIT).kmr_submap;
}
STARTUP(KMEM, STARTUP_RANK_LAST, io_map_init);
80
81 /*
82 * Allocate and map memory for devices that may need to be mapped before
83 * Mach VM is running. Allows caller to specify mapping protection
84 */
85 vm_offset_t
io_map(vm_map_offset_t phys_addr,vm_size_t size,unsigned int flags,vm_prot_t prot,bool unmappable)86 io_map(
87 vm_map_offset_t phys_addr,
88 vm_size_t size,
89 unsigned int flags,
90 vm_prot_t prot,
91 bool unmappable)
92 {
93 vm_offset_t start_offset = phys_addr - trunc_page(phys_addr);
94 vm_offset_t alloc_size = round_page(size + start_offset);
95 vm_offset_t start;
96
97 phys_addr = trunc_page(phys_addr);
98 if (startup_phase < STARTUP_SUB_KMEM) {
99 /*
100 * VM is not initialized. Grab memory.
101 *
102 * We need to steal address space, not physical memory. Force
103 * alignment of virtual_space_start to support this... but be
104 * aware that if it is not already aligned we waste any
105 * trailing memory in the last page that was stolen.
106 */
107 virtual_space_start = round_page(virtual_space_start);
108
109 start = virtual_space_start;
110 virtual_space_start += round_page(size);
111
112 assert(flags == VM_WIMG_WCOMB || flags == VM_WIMG_IO);
113
114 if (flags == VM_WIMG_WCOMB) {
115 pmap_map_bd_with_options(start, phys_addr,
116 phys_addr + alloc_size, prot, PMAP_MAP_BD_WCOMB);
117 } else {
118 pmap_map_bd(start, phys_addr, phys_addr + alloc_size, prot);
119 }
120 #if KASAN
121 kasan_notify_address(start + start_offset, size);
122 #endif
123 } else {
124 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PAGEABLE;
125
126 if (unmappable) {
127 kmaflags |= KMA_DATA;
128 } else {
129 kmaflags |= KMA_PERMANENT;
130 }
131
132 kmem_alloc(unmappable ? kernel_map : io_submap,
133 &start, alloc_size, kmaflags, VM_KERN_MEMORY_IOKIT);
134 pmap_map(start, phys_addr, phys_addr + alloc_size, prot, flags);
135 }
136 return start + start_offset;
137 }
138