/*
 * Copyright (c) 2019-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*!
 * ARM64-specific functions required to support hibernation entry, and also to
 * support hibernation exit after wired pages have already been restored.
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/startup.h>
#include <IOKit/IOPlatformExpert.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <san/kasan.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <machine/pal_hibernate.h>


extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

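// No pmap structures need to be torn down prior to writing the hibernation
// image on this platform, so this is a no-op.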
void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}

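// Likewise, no pmap structures need to be rebuilt after the image is written.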
void
pal_hib_rebuild_pmap_structs(void)
{
}

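// Describe a physically contiguous DRAM range as an inclusive span of pages in
// a hibernate_bitmap_t, asserting that the page numbers fit in 32 bits.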
static void
set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
{
	uint64_t first_page = atop_64(start_addr);
	uint64_t page_count = atop_64(size);
	uint64_t last_page = first_page + page_count - 1;

	range->first_page = (uint32_t)first_page;
	assert(range->first_page == first_page); // make sure the truncation wasn't lossy

	range->last_page = (uint32_t)last_page;
	assert(range->last_page == last_page); // make sure the truncation wasn't lossy
}

// Comparison function used to sort the DRAM ranges list.
static int
dram_range_compare(const void *a, const void *b)
{
	return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
}

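/**
 * Allocate and initialize the hibernation page list, with one bitmap per DRAM
 * range. Returns NULL if the allocation fails.
 *
 * @param log Whether to log each DRAM range as it is recorded.
 */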
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	vm_size_t size;
	uint32_t bank;
	uint32_t pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t * bitmap;

	// Allocate a single DRAM range to cover the kernel-managed memory.
	hibernate_bitmap_t dram_ranges[1];
	uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);

	// All of kernel-managed memory can be described by one DRAM range
	set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

	// Sort the DRAM ranges based on the first page. Other parts of the hibernation
	// flow expect these ranges to be in order.
	qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}

	list = kalloc_data(size, Z_WAITOK);
	if (!list) {
		goto out;
	}

	list->list_size = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
			    bank,
			    ptoa_64(bitmap->first_page), bitmap->first_page,
			    ptoa_64(bitmap->last_page), bitmap->last_page);
		}
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}

out:

	return list;
}

/**
 * Return the page(s) used as the stack in HIBTEXT.
 *
 * @param first_page Output parameter containing the first page used as a
 *                   stack in HIBTEXT.
 * @param page_count Output parameter containing the number of pages used as
 *                   a stack in HIBTEXT.
 */
void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
#if CONFIG_SPTM
	/* The SPTM determines which stack to use during HIBTEXT. */
	*first_page = atop_64(SPTMArgs->hib_metadata->protected_metadata.hibtext_stack_top) - 1;
	*page_count = 1;
#else
	/* On non-SPTM systems, use the XNU interrupt stack as the HIBTEXT stack. */
	vm_offset_t stack_end = BootCpuData.intstack_top;
	vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
	*first_page = atop_64(kvtophys(stack_begin));
	*page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
#endif /* CONFIG_SPTM */
}

// mark pages not to be saved, but available for scratch usage during restore
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
	vm_offset_t stack_first_page, stack_page_count;
	pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

#if XNU_MONITOR
	extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
	vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);
#endif /* XNU_MONITOR */

	if (!preflight) {
		// mark the stack as unavailable for clobbering during restore;
		// we won't actually save it because we mark these pages as free
		// in hibernate_page_list_set_volatile
		hibernate_set_page_state(page_list, page_list_wired,
		    stack_first_page, stack_page_count,
		    kIOHibernatePageStateWiredSave);

#if XNU_MONITOR
		// Mark the PPL stack as not needing to be saved. Any PPL memory that is
		// excluded from the image will need to be explicitly checked for in
		// pmap_check_ppl_hashed_flag_all(). That function ensures that all
		// PPL pages are contained within the image (so any memory explicitly
		// not being saved needs to be removed from the check).
		hibernate_set_page_state(page_list, page_list_wired,
		    atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
		    kIOHibernatePageStateFree);
#endif /* XNU_MONITOR */
	}

	*pagesOut += stack_page_count;
#if XNU_MONITOR
	*pagesOut -= pmap_stack_page_count;
#endif /* XNU_MONITOR */
}

// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
	vm_offset_t page, count;

	// hibernation restore runs on the interrupt stack,
	// so we need to make sure we don't save it
	pal_hib_get_stack_pages(&page, &count);
	hibernate_set_page_state(page_list, page_list_wired,
	    page, count,
	    kIOHibernatePageStateFree);
	*pagesOut -= count;

#if CONFIG_SPTM
	/**
	 * On SPTM-based systems, parts of the CTRR-protected regions will be
	 * loaded from disk by iBoot instead of being loaded from the hibernation
	 * image for security reasons. Because those regions are being loaded from
	 * disk, they don't need to be saved into the hibernation image, so update
	 * the bitmaps to reflect this.
	 */
	assertf(SPTMArgs->hib_metadata->iboot_loaded_ranges != NULL,
	    "SPTM didn't set up the iboot_loaded_ranges pointer in the hibernation metadata.");

	for (size_t i = 0; i < SPTMArgs->hib_metadata->num_iboot_loaded_ranges; ++i) {
		const hib_phys_range_t *range = &SPTMArgs->hib_metadata->iboot_loaded_ranges[i];
		hibernate_set_page_state(page_list, page_list_wired,
		    range->first_page, range->page_count,
		    kIOHibernatePageStateFree);
		*pagesOut -= range->page_count;
	}
#endif /* CONFIG_SPTM */
}

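// Flag the boot CPU as hibernating and clear the image header's processor flags.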
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
	cpu_datap(master_cpu)->cpu_hibernate = 1;
	header->processorFlags = 0;
	return KERN_SUCCESS;
}

static boolean_t hibernate_vm_locks_safe;

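// Take the VM page-queue locks before the hibernation image is written; see the
// locking protocol comment preceding hibernate_vm_lock_end() below.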
void
hibernate_vm_lock(void)
{
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_lock_queues();
		hibernate_vm_locks_safe = TRUE;
	}
}

void
hibernate_vm_unlock(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_unlock_queues();
	}
	assert(ml_is_quiescing());
}

// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().

void
hibernate_vm_lock_end(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	hibernate_vm_locks_safe = FALSE;
}

boolean_t
hibernate_vm_locks_are_safe(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return hibernate_vm_locks_safe;
}

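// Record the kernel's virtual-to-physical offset in the hibernation globals.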
void
pal_hib_init(void)
{
	gHibernateGlobals.kernelSlide = gVirtBase - gPhysBase;
}

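// Platform hook for the hibernation image write path; no ARM64-specific work
// is required here.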
void
pal_hib_write_hook(void)
{
}