xref: /xnu-12377.1.9/osfmk/arm64/hibernate_arm64.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2019-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*!
29  * ARM64-specific functions required to support hibernation entry, and also to
30  * support hibernation exit after wired pages have already been restored.
31  */
32 
33 #include <kern/machine.h>
34 #include <kern/misc_protos.h>
35 #include <kern/thread.h>
36 #include <kern/processor.h>
37 #include <kern/kalloc.h>
38 #include <mach/machine.h>
39 #include <mach/processor_info.h>
40 #include <mach/mach_types.h>
41 #include <kern/cpu_data.h>
42 #include <kern/startup.h>
43 #include <IOKit/IOPlatformExpert.h>
44 #include <pexpert/device_tree.h>
45 
46 #include <IOKit/IOHibernatePrivate.h>
47 #include <vm/vm_page.h>
48 #include <san/kasan.h>
49 #include <arm/cpu_internal.h>
50 #include <arm/cpu_data_internal.h>
51 #include <machine/pal_hibernate.h>
52 
53 
54 extern void
55 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
56 
/*
 * Hook invoked before hibernation to let the platform discard pmap data that
 * need not be saved. On arm64 this is a no-op: no pmap structures are torn
 * down, so no range of unneeded memory is reported back.
 */
void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}
61 
/*
 * Counterpart of pal_hib_teardown_pmap_structs(), invoked on hibernation
 * exit. No-op on arm64 since nothing was torn down on entry.
 */
void
pal_hib_rebuild_pmap_structs(void)
{
}
66 
67 static void
set_dram_range(hibernate_bitmap_t * range,uint64_t start_addr,uint64_t size)68 set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
69 {
70 	uint64_t first_page = atop_64(start_addr);
71 	uint64_t page_count = atop_64(size);
72 	uint64_t last_page = first_page + page_count - 1;
73 
74 	range->first_page = (uint32_t)first_page;
75 	assert(range->first_page == first_page); // make sure the truncation wasn't lossy
76 
77 	range->last_page = (uint32_t)last_page;
78 	assert(range->last_page == last_page); // make sure the truncation wasn't lossy
79 }
80 
81 // Comparison function used to sort the DRAM ranges list.
82 static int
dram_range_compare(const void * a,const void * b)83 dram_range_compare(const void *a, const void *b)
84 {
85 	return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
86 }
87 
/*
 * Allocate and initialize a hibernate_page_list_t describing all
 * kernel-managed DRAM, one bit per page.
 *
 * The list holds one hibernate_bitmap_t per DRAM "bank"; here a single bank
 * covers [gPhysBase, gPhysBase + gPhysSize). Each bank's header is followed
 * inline by its bitmap words (flexible-array layout), so banks are walked by
 * stepping past the words of the previous one.
 *
 * @param log When TRUE, HIBLOG each bank's page range.
 * @return   The freshly kalloc'd list (caller owns it), or NULL if the
 *           allocation fails.
 */
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	vm_size_t               size;
	uint32_t                bank;
	uint32_t                pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t *    bitmap;

	// Allocate a single DRAM range to cover the kernel-managed memory.
	hibernate_bitmap_t      dram_ranges[1];
	uint32_t                num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);

	// All of kernel-managed memory can be described by one DRAM range
	set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

	// Sort the DRAM ranges based on the first page. Other parts of the hibernation
	// flow expect these ranges to be in order.
	qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		// ((pages + 31) >> 5) rounds up to whole 32-bit bitmap words.
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}

	list = kalloc_data(size, Z_WAITOK);
	if (!list) {
		goto out;
	}

	list->list_size  = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page  = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
			    bank,
			    ptoa_64(bitmap->first_page), bitmap->first_page,
			    ptoa_64(bitmap->last_page), bitmap->last_page);
		}
		// Advance past this bank's inline bitmap words to the next header.
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}

out:

	return list;
}
148 
149 /**
150  * Return back page(s) used as the stack in HIBTEXT.
151  *
152  * @param first_page Output parameter representing the first page being used as
153  *                   a stack in HIBTEXT.
154  * @param page_count Output parameter representing the number of pages being
155  *                   used as a stack in HIBTEXT.
156  */
void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
#if CONFIG_SPTM
	/* The SPTM determines which stack to use during HIBTEXT. */
	/* The stack top is one past the stack's highest page, hence the -1. */
	*first_page = atop_64(SPTMArgs->hib_metadata->protected_metadata.hibtext_stack_top) - 1;
	*page_count = 1;
#else
	/* On non-SPTM systems, use the XNU interrupt stack as the HIBTEXT stack. */
	vm_offset_t stack_end = BootCpuData.intstack_top;
	vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
	/* Convert the virtual stack range to a physical first page + count. */
	*first_page = atop_64(kvtophys(stack_begin));
	*page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
#endif /* CONFIG_SPTM */
}
172 
173 // mark pages not to be saved, but available for scratch usage during restore
// mark pages not to be saved, but available for scratch usage during restore
/*
 * @param page_list       Bitmap of all DRAM pages (scratch/save state).
 * @param page_list_wired Bitmap of wired pages.
 * @param preflight       When TRUE, only account page counts; do not touch
 *                        the bitmaps.
 * @param pagesOut        In/out running total of pages to be saved; adjusted
 *                        for the stack pages (added) and, on XNU_MONITOR,
 *                        the PPL stack pages (subtracted).
 */
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
	vm_offset_t stack_first_page, stack_page_count;
	pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

#if XNU_MONITOR
	extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
	vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);
#endif /* XNU_MONITOR */

	if (!preflight) {
		/*
		 * mark the stack as unavailable for clobbering during restore;
		 * we won't actually save it because we mark these pages as free
		 * in hibernate_page_list_set_volatile
		 */
		hibernate_set_page_state(page_list, page_list_wired,
		    stack_first_page, stack_page_count,
		    kIOHibernatePageStateWiredSave);

#if XNU_MONITOR
		/*
		 * Mark the PPL stack as not needing to be saved. Any PPL memory that is
		 * excluded from the image will need to be explicitly checked for in
		 * pmap_check_ppl_hashed_flag_all(). That function ensures that all
		 * PPL pages are contained within the image (so any memory explicitly
		 * not being saved, needs to be removed from the check).
		 */
		hibernate_set_page_state(page_list, page_list_wired,
		    atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
		    kIOHibernatePageStateFree);
#endif /* XNU_MONITOR */

#if CONFIG_SPTM
		/*
		 * Pages for which a hibernate-io-range explicitly prohibits
		 * hibernation restore to write to them must not be
		 * clobbered. They also will not be saved, because
		 * hibernate_page_list_set_volatile() will mark them
		 * appropriately as well.
		 */
		bool (^exclude)(pmap_io_range_t const *) =
		    ^bool (pmap_io_range_t const *range) {
			if (range->wimg & PMAP_IO_RANGE_PROHIBIT_HIB_WRITE) {
				/* No-op if page not in any bitmap (i.e. not managed DRAM). */
				hibernate_set_page_state(page_list, page_list_wired,
		    range->addr >> PAGE_SHIFT, range->len >> PAGE_SHIFT,
		    kIOHibernatePageStateWiredSave);
			}
			/* Returning true keeps pmap_range_iterate() walking all ranges. */
			return true;
		};
		pmap_range_iterate(exclude);
#endif /* CONFIG_SPTM */
	}

	*pagesOut += stack_page_count;
#if XNU_MONITOR
	*pagesOut -= pmap_stack_page_count;
#endif /* XNU_MONITOR */
}
238 
239 // mark pages not to be saved and not for scratch usage during restore
// mark pages not to be saved and not for scratch usage during restore
/*
 * @param page_list       Bitmap of all DRAM pages.
 * @param page_list_wired Bitmap of wired pages.
 * @param pagesOut        In/out running total of pages to be saved;
 *                        decremented for every page marked free here.
 */
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
	vm_offset_t page, count;

	/*
	 * hibernation restore runs on the interrupt stack,
	 * so we need to make sure we don't save it
	 */
	pal_hib_get_stack_pages(&page, &count);
	hibernate_set_page_state(page_list, page_list_wired,
	    page, count,
	    kIOHibernatePageStateFree);
	*pagesOut -= count;

#if CONFIG_SPTM
	/*
	 * Pages that are explicitly prohibited to be restored by a
	 * pmap-io-range must also not be saved.
	 */
	bool (^exclude)(pmap_io_range_t const *) = ^bool (pmap_io_range_t const * range) {
		if (range->wimg & PMAP_IO_RANGE_PROHIBIT_HIB_WRITE) {
			/* No-op if page not in any bitmap (i.e. not managed DRAM). */
			hibernate_set_page_state(page_list, page_list_wired,
	    range->addr >> PAGE_SHIFT, range->len >> PAGE_SHIFT,
	    kIOHibernatePageStateFree);
		}
		/* Returning true keeps pmap_range_iterate() walking all ranges. */
		return true;
	};
	pmap_range_iterate(exclude);

	/*
	 * On SPTM-based systems, parts of the CTRR-protected regions will be
	 * loaded from disk by iBoot instead of being loaded from the hibernation
	 * image for security reasons. Because those regions are being loaded from
	 * disk, they don't need to be saved into the hibernation image, so update
	 * the bitmaps to reflect this.
	 */
	assertf(SPTMArgs->hib_metadata->iboot_loaded_ranges != NULL,
	    "SPTM didn't setup iboot_loaded_ranges pointer in the hibernation metadata.");

	for (size_t i = 0; i < SPTMArgs->hib_metadata->num_iboot_loaded_ranges; ++i) {
		const hib_phys_range_t *range = &SPTMArgs->hib_metadata->iboot_loaded_ranges[i];
		hibernate_set_page_state(page_list, page_list_wired,
		    range->first_page, range->page_count, kIOHibernatePageStateFree);
		*pagesOut -= range->page_count;
	}
#endif /* CONFIG_SPTM */
}
291 
292 kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)293 hibernate_processor_setup(IOHibernateImageHeader * header)
294 {
295 	cpu_datap(master_cpu)->cpu_hibernate = 1;
296 	header->processorFlags = 0;
297 	return KERN_SUCCESS;
298 }
299 
300 static boolean_t hibernate_vm_locks_safe;
301 
302 void
hibernate_vm_lock(void)303 hibernate_vm_lock(void)
304 {
305 	if (kIOHibernateStateHibernating == gIOHibernateState) {
306 		hibernate_vm_lock_queues();
307 		hibernate_vm_locks_safe = TRUE;
308 	}
309 }
310 
311 void
hibernate_vm_unlock(void)312 hibernate_vm_unlock(void)
313 {
314 	assert(FALSE == ml_get_interrupts_enabled());
315 	if (kIOHibernateStateHibernating == gIOHibernateState) {
316 		hibernate_vm_unlock_queues();
317 	}
318 	assert(ml_is_quiescing());
319 }
320 
321 // processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
322 // ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
323 // VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().
324 
325 void
hibernate_vm_lock_end(void)326 hibernate_vm_lock_end(void)
327 {
328 	assert(FALSE == ml_get_interrupts_enabled());
329 	hibernate_vm_locks_safe = FALSE;
330 }
331 
332 boolean_t
hibernate_vm_locks_are_safe(void)333 hibernate_vm_locks_are_safe(void)
334 {
335 	assert(FALSE == ml_get_interrupts_enabled());
336 	return hibernate_vm_locks_safe;
337 }
338 
/*
 * Platform hibernation init: record the KASLR slide (virtual minus physical
 * base) in the hibernation globals so restore code can translate addresses.
 */
void
pal_hib_init(void)
{
	gHibernateGlobals.kernelSlide = gVirtBase - gPhysBase;
}
344 
/*
 * Hook invoked around hibernation image writing. No platform-specific work
 * is needed on arm64, so this is a no-op.
 */
void
pal_hib_write_hook(void)
{
}
349