xref: /xnu-12377.81.4/tests/decompression_failure.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 #include <darwintest.h>
2 #include <mach/mach.h>
3 #include <sys/sysctl.h>
4 #include <stdio.h>
5 #include <stdbool.h>
6 #include <stdlib.h>
7 #include <unistd.h>
8 #include <inttypes.h>
9 #include <pthread.h>
10 #include <TargetConditionals.h>
11 #include "try_read_write.h"
12 
// Private syscall wrapper (no public header). A negative pid selects special
// behaviors; -2 freezes/compresses the calling process.
extern int pid_hibernate(int pid);

// VM page size, populated from the vm.pagesize sysctl during test setup.
static vm_address_t page_size;

T_GLOBAL_META(
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_OWNER("peter_newman"),
	T_META_REQUIRES_SYSCTL_EQ("hw.optional.wkdm_popcount", 1)
	);

// Heap blocks whose pages are dirtied, compressed, and then corrupted.
static vm_address_t *blocks;      // array of block_count base addresses
static uint64_t block_count;
static const uint64_t block_length = 0x800000; // 8 MB per block

// Page size as reported by vm.pagesize (read in create_corrupted_regions()).
static uint32_t vm_pagesize;
/*
 * Fill one page-aligned page with a repeating pattern so the page is dirty
 * and will be picked up by a compressor sweep. Asserts page alignment
 * against the global page_size.
 */
static void
dirty_page(const vm_address_t address)
{
	assert((address & (page_size - 1)) == 0UL);

	uint32_t *const words = (uint32_t *)address;
	const vm_address_t words_per_page = page_size / sizeof(uint32_t);
	for (uint32_t idx = 0; idx < words_per_page; idx += 2) {
		words[idx] = idx % 4;
		words[idx + 1] = 0xcdcdcdcd;
	}
}
40 
/*
 * Ask the kernel to corrupt the compressed copy of the page at page_va via
 * the vm.compressor_inject_error sysctl. Returns true when the injection
 * succeeded (the sysctl returned 0), false otherwise.
 */
static bool
try_to_corrupt_page(vm_address_t page_va)
{
	int old_value;
	size_t old_size = sizeof(old_value);
	const int rc = sysctlbyname("vm.compressor_inject_error", &old_value,
	    &old_size, &page_va, sizeof(page_va));
	return rc == 0;
}
50 
/*
 * Allocate roughly as much anonymous memory as there is physical memory
 * (capped per platform), dirty every page to force a compressor sweep, then
 * ask the kernel to corrupt the compressed copies. Populates the blocks /
 * block_count / vm_pagesize globals. Skips the test if no page could be
 * corrupted.
 */
static void
create_corrupted_regions(void)
{
	uint64_t hw_memsize;

	size_t size = sizeof(unsigned int);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.pagesize", &vm_pagesize, &size,
	    NULL, 0), "read vm.pagesize");
	size = sizeof(uint64_t);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &hw_memsize, &size,
	    NULL, 0), "read hw.memsize");

#if TARGET_OS_OSX
	const uint64_t max_memsize = 32ULL * 0x40000000ULL; // 32 GB
#else
	const uint64_t max_memsize = 8ULL * 0x100000ULL; // 8 MB
#endif
	const uint64_t effective_memsize = (hw_memsize > max_memsize) ?
	    max_memsize : hw_memsize;

	const uint64_t total_pages = effective_memsize / vm_pagesize;
	const uint64_t pages_per_block = block_length / vm_pagesize;

	// Map as much memory as we have physical memory to back. Dirtying all
	// of these pages will force a compressor sweep. The mapping is done using
	// the smallest number of malloc() calls to allocate the necessary VAs.
	block_count = total_pages / pages_per_block;

	blocks = (vm_address_t *)malloc(sizeof(*blocks) * block_count);
	// This test deliberately runs the system near its memory limit, so
	// allocation failure is plausible; fail loudly rather than crash.
	T_QUIET; T_ASSERT_NOTNULL(blocks, "allocate block pointer array");
	for (uint64_t i = 0; i < block_count; i++) {
		void *bufferp = malloc(block_length);
		T_QUIET; T_ASSERT_NOTNULL(bufferp, "allocate block %llu", i);
		blocks[i] = (vm_address_t)bufferp;
	}

	for (uint64_t i = 0; i < block_count; i++) {
		for (size_t buffer_offset = 0; buffer_offset < block_length;
		    buffer_offset += vm_pagesize) {
			dirty_page(blocks[i] + buffer_offset);
		}
	}

#if !TARGET_OS_OSX
	// We can't use a substantial amount of memory on embedded platforms, so
	// freeze the current process instead to cause everything to be compressed.
	T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL);
	T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL);
#endif

	uint32_t corrupt = 0;
	for (uint64_t i = 0; i < block_count; i++) {
		for (size_t buffer_offset = 0; buffer_offset < block_length;
		    buffer_offset += vm_pagesize) {
			if (try_to_corrupt_page(blocks[i] + buffer_offset)) {
				corrupt++;
			}
		}
	}

	T_LOG("corrupted %u/%llu pages. accessing...\n", corrupt, total_pages);
	if (corrupt == 0) {
		// Nothing resident in the compressor was injectable; the test
		// cannot prove anything, so skip rather than pass vacuously.
		T_SKIP("no pages corrupted");
	}
}
114 
/*
 * Write one byte to every page of every block until a write faults. When a
 * fault is observed, asserts that the exception code is KERN_MEMORY_FAILURE
 * and returns true. Returns false if every page was writable (no fault).
 */
static bool
read_blocks(void)
{
	for (uint32_t blk = 0; blk < block_count; blk++) {
		const vm_address_t base = blocks[blk];
		for (size_t off = 0; off < block_length; off += vm_pagesize) {
			// Access pages until the fault is detected.
			kern_return_t exception_kr;
			const bool wrote = try_write_byte(base + off, 1,
			    &exception_kr);
			if (wrote) {
				continue;
			}
			T_ASSERT_EQ(exception_kr, KERN_MEMORY_FAILURE,
			    "exception code should be KERN_MEMORY_FAILURE");
			T_LOG("test_thread breaking");
			return true;
		}
	}
	return false;
}
133 
T_DECL(decompression_failure,
    "Confirm that exception is raised on decompression failure",
    // Disable software checks in development builds, as these would result in
    // panics.
    T_META_BOOTARGS_SET("vm_compressor_validation=0"),
    T_META_ASROOT(true),
    // This test intentionally corrupts pages backing heap memory, so it's
    // not practical for it to release all the buffers properly.
    T_META_CHECK_LEAKS(false))
{
	T_SETUPBEGIN;

#if !TARGET_OS_OSX
	// A successful self-freeze proves the compressor/freezer is usable on
	// this device; otherwise the test cannot run.
	if (pid_hibernate(-2) != 0) {
		T_SKIP("compressor not active");
	}
#endif

	// Probe for the error-injection sysctl; it only exists on kernels
	// built with the injection support.
	int value;
	size_t size = sizeof(value);
	if (sysctlbyname("vm.compressor_inject_error", &value, &size, NULL, 0)
	    != 0) {
		T_SKIP("vm.compressor_inject_error not present");
	}

	// sysctlbyname() writes the returned length back through `size`, so
	// reset it before reusing the variable for the next query.
	size = sizeof(value);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.pagesize", &value, &size, NULL, 0),
	    NULL);
	T_ASSERT_EQ_ULONG(size, sizeof(value), NULL);
	page_size = (vm_address_t)value;

	create_corrupted_regions();
	T_SETUPEND;

	if (!read_blocks()) {
		T_SKIP("no faults");
	}
}
171