/* xref: /xnu-12377.81.4/tests/task_vm_info_decompressions.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796) */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <mach/error.h>
#include <mach/task_info.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <sys/kern_memorystatus.h>
#include <sys/sysctl.h>
#include <stdatomic.h>

#include <darwintest.h>
#include <TargetConditionals.h>
17 
18 T_GLOBAL_META(
19 	T_META_NAMESPACE("xnu.vm"),
20 	T_META_RADAR_COMPONENT_NAME("xnu"),
21 	T_META_RADAR_COMPONENT_VERSION("VM"));
22 
23 #define KB 1024
24 #define VM_SIZE_PER_THREAD (64 * KB)
25 
26 static _Atomic int thread_malloc_count = 0;
27 static _Atomic int thread_compressed_count = 0;
28 static _Atomic int thread_thawed_count = 0;
29 static _Atomic int phase = 0;
30 
31 struct thread_args {
32 	int    id;
33 };
34 
35 static void *
worker_thread_function(void * args)36 worker_thread_function(void *args)
37 {
38 	struct thread_args *targs = args;
39 	int thread_id = targs->id;
40 	char *array;
41 
42 	/* Allocate memory */
43 	mach_vm_address_t addr;
44 	kern_return_t kr;
45 	kr = mach_vm_allocate(mach_task_self(), &addr, VM_SIZE_PER_THREAD,
46 	    VM_FLAGS_ANYWHERE | VM_PROT_DEFAULT | VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1));
47 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_allocate()");
48 	array = (char *)addr;
49 	T_QUIET; T_EXPECT_NOTNULL(array, "thread %d allocated heap memory to be dirtied", thread_id);
50 
51 	/* Waiting for phase 1 (touch pages) to start */
52 	while (atomic_load(&phase) != 1) {
53 		;
54 	}
55 
56 	/* Phase 1: touch pages */
57 	T_LOG("thread %d phase 1: dirtying %d heap pages (%d bytes)", thread_id, VM_SIZE_PER_THREAD / (int)PAGE_SIZE, VM_SIZE_PER_THREAD);
58 	memset(&array[0], 1, VM_SIZE_PER_THREAD);
59 	atomic_fetch_add(&thread_malloc_count, 1);
60 
61 	/* Wait for process to be frozen */
62 	while (atomic_load(&phase) != 2) {
63 		;
64 	}
65 
66 	/* Phase 2: compress pages */
67 	kr = mach_vm_behavior_set(mach_task_self(), addr, VM_SIZE_PER_THREAD, VM_BEHAVIOR_PAGEOUT);
68 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_behavior_set()");
69 	atomic_fetch_add(&thread_compressed_count, 1);
70 
71 	while (atomic_load(&phase) != 3) {
72 		;
73 	}
74 
75 	/* Phase 3, process thawed, trigger decompressions by re-faulting pages */
76 	T_LOG("thread %d phase 3: faulting pages back in to trigger decompressions", thread_id);
77 	memset(&array[0], 1, VM_SIZE_PER_THREAD);
78 
79 	/* Main thread will retrieve vm statistics once all threads are thawed */
80 	atomic_fetch_add(&thread_thawed_count, 1);
81 
82 	kr = mach_vm_deallocate(mach_task_self(), addr, VM_SIZE_PER_THREAD);
83 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate()");
84 
85 	return NULL;
86 }
87 
88 static pthread_t*
create_threads(int nthreads,pthread_t * threads,struct thread_args * targs)89 create_threads(int nthreads, pthread_t *threads, struct thread_args *targs)
90 {
91 	int i;
92 	int err;
93 	pthread_attr_t attr;
94 
95 	err = pthread_attr_init(&attr);
96 	T_ASSERT_POSIX_ZERO(err, "pthread_attr_init");
97 	for (i = 0; i < nthreads; i++) {
98 		targs[i].id = i;
99 		err = pthread_create(&threads[i], &attr, worker_thread_function, (void*)&targs[i]);
100 		T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_create");
101 	}
102 
103 	return threads;
104 }
105 
106 static void
join_threads(int nthreads,pthread_t * threads)107 join_threads(int nthreads, pthread_t *threads)
108 {
109 	int i;
110 	int err;
111 
112 	for (i = 0; i < nthreads; i++) {
113 		err = pthread_join(threads[i], NULL);
114 		T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_join");
115 	}
116 }
117 
118 T_DECL(task_vm_info_decompressions,
119     "Test multithreaded per-task decompressions counter", T_META_TAG_VM_NOT_ELIGIBLE)
120 {
121 	int     err;
122 	mach_error_t kr;
123 	int     ncpu;
124 	size_t  ncpu_size = sizeof(ncpu);
125 	int     npages;
126 	int     compressor_mode;
127 	size_t  compressor_mode_size = sizeof(compressor_mode);
128 	task_vm_info_data_t vm_info;
129 	mach_msg_type_number_t count;
130 	pthread_t *threads;
131 	struct thread_args *targs;
132 
133 	T_SETUPBEGIN;
134 
135 	/* Make sure freezer is enabled on target machine */
136 	err = sysctlbyname("vm.compressor.mode", &compressor_mode, &compressor_mode_size, NULL, 0);
137 	if (compressor_mode < 8) {
138 		T_SKIP("This test requires freezer which is not available on the testing platform (vm.compressor.mode is set to %d)", compressor_mode);
139 	}
140 #if TARGET_OS_BRIDGE
141 	T_SKIP("This test requires freezer which is not available on bridgeOS (vm.compressor.mode is set to %d)", compressor_mode);
142 #endif
143 
144 	/* Set number of threads to ncpu available on testing device */
145 	err = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
146 	T_EXPECT_EQ_INT(0, err, "Detected %d cpus\n", ncpu);
147 
148 	/* Set total number of pages to be frozen */
149 	npages = ncpu * VM_SIZE_PER_THREAD / (int)PAGE_SIZE;
150 	T_LOG("Test will be freezing at least %d heap pages\n", npages);
151 
152 	/* Change state to freezable */
153 	err = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, getpid(), (uint32_t)1, NULL, 0);
154 	T_EXPECT_EQ(KERN_SUCCESS, err, "set pid %d to be freezable", getpid());
155 
156 	/* Call into kernel to retrieve vm_info and make sure we do not have any decompressions before the test */
157 	count = TASK_VM_INFO_COUNT;
158 	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
159 	T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
160 	T_EXPECT_EQ_INT(0, err, "task_info(TASK_VM_INFO) returned 0");
161 	T_EXPECT_EQ_INT(0, vm_info.decompressions, "Expected 0 decompressions before test starts");
162 
163 	/* Thread data */
164 	threads = malloc(sizeof(pthread_t) * (size_t)ncpu);
165 	targs = malloc(sizeof(struct thread_args) * (size_t)ncpu);
166 
167 	T_SETUPEND;
168 
169 	/* Phase 1: create threads to write to malloc memory */
170 	create_threads(ncpu, threads, targs);
171 	atomic_fetch_add(&phase, 1);
172 
173 	/* Wait for all threads to dirty their malloc pages */
174 	while (atomic_load(&thread_malloc_count) != ncpu) {
175 		sleep(1);
176 	}
177 	T_EXPECT_EQ(ncpu, atomic_load(&thread_malloc_count), "%d threads finished writing to malloc pages\n", ncpu);
178 
179 	count = TASK_VM_INFO_COUNT;
180 	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
181 	T_QUIET; T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
182 	T_QUIET; T_EXPECT_EQ_INT(0, err, "task_info(TASK_VM_INFO) returned 0");
183 	T_EXPECT_EQ(0, vm_info.decompressions, "Expected 0 decompressions before compressions");
184 
185 	/* Launch freezer to compress the dirty pages */
186 	atomic_fetch_add(&phase, 1);
187 	/* Wait for all threads to compress their pages */
188 	while (atomic_load(&thread_compressed_count) != ncpu) {
189 		sleep(1);
190 	}
191 	T_EXPECT_EQ(ncpu, atomic_load(&thread_compressed_count), "%d threads finished writing to malloc pages\n", ncpu);
192 
193 	/* Phase 2: triger decompression in threads */
194 	atomic_fetch_add(&phase, 1);
195 
196 	/* Wait for all threads to decompress their malloc pages */
197 	while (atomic_load(&thread_thawed_count) != ncpu) {
198 		sleep(1);
199 	}
200 
201 	/* Phase 3: Call into kernel to retrieve vm_info and to get the updated decompressions counter */
202 	count = TASK_VM_INFO_COUNT;
203 	kr = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
204 	T_QUIET; T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
205 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "task_info(TASK_VM_INFO)");
206 
207 	/* Make sure this task has decompressed at least all of the dirtied memory */
208 	T_EXPECT_GE_INT(vm_info.decompressions, npages, "decompressed %d pages (>= heap pages: %d)", vm_info.decompressions, npages);
209 	T_PASS("Correctly retrieve per-task decompressions stats");
210 
211 	/* Cleanup */
212 	join_threads(ncpu, threads);
213 	free(threads);
214 	free(targs);
215 }
216