xref: /xnu-11417.140.69/tests/task_vm_info_decompressions.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 #include <stdlib.h>
2 #include <stdio.h>
3 #include <mach/task_info.h>
4 #include <mach/mach.h>
5 #include <unistd.h>
6 #include <signal.h>
7 #include <errno.h>
8 #include <sys/kern_memorystatus.h>
9 #include <sys/sysctl.h>
10 #include <stdatomic.h>
11 
12 #include <darwintest.h>
13 #include <TargetConditionals.h>
14 
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));

#define KB 1024
/* Heap bytes each worker thread dirties in phase 1 and re-faults in phase 2 */
#define MALLOC_SIZE_PER_THREAD (64 * KB)
/* Helper binary that freezes (compresses) a target pid's dirty pages */
#define freezer_path "/usr/local/bin/freeze"

/* BridgeOS could spend more time execv freezer */
#if TARGET_OS_BRIDGE
static int timeout = 600;       /* seconds allowed for the freezer child to exit */
#else
static int timeout = 120;
#endif

/* Counters shared between the main test thread and the workers */
static _Atomic int thread_malloc_count = 0;     /* workers done dirtying their pages */
static _Atomic int thread_thawed_count = 0;     /* workers done re-faulting (decompressing) */
static _Atomic int phase = 0;                   /* 0 = setup, 1 = dirty pages, 2 = thawed */

/* Per-worker argument block passed to pthread_create */
struct thread_args {
	int    id;      /* worker index, used for logging only */
};
38 
39 static void
freeze_pid(pid_t pid)40 freeze_pid(pid_t pid)
41 {
42 	char pid_str[6];
43 	char *args[3];
44 	pid_t child_pid;
45 	int status;
46 
47 	sprintf(pid_str, "%d", pid);
48 	child_pid = fork();
49 	if (child_pid == 0) {
50 		/* Launch freezer */
51 		args[0] = freezer_path;
52 		args[1] = pid_str;
53 		args[2] = NULL;
54 		execv(freezer_path, args);
55 		/* execve() does not return on success */
56 		perror("execve");
57 		T_FAIL("execve() failed");
58 	}
59 
60 	/* Wait for freezer to complete */
61 	T_LOG("Waiting for freezer %d to complete", child_pid);
62 	while (0 == waitpid(child_pid, &status, WNOHANG)) {
63 		if (timeout < 0) {
64 			kill(child_pid, SIGKILL);
65 			T_FAIL("Freezer took too long to freeze the test");
66 		}
67 		sleep(1);
68 		timeout--;
69 	}
70 	if (WIFEXITED(status) != 1 || WEXITSTATUS(status) != 0) {
71 		T_FAIL("Freezer error'd out");
72 	}
73 }
74 static void *
worker_thread_function(void * args)75 worker_thread_function(void *args)
76 {
77 	struct thread_args *targs = args;
78 	int thread_id = targs->id;
79 	char *array;
80 
81 	/* Allocate memory */
82 	array = malloc(MALLOC_SIZE_PER_THREAD);
83 	T_EXPECT_NOTNULL(array, "thread %d allocated heap memory to be dirtied", thread_id);
84 
85 	/* Waiting for phase 1 (touch pages) to start */
86 	while (atomic_load(&phase) != 1) {
87 		;
88 	}
89 
90 	/* Phase 1: touch pages */
91 	T_LOG("thread %d phase 1: dirtying %d heap pages (%d bytes)", thread_id, MALLOC_SIZE_PER_THREAD / (int)PAGE_SIZE, MALLOC_SIZE_PER_THREAD);
92 	memset(&array[0], 1, MALLOC_SIZE_PER_THREAD);
93 	atomic_fetch_add(&thread_malloc_count, 1);
94 
95 	/* Wait for process to be frozen */
96 	while (atomic_load(&phase) != 2) {
97 		;
98 	}
99 
100 	/* Phase 2, process thawed, trigger decompressions by re-faulting pages */
101 	T_LOG("thread %d phase 2: faulting pages back in to trigger decompressions", thread_id);
102 	memset(&array[0], 1, MALLOC_SIZE_PER_THREAD);
103 
104 	/* Main thread will retrieve vm statistics once all threads are thawed */
105 	atomic_fetch_add(&thread_thawed_count, 1);
106 
107 	free(array);
108 
109 
110 #if 0 /* Test if the thread's decompressions counter was added to the task decompressions counter when a thread terminates */
111 	if (thread_id < 2) {
112 		sleep(10);
113 	}
114 #endif
115 
116 	return NULL;
117 }
118 
119 static pthread_t*
create_threads(int nthreads,pthread_t * threads,struct thread_args * targs)120 create_threads(int nthreads, pthread_t *threads, struct thread_args *targs)
121 {
122 	int i;
123 	int err;
124 	pthread_attr_t attr;
125 
126 	err = pthread_attr_init(&attr);
127 	T_ASSERT_POSIX_ZERO(err, "pthread_attr_init");
128 	for (i = 0; i < nthreads; i++) {
129 		targs[i].id = i;
130 		err = pthread_create(&threads[i], &attr, worker_thread_function, (void*)&targs[i]);
131 		T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_create");
132 	}
133 
134 	return threads;
135 }
136 
137 static void
join_threads(int nthreads,pthread_t * threads)138 join_threads(int nthreads, pthread_t *threads)
139 {
140 	int i;
141 	int err;
142 
143 	for (i = 0; i < nthreads; i++) {
144 		err = pthread_join(threads[i], NULL);
145 		T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_join");
146 	}
147 }
148 
T_DECL(task_vm_info_decompressions,
    "Test multithreaded per-task decompressions counter", T_META_TAG_VM_NOT_ELIGIBLE)
{
	/*
	 * End-to-end check of the per-task decompressions counter exposed via
	 * task_info(TASK_VM_INFO): have ncpu threads dirty heap pages, freeze
	 * the process to compress them, thaw by re-faulting, then verify the
	 * counter grew by at least the number of dirtied pages.
	 */
	int     err;
	int     ncpu;
	size_t  ncpu_size = sizeof(ncpu);
	int     npages;
	int     compressor_mode;
	size_t  compressor_mode_size = sizeof(compressor_mode);
	task_vm_info_data_t vm_info;
	mach_msg_type_number_t count;
	pthread_t *threads;
	struct thread_args *targs;

	T_SETUPBEGIN;

	/* Make sure freezer is enabled on target machine */
	err = sysctlbyname("vm.compressor_mode", &compressor_mode, &compressor_mode_size, NULL, 0);
	/* fix: the original read compressor_mode without checking err (possibly uninitialized) */
	T_ASSERT_POSIX_SUCCESS(err, "read vm.compressor_mode");
	if (compressor_mode < 8) {
		T_SKIP("This test requires freezer which is not available on the testing platform (vm.compressor_mode is set to %d)", compressor_mode);
	}
#if TARGET_OS_BRIDGE
	T_SKIP("This test requires freezer which is not available on bridgeOS (vm.compressor_mode is set to %d)", compressor_mode);
#endif

	/* Set number of threads to ncpu available on testing device */
	err = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	T_EXPECT_EQ_INT(0, err, "Detected %d cpus\n", ncpu);

	/* Set total number of pages to be frozen */
	npages = ncpu * MALLOC_SIZE_PER_THREAD / (int)PAGE_SIZE;
	T_LOG("Test will be freezing at least %d heap pages\n", npages);

	/* Change state to freezable */
	err = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, getpid(), (uint32_t)1, NULL, 0);
	T_EXPECT_EQ(KERN_SUCCESS, err, "set pid %d to be freezable", getpid());

	/* Call into kernel to retrieve vm_info and make sure we do not have any decompressions before the test */
	count = TASK_VM_INFO_COUNT;
	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
	T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
	T_EXPECT_EQ_INT(0, err, "task_info(TASK_VM_INFO) returned 0");
	T_EXPECT_EQ_INT(0, vm_info.decompressions, "Expected 0 decompressions before test starts");

	/* Thread data */
	threads = malloc(sizeof(pthread_t) * (size_t)ncpu);
	targs = malloc(sizeof(struct thread_args) * (size_t)ncpu);
	/* fix: the original dereferenced these without checking the allocations */
	T_QUIET; T_ASSERT_NOTNULL(threads, "allocate pthread_t array");
	T_QUIET; T_ASSERT_NOTNULL(targs, "allocate thread_args array");

	T_SETUPEND;

	/* Phase 1: create threads to write to malloc memory */
	create_threads(ncpu, threads, targs);
	atomic_fetch_add(&phase, 1);

	/* Wait for all threads to dirty their malloc pages */
	while (atomic_load(&thread_malloc_count) != ncpu) {
		sleep(1);
	}
	T_EXPECT_EQ(ncpu, atomic_load(&thread_malloc_count), "%d threads finished writing to malloc pages\n", ncpu);

	/* Launch freezer to compress the dirty pages */
	T_LOG("Running freezer to compress pages for pid %d", getpid());
	freeze_pid(getpid());

	/* Phase 2: triger decompression in threads */
	atomic_fetch_add(&phase, 1);

	/* Wait for all threads to decompress their malloc pages */
	while (atomic_load(&thread_thawed_count) != ncpu) {
		sleep(1);
	}

	/* Phase 3: Call into kernel to retrieve vm_info and to get the updated decompressions counter */
	count = TASK_VM_INFO_COUNT;
	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
	T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
	T_EXPECT_EQ(0, err, "task_info(TASK_VM_INFO) returned 0");

	/* Make sure this task has decompressed at least all of the dirtied memory */
	T_EXPECT_GE_INT(vm_info.decompressions, npages, "decompressed %d pages (>= heap pages: %d)", vm_info.decompressions, npages);
	T_PASS("Correctly retrieve per-task decompressions stats");

	/* Cleanup */
	join_threads(ncpu, threads);
	free(threads);
	free(targs);
}
236