xref: /xnu-12377.1.9/tests/vm/memorystatus_sort_test.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
#include <errno.h>
#include <signal.h>
#include <spawn.h>
#include <stdlib.h>
#include <sys/sysctl.h>
#include <sys/wait.h>

#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <mach-o/dyld.h>

/* internal */
#include <spawn_private.h>
#include <sys/coalition.h>
#include <sys/kern_memorystatus.h>
14 
/* Test metadata applied to every T_DECL in this file. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));
19 
20 #define NUM_PER_ROLE 3 /* Number of procs per role in coalition (besides leader) */
21 #define NUM_PROCS_IN_COALITION (NUM_PER_ROLE * (COALITION_NUM_TASKROLES - 1) + 1)
22 #define NUM_COALITIONS 3
23 
24 #define COAL_ORDER_NUM_PIDS (NUM_PROCS_IN_COALITION + COALITION_NUM_TASKROLES - 1)
25 typedef struct {
26 	pid_t pids[NUM_PROCS_IN_COALITION]; // An array of pids in this coalition. Owned by this struct.
27 	pid_t expected_order[COAL_ORDER_NUM_PIDS]; // An array of pids in this coalition in proper sorted order.
28 	uint64_t ids[COALITION_NUM_TYPES];
29 	size_t leader_footprint;
30 } coalition_info_t;
31 
32 /*
33  * Children pids spawned by this test that need to be cleaned up.
34  * Has to be a global because the T_ATEND API doesn't take any arguments.
35  */
36 #define kMaxChildrenProcs NUM_PROCS_IN_COALITION * NUM_COALITIONS + 1
37 static pid_t children_pids[kMaxChildrenProcs];
38 static size_t num_children = 0;
39 
/*
 * Sets up a new coalition: creates its ids, spawns a member for every
 * role slot, and records the expected sorted order of its pids.
 */
static void init_coalition(coalition_info_t*, size_t leader_fp);

/*
 * Places all procs in the coalition in the given band.
 */
static void place_coalition_in_band(const coalition_info_t *, int band);

/*
 * Place the given proc in the given band.
 */
static void place_proc_in_band(pid_t pid, int band);

/*
 * Cleans up any children processes. Registered with T_ATEND.
 */
static void cleanup_children(void);

/*
 * Check if we're on a kernel where we can test coalitions.
 */
static bool has_unrestrict_coalitions(void);

/*
 * Unrestrict coalition syscalls.
 */
static void unrestrict_coalitions(void);

/*
 * Restrict coalition syscalls
 */
static void restrict_coalitions(void);

/*
 * Allocate the requested number of pages and fault them in.
 * Used to achieve a desired footprint.
 */
static void *allocate_pages(int);

/*
 * Get the vm page size.
 */
static int get_vmpage_size(void);

/*
 * Launch a proc with a role in a coalition.
 * If coalition_ids is NULL, skip adding the proc to the coalition.
 */
static pid_t
launch_proc_in_coalition(uint64_t *coalition_ids, int role, int num_pages);
92 
/*
 * printf-style append through a caller-tracked cursor: on return *buf is
 * advanced past the bytes written and *size is shrunk to the space left.
 *
 * vsnprintf returns the length the output *would* have had, which can
 * exceed *size when the output is truncated. The advance is clamped to
 * the space actually available; without the clamp, *size (a size_t)
 * underflows to a huge value and *buf is pushed past the end of the
 * allocation, so the next call writes out of bounds.
 */
static void
bufprint(char **buf, size_t *size, const char *fmt, ...)
{
	va_list list;
	int n_written;

	va_start(list, fmt);
	n_written = vsnprintf(*buf, *size, fmt, list);
	va_end(list);

	if (n_written > 0) {
		size_t advance = (size_t)n_written;
		if (advance > *size) {
			advance = *size; /* output was truncated */
		}
		*buf += advance;
		*size -= advance;
	}
}
108 
/*
 * Render a pid list as a human-readable string, e.g. "(1, 2), (3)".
 * A -1 entry in pids closes the current group and opens the next.
 * Returns a heap-allocated string owned by the caller (free() it),
 * or NULL if allocation fails.
 */
static char *
pids_str(pid_t *pids, int n_pids)
{
	int i;
	/*
	 * Worst case per entry: up to 10 digits for a pid plus ", ", or the
	 * 4-char group separator "), (". 12 bytes/entry always fits, plus
	 * the surrounding parens and the NUL.
	 */
	size_t buf_len = (size_t)n_pids * 12 + 3;
	char *buf = malloc(buf_len);
	char *obuf = buf;

	if (buf == NULL) {
		return NULL; /* best-effort: callers only use this for logging */
	}

	bufprint(&buf, &buf_len, "(");

	for (i = 0; (i < n_pids) && (buf_len > 0); i++) {
		if (pids[i] == -1) {
			/* Group separator: close the current group, open the next */
			bufprint(&buf, &buf_len, "), (");
		} else {
			/* Omit the trailing comma at the end of a group or the list */
			bool is_last = (i == (n_pids - 1)) || (pids[i + 1] == -1);
			bufprint(&buf, &buf_len, "%d%s", pids[i], is_last ? "" : ", ");
		}
	}

	bufprint(&buf, &buf_len, ")");

	return obuf;
}
132 
/*
 * Sorts the given jetsam band with the desired order and verifies that the
 * sort was done correctly.
 * `expected_order` is an array of groups of PIDs separated by `-1`, where PIDs
 * in each group are re-orderable. For instance, for the expected order:
 * [1, 2, -1, 3, -1, 4]
 * the orderings of
 * 1, 2, 3, 4 and 2, 1, 3, 4 are both valid since 1 and 2 are in the same group.
 *
 * Note: expected_order is consumed — matched entries are overwritten with 0
 * during verification (a copy is kept only for the failure log).
 */
static void
sort_and_verify(
	unsigned int prio,
	memorystatus_jetsam_sort_order_t order,
	pid_t *expected_order,
	size_t expected_order_len)
{
	size_t i, j, n_pids, group_idx;
	bool in_order;
	pid_t *actual_order;
	pid_t *original_expected_order;

	/* Bigger than we need it, but that's fine */
	actual_order = malloc(sizeof(pid_t) * expected_order_len);

	/* Make a copy of expected_order since we'll be overwriting it */
	original_expected_order = malloc(sizeof(pid_t) * expected_order_len);
	memcpy(original_expected_order, expected_order, sizeof(pid_t) * expected_order_len);

	/*
	 * Add only the actual pids from expected_order in to tell memorystatus which
	 * PIDs we care about
	 */
	n_pids = 0;
	for (i = 0; i < expected_order_len; i++) {
		if (expected_order[i] != -1) {
			actual_order[n_pids] = expected_order[i];
			n_pids++;
		}
	}

	/*
	 * Ask the kernel to sort the band; the resulting order of the pids we
	 * passed in is copied back into actual_order.
	 */
	int ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM_SORT, prio, order,
	    actual_order, n_pids * sizeof(pid_t));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "Band sorted and order copied out");

	/* Check that the order we got was what we expected */
	group_idx = 0; /* idx of pid that starts current reorderable group */
	for (i = 0; i < n_pids; i++) {
		/*
		 * Check if the current pid in actual_order is in the current group.
		 * If not, advance to the next group until we find it. This is essentially
		 * a ratcheting mechanism - we can move our search group forwards, but not
		 * backwards.
		 */
		for (j = group_idx; j < expected_order_len; j++) {
			if (expected_order[j] == -1) {
				/* Made it to the end of a group w/o finding the pid */
				group_idx = j + 1;
				continue;
			} else if (expected_order[j] == actual_order[i]) {
				/* Found our pid. Mark it found */
				expected_order[j] = 0;
				break;
			}
		}
	}


	/* Check that all pids were actually found (any non-zero, non -1 entry
	 * was never matched, so the actual order violated the grouping). */
	in_order = true;
	for (i = 0; i < expected_order_len; i++) {
		if ((expected_order[i] != -1) && (expected_order[i] != 0)) {
			in_order = false;
			break;
		}
	}

	T_EXPECT_TRUE(in_order, "Band in correct order when sorted in order (%d)", order);

	if (!in_order) {
		/* Log both orderings to make the failure debuggable */
		char *exp_str = pids_str(original_expected_order, expected_order_len);
		char *actual_str = pids_str(actual_order, n_pids);
		T_LOG("Out of order! Expected:\n%s\nbut got\n%s\n", exp_str, actual_str);
		free(exp_str);
		free(actual_str);
	}

	free(actual_order);
	free(original_expected_order);
}
222 
/*
 * Background process that will munch some memory, signal its parent, and
 * then sit in a loop.
 */
T_HELPER_DECL(coalition_member, "Mock coalition member") {
	int num_pages = 0;
	/* Optional argv[0]: number of pages to fault in for footprint */
	if (argc == 1) {
		num_pages = atoi(argv[0]);
	}
	/* Deliberately leaked: the pages must stay resident in our footprint */
	allocate_pages(num_pages);
	if (num_pages) {
		printf("%d has %d\n", getpid(), num_pages);
	}
	// Signal to the parent that we've touched all of our pages.
	if (kill(getppid(), SIGUSR1) != 0) {
		T_LOG("Unable to signal to parent process!");
		exit(1);
	}
	/* Park until the parent SIGKILLs us in cleanup_children() */
	while (true) {
		sleep(100);
	}
}
245 
/*
 * Fill arr with the identity permutation 0..size-1, then shuffle it by
 * applying `size` random pairwise swaps.
 */
static void
random_order(int *arr, int size)
{
	int idx, first, second, tmp;

	for (idx = 0; idx < size; idx++) {
		arr[idx] = idx;
	}

	for (idx = 0; idx < size; idx++) {
		first = rand() % size;
		second = rand() % size;
		tmp = arr[first];
		arr[first] = arr[second];
		arr[second] = tmp;
	}
}
261 
262 static void
add_coalition_to_order(pid_t * order,coalition_info_t * coal,int coal_idx)263 add_coalition_to_order(pid_t *order, coalition_info_t *coal, int coal_idx)
264 {
265 	int order_idx = coal_idx * (COAL_ORDER_NUM_PIDS + 1);
266 	memcpy(&order[order_idx], &coal->expected_order, sizeof(coal->expected_order));
267 	if (coal_idx != 0) {
268 		order[order_idx - 1] = -1;
269 	}
270 }
271 
/*
 * Test that sorting the fg bucket in coalition order works properly.
 * Spawns children in the same coalition in the fg band. Each child
 * has a different coalition role. Verifies that the coalition
 * is sorted properly by role.
 */
#define COALS_EXPECTED_ORDER_LEN ((COAL_ORDER_NUM_PIDS * NUM_COALITIONS) + (NUM_COALITIONS - 1))
T_DECL(memorystatus_sort_coalitions_footprint, "Sort coalitions by leader footprint",
    T_META_ASROOT(true),
    T_META_TAG_VM_PREFERRED)
{
	int i;
	coalition_info_t *coalitions;
	int coalition_order[NUM_COALITIONS];
	pid_t *expected_order; /* Expected order of all pids in all coalitions */

	if (!has_unrestrict_coalitions()) {
		T_SKIP("Unable to test coalitions on this kernel.");
	}
	unrestrict_coalitions();

	/* Kill children and re-restrict coalition syscalls on exit */
	T_ATEND(cleanup_children);
	T_ATEND(restrict_coalitions);

	/* Initialize our coalitions */
	coalitions = malloc(sizeof(coalition_info_t) * NUM_COALITIONS);
	expected_order = malloc(sizeof(pid_t) * COALS_EXPECTED_ORDER_LEN);

	/* Spawn the coalitions in random order */
	random_order(coalition_order, NUM_COALITIONS);

	/*
	 * Spawn coalitions, each with a different leader footprint.
	 * Coalition number `coal` gets a leader footprint of
	 * (NUM_COALITIONS - coal) * 50 pages, so expected_order is indexed
	 * by coalition number rather than by spawn order.
	 */
	for (i = 0; i < NUM_COALITIONS; i++) {
		int coal = coalition_order[i];
		init_coalition(&coalitions[coal], (NUM_COALITIONS - coal) * 50);
		add_coalition_to_order(expected_order, &coalitions[coal], coal);
		place_coalition_in_band(&coalitions[coal], JETSAM_PRIORITY_FOREGROUND);
	}

	/* Sort by leader footprint and verify coalitions are sorted by leader footprint */
	sort_and_verify(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_FOOTPRINT, expected_order, COALS_EXPECTED_ORDER_LEN);

	free(coalitions);
	free(expected_order);
}
317 
/*
 * Test that sorting the fg bucket by coalition leader LRU works: spawn
 * coalitions, place them in the foreground band in random order, and
 * verify they sort back in placement (LRU) order.
 */
T_DECL(memorystatus_sort_coalitions_lru, "Sort coalitions by leader LRU",
    T_META_ASROOT(true),
    T_META_TAG_VM_PREFERRED)
{
	int i;
	coalition_info_t *coalitions;
	int coalition_order[NUM_COALITIONS];
	pid_t *expected_order; /* Expected order of all pids in all coalitions */

	if (!has_unrestrict_coalitions()) {
		T_SKIP("Unable to test coalitions on this kernel.");
	}
	unrestrict_coalitions();

	/* Kill children and re-restrict coalition syscalls on exit */
	T_ATEND(cleanup_children);
	T_ATEND(restrict_coalitions);

	/* Initialize our coalitions */
	coalitions = malloc(sizeof(coalition_info_t) * NUM_COALITIONS);
	expected_order = malloc(sizeof(pid_t) * COALS_EXPECTED_ORDER_LEN);

	/* Spawn coalitions (equal leader footprints — LRU is what's under test) */
	for (i = 0; i < NUM_COALITIONS; i++) {
		init_coalition(&coalitions[i], 0);
	}

	/*
	 * Add coalitions to foreground in random order. The expected order is
	 * indexed by loop index i (placement order), not coalition number,
	 * because LRU sorting follows when each coalition entered the band.
	 */
	random_order(coalition_order, NUM_COALITIONS);
	for (i = 0; i < NUM_COALITIONS; i++) {
		int coal = coalition_order[i];
		place_coalition_in_band(&coalitions[coal], JETSAM_PRIORITY_FOREGROUND);
		add_coalition_to_order(expected_order, &coalitions[coal], i);
	}


	/* Sort by leader LRU and verify coalitions are sorted by leader LRU */
	sort_and_verify(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_LRU, expected_order, COALS_EXPECTED_ORDER_LEN);

	free(coalitions);
	free(expected_order);
}
359 
360 
/*
 * Test that sorting the idle bucket in footprint order works properly.
 *
 * Spawns some children with very different footprints in the idle band,
 * and then ensures that they get sorted properly.
 */
T_DECL(memorystatus_sort_footprint, "Footprint sort order",
    T_META_ASROOT(true), T_META_TAG_VM_PREFERRED) {
#define kNumChildren 3
	static const int kChildrenFootprints[kNumChildren] = {500, 0, 2500};
	/*
	 * The expected sort order of the children in the order that they were launched.
	 * Used to construct the expected_order pid array.
	 * Note that procs should be sorted in descending footprint order.
	 */
	static const int kExpectedOrder[kNumChildren] = {2, 0, 1};
	static const int kJetsamBand = JETSAM_PRIORITY_BACKGROUND;
	__block pid_t pid;
	sig_t res;
	dispatch_source_t ds_allocated;
	T_ATEND(cleanup_children);

	// After we spawn the children, they'll signal that they've touched their pages.
	res = signal(SIGUSR1, SIG_IGN);
	T_WITH_ERRNO; T_ASSERT_NE(res, SIG_ERR, "SIG_IGN SIGUSR1");
	ds_allocated = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue());
	T_QUIET; T_ASSERT_NOTNULL(ds_allocated, "dispatch_source_create (ds_allocated)");

	/*
	 * Each SIGUSR1 means the most recently spawned child has faulted in
	 * its pages: launch the next child, or — once all are up — sort the
	 * band and verify. Children are launched one at a time from this
	 * handler, so we never have more than one un-acknowledged signal.
	 */
	dispatch_source_set_event_handler(ds_allocated, ^{
		if (num_children < kNumChildren) {
			pid = launch_proc_in_coalition(NULL, 0, kChildrenFootprints[num_children]);
			place_proc_in_band(pid, kJetsamBand);
		} else {
			/* Translate launch-order indices into actual child pids */
			pid_t expected_order[kNumChildren] = {0};
			for (int i = 0; i < kNumChildren; i++) {
				expected_order[i] = children_pids[kExpectedOrder[i]];
			}
			sort_and_verify(kJetsamBand, JETSAM_SORT_FOOTPRINT_NOCOAL, expected_order, kNumChildren);
			T_END;
		}
	});
	dispatch_activate(ds_allocated);

	/* Launch the first child; the handler launches the rest */
	pid = launch_proc_in_coalition(NULL, 0, kChildrenFootprints[num_children]);
	place_proc_in_band(pid, kJetsamBand);

	dispatch_main();

#undef kNumChildren
}
411 
412 static pid_t
launch_proc_in_coalition(uint64_t * coalition_ids,int role,int num_pages)413 launch_proc_in_coalition(uint64_t *coalition_ids, int role, int num_pages)
414 {
415 	int ret;
416 	posix_spawnattr_t attr;
417 	pid_t pid;
418 	char testpath[PATH_MAX];
419 	uint32_t testpath_buf_size = PATH_MAX;
420 	char num_pages_str[32] = {0};
421 	char *argv[5] = {testpath, "-n", "coalition_member", num_pages_str, NULL};
422 	extern char **environ;
423 	T_QUIET; T_ASSERT_LT(num_children + 1, (size_t) kMaxChildrenProcs, "Don't create too many children.");
424 	ret = posix_spawnattr_init(&attr);
425 	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_init");
426 	if (coalition_ids != NULL) {
427 		for (int i = 0; i < COALITION_NUM_TYPES; i++) {
428 			ret = posix_spawnattr_setcoalition_np(&attr, coalition_ids[i], i, role);
429 			T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_setcoalition_np");
430 		}
431 	}
432 
433 	ret = snprintf(num_pages_str, sizeof(num_pages_str), "%d", num_pages);
434 	T_QUIET; T_ASSERT_LE((size_t) ret, sizeof(num_pages_str), "Don't allocate too many pages.");
435 	ret = _NSGetExecutablePath(testpath, &testpath_buf_size);
436 	T_QUIET; T_ASSERT_EQ(ret, 0, "_NSGetExecutablePath");
437 	ret = posix_spawn(&pid, argv[0], NULL, &attr, argv, environ);
438 	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawn");
439 	ret = posix_spawnattr_destroy(&attr);
440 	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_destroy");
441 	children_pids[num_children++] = pid;
442 	return pid;
443 }
444 
/*
 * Spawn a full coalition: one leader plus NUM_PER_ROLE procs for each of
 * the other task roles, assigning roles in random order. Fills in
 * coalition->ids, ->pids, ->leader_footprint, and ->expected_order.
 * Blocks until each child signals (SIGUSR1) that its pages are resident.
 */
static void
init_coalition(coalition_info_t *coalition, size_t leader_fp)
{
	/* This code will need updating if we add a role */
	static_assert(COALITION_NUM_TASKROLES == 4);

	sigset_t set;
	int ret, i, sig;
	uint32_t flags = 0;
	memset(coalition, 0, sizeof(coalition_info_t));
	/* Create one coalition of each type; children are adopted into all */
	for (int i = 0; i < COALITION_NUM_TYPES; i++) {
		COALITION_CREATE_FLAGS_SET_TYPE(flags, i);
		ret = coalition_create(&coalition->ids[i], flags);
		T_QUIET; T_ASSERT_POSIX_ZERO(ret, "coalition_create");
	}

	sigemptyset(&set);
	ret = sigaddset(&set, SIGUSR1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sigaddset(SIGUSR1)");

	coalition->leader_footprint = leader_fp;

	/*
	 * Spawn procs for each coalition role, and construct the expected
	 * sorted order.
	 */
	int n_roles[COALITION_NUM_TASKROLES] = {0};
	/*
	 * Starting index in expected_order of each role's group. The layout
	 * (by these indices) is: undef at 0, ext, xpc, then leader last,
	 * with each group sized for its role count plus a -1 separator slot.
	 */
	int role_order_idx[COALITION_NUM_TASKROLES] = {
		/* COALITION_TASKROLE_UNDEF  */ 0,
		/* COALITION_TASKROLE_LEADER */ (NUM_PER_ROLE + 1) * 3,
		/* COALITION_TASKROLE_XPC    */ (NUM_PER_ROLE + 1) * 2,
		/* COALITION_TASKROLE_EXT    */ NUM_PER_ROLE + 1
	};
	/* Write the -1 separator just before each non-first group */
	for (i = 1; i < COALITION_NUM_TASKROLES; i++) {
		coalition->expected_order[role_order_idx[i] - 1] = -1;
	}
	for (size_t i = 0; i < NUM_PROCS_IN_COALITION; i++) {
		int role;
		size_t pages = 0;

		/* Pick a random role that still has an open slot */
		while (true) {
			role = rand() % COALITION_NUM_TASKROLES;
			if ((role == COALITION_TASKROLE_LEADER) && n_roles[role]) {
				continue; /* Already have a leader */
			} else if (n_roles[role] == NUM_PER_ROLE) {
				continue; /* Already have all of this role */
			}
			n_roles[role]++;
			break;
		}

		/* Only the leader carries the requested footprint */
		if (role == COALITION_TASKROLE_LEADER) {
			pages = leader_fp;
		}

		pid_t pid = launch_proc_in_coalition(coalition->ids, role, pages);
		/* Wait for the child to report its pages are faulted in */
		ret = sigwait(&set, &sig);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sigwait");
		T_QUIET; T_ASSERT_EQ(sig, SIGUSR1, "sigwait == SIGUSR1");
		coalition->pids[i] = pid;
		/* Append the pid to its role's group in the expected order */
		coalition->expected_order[role_order_idx[role]] = pid;
		role_order_idx[role]++;
	}
}
509 
510 static void
place_proc_in_band(pid_t pid,int band)511 place_proc_in_band(pid_t pid, int band)
512 {
513 	memorystatus_priority_properties_t props = {0};
514 	int ret;
515 	props.priority = band;
516 	props.user_data = 0;
517 	ret = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, pid, 0, &props, sizeof(props));
518 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "move proc to band");
519 }
520 
521 
522 static void
place_coalition_in_band(const coalition_info_t * coalition,int band)523 place_coalition_in_band(const coalition_info_t *coalition, int band)
524 {
525 	for (size_t i = 0; i < NUM_PROCS_IN_COALITION; i++) {
526 		pid_t curr = coalition->pids[i];
527 		place_proc_in_band(curr, band);
528 	}
529 }
530 
531 static void
cleanup_children(void)532 cleanup_children(void)
533 {
534 	int ret, status;
535 	for (size_t i = 0; i < num_children; i++) {
536 		pid_t exited_pid = 0;
537 		pid_t curr = children_pids[i];
538 		ret = kill(curr, SIGKILL);
539 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kill");
540 		while (exited_pid == 0) {
541 			exited_pid = waitpid(curr, &status, 0);
542 		}
543 		T_QUIET; T_ASSERT_POSIX_SUCCESS(exited_pid, "waitpid");
544 		T_QUIET; T_ASSERT_TRUE(WIFSIGNALED(status), "proc was signaled.");
545 		T_QUIET; T_ASSERT_EQ(WTERMSIG(status), SIGKILL, "proc was killed");
546 	}
547 }
548 
/*
 * Returns true if the kern.unrestrict_coalitions sysctl exists, i.e.
 * this kernel lets us unrestrict coalition syscalls for testing.
 */
static bool
has_unrestrict_coalitions()
{
	int value = 0;
	size_t value_size = sizeof(value);
	int err;

	err = sysctlbyname("kern.unrestrict_coalitions", &value, &value_size, NULL, 0);
	return err >= 0;
}
560 
/*
 * Allow coalition syscalls for this test by writing
 * kern.unrestrict_coalitions = 1.
 */
static void
unrestrict_coalitions()
{
	int enable = 1;
	int err;

	err = sysctlbyname("kern.unrestrict_coalitions", NULL, 0, &enable, sizeof(enable));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "kern.unrestrict_coalitions <- 1");
}
570 
/*
 * Re-restrict coalition syscalls (undo unrestrict_coalitions) by writing
 * kern.unrestrict_coalitions = 0. Registered with T_ATEND.
 */
static void
restrict_coalitions()
{
	int disable = 0;
	int err;

	err = sysctlbyname("kern.unrestrict_coalitions", NULL, 0, &disable, sizeof(disable));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "kern.unrestrict_coalitions <- 0");
}
580 
581 static void *
allocate_pages(int num_pages)582 allocate_pages(int num_pages)
583 {
584 	int page_size, i;
585 	unsigned char *buf;
586 
587 	page_size = get_vmpage_size();
588 	buf = malloc((unsigned long)(num_pages * page_size));
589 	for (i = 0; i < num_pages; i++) {
590 		((volatile unsigned char *)buf)[i * page_size] = 1;
591 	}
592 	return buf;
593 }
594 
/*
 * Ask the kernel for the VM page size via the vm.pagesize sysctl.
 */
static int
get_vmpage_size()
{
	int page_size = 0;
	size_t len = sizeof(page_size);
	int err;

	err = sysctlbyname("vm.pagesize", &page_size, &len, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "failed to query vm.pagesize");
	T_QUIET; T_ASSERT_GT(page_size, 0, "vm.pagesize is not > 0");
	return page_size;
}
605