xref: /xnu-12377.41.6/tests/vm/vm_stress.cpp (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 #include <chrono>
2 #include <cstdio>
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <sys/stat.h>
6 #include <fstream>
7 #include <iostream>
8 #include <random>
9 #include <shared_mutex>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <sys/mman.h>
13 #include <sys/sysctl.h>
14 #include <sys/types.h>
15 #include <unistd.h>
16 #include <csignal>
17 #include <stdexcept>
18 #include <memory>
19 #include <getopt.h>
20 
21 #include <future>
22 #include <thread>
23 #include <map>
24 #include <vector>
25 
26 #include <mach/mach.h>
27 #include <mach/mach_vm.h>
28 #include <mach/vm_map.h>
29 #include <darwintest.h>
30 
// darwintest metadata: files this test under the xnu.vm namespace and the
// xnu/VM radar component.
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_OWNER("tgal2"));
36 
/** The following are modes that determine the way in which the created objects will be re-mapped to the task's memory.
 *  The test behaves as follows according to the chosen policy:
 *  RandomPartition - creates a buffer for each (randomly sized) part of each object. Every page of every object will be re-mapped exactly once.
 *  OneToMany - creates multiple mappings of the entire object.
 *  Overwrite - same as OneToMany, except that a portion of each mapping's pages will be overwritten, creating double the amount of mappings in total.
 *  Topology - creates mappings according to different topologies.
 */
enum class MappingPolicy {
	RandomPartition, // random split of the object; every page mapped exactly once
	OneToMany,       // several whole-object mappings
	Overwrite,       // OneToMany, then partially overwrite each mapping (doubles the mapping count)
	Topology,        // CoW re-mapping trees (chain/star/ternary/random)
};
50 
// Knobs for a single stress run (filled from the command line).
struct TestParams {
	uint32_t num_objects;   // number of source VM objects to create
	uint64_t obj_size;      // object size, in pages
	uint32_t runtime_secs;  // how long the stress threads are left running
	uint32_t num_threads;   // thread multiplier: num_threads workers per op type
	MappingPolicy policy;   // how objects are re-mapped (see MappingPolicy)
	uint32_t mpng_flags;    // stored into MappingArgs::arg_flags -- NOTE(review): remap_fixed() hard-codes its own flags; confirm this is still consumed somewhere
	bool is_cow;            // create copy-on-write mappings
	bool is_file;           // back objects with files instead of anonymous memory
	bool slow_paging;       // back objects with a file on the slow ramdisk
};
62 
/**
 * Argument bundle for mach_vm_remap(); field order mirrors the call's
 * parameter order. Defaults target and source the current task.
 */
struct MappingArgs {
	task_t arg_target_task = mach_task_self();
	mach_vm_address_t arg_target_address = 0; // where the mapping lands
	uint64_t arg_mapping_size = 0;            // size in bytes
	uint32_t arg_mask = 0;                    // alignment mask
	uint32_t arg_flags = 0;                   // NOTE(review): stored but not consulted by remap_fixed(), which hard-codes VM_FLAGS_OVERWRITE | VM_FLAGS_FIXED
	task_t arg_src_task = mach_task_self();
	mach_vm_address_t arg_src_address = 0;
	bool arg_copy = false;                    // true => copy (CoW) semantics
	uint32_t arg_cur_protection = 0;          // out-parameter of mach_vm_remap
	uint32_t arg_max_protection = 0;          // out-parameter of mach_vm_remap
	uint32_t arg_inheritance = VM_INHERIT_SHARE;
};
76 
/**
 * Success/failure tally for one operation type.
 * NOTE(review): the global variable reuses the struct's name, shadowing it in
 * ordinary lookup -- consider renaming one of the two.
 */
struct status_counters {
	uint32_t success;
	uint32_t fail;
} status_counters;
81 
82 
/**
 * Returns a uniformly distributed random integer in the inclusive range
 * [a, b].
 *
 * The distribution is explicitly instantiated for uint64_t: the previous
 * std::uniform_int_distribution<> defaulted to `int`, silently truncating
 * 64-bit bounds (undefined behavior for out-of-range values). The engine is
 * thread_local and seeded once per thread instead of constructing a fresh
 * random_device/engine pair on every call.
 */
static uint64_t
random_between(
	uint64_t a, uint64_t b)
{
	thread_local std::mt19937_64 gen{std::random_device{}()};
	std::uniform_int_distribution<uint64_t> dis(a, b);
	return dis(gen);
}
92 
/**
 * Coordinates the test's lifetime: the main thread blocks in
 * wait_for_status() for the configured runtime while worker threads poll
 * `state` to decide whether to keep operating.
 */
class TestRuntime
{
public:
	// Member functions:

	/**
	 * Marks the test as running and blocks until the runtime elapses
	 * (normal completion) or another thread changes `state`.
	 * @return 0 when the run ended via timeout (state == complete),
	 *         1 otherwise (presumably state == error, set elsewhere --
	 *         no setter is visible in this file chunk).
	 */
	int
	wait_for_status(
		int runtime_secs)
	{
		std::unique_lock<std::mutex> lock(mutex);
		auto now = std::chrono::system_clock::now();
		auto deadline = now + std::chrono::seconds(runtime_secs);
		state = running;
		// Re-check the state in a loop to tolerate spurious wakeups;
		// only an actual timeout flips the state to `complete`.
		while (state == running) {
			if (cond.wait_until(lock, deadline) == std::cv_status::timeout) {
				state = complete;
			}
		}
		if (state == complete) {
			return 0;
		} else {
			return 1;
		}
	}

	enum state {
		paused,   // initial state: workers spin without operating
		running,  // workers execute their operations
		error,    // failure observed: workers exit
		complete  // runtime elapsed: workers exit
	};

	// Data members:
	std::atomic<state> state{paused}; // atomic: read by worker threads without holding `mutex`
	std::mutex mutex;

private:
	std::condition_variable cond;
};
131 
// Shared lifecycle controller: the main thread arms it, workers poll its state.
TestRuntime runner;
133 
/**
 * Responsible for creating the actual mapping into vm, performing actions on a
 * mapping or a page, and managing the threads which perform operations on this
 * mapping.
 */
class Mapping
{
	// Signature shared by every stress operation below.
	using vm_op = std::function<bool (Mapping *)>;

public:
	// Constructor: derives the page count from the byte size in `args`,
	// starts the op denominator at full coverage, and establishes the
	// mapping immediately (throws on failure).
	Mapping(uint32_t _id, uint64_t _offset_in_pages, MappingArgs _args, uint32_t _fd)
		: id(_id), offset_in_pages(_offset_in_pages), args(_args), fd(_fd), lock(std::make_shared<std::shared_mutex>()), src_mapping(std::nullopt), is_mapped(false)
	{
		num_pages = args.arg_mapping_size / PAGE_SIZE;
		op_denom = num_pages;
		create_mapping();
	}

	// Comparator for sorting by id
	static bool
	compare_by_id(
		const Mapping &a, const Mapping &b)
	{
		return a.id < b.id;
	}

	// Member functions:

	// Creation:

	/**
	 * (Re-)establishes this mapping at its fixed target address, starting
	 * `offset_in_pages` pages into the source region. VM_FLAGS_OVERWRITE |
	 * VM_FLAGS_FIXED replaces whatever currently occupies the target range.
	 * Sets is_mapped on success.
	 */
	kern_return_t
	remap_fixed()
	{
		kern_return_t kr = mach_vm_remap(args.arg_target_task, &args.arg_target_address, args.arg_mapping_size,
		    args.arg_mask, VM_FLAGS_OVERWRITE | VM_FLAGS_FIXED, args.arg_src_task,
		    args.arg_src_address + offset_in_pages * PAGE_SIZE, args.arg_copy, (vm_prot_t *)&(args.arg_cur_protection),
		    (vm_prot_t *)&(args.arg_max_protection), args.arg_inheritance);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		is_mapped = true;
		return kr;
	}

	// First-time mapping creation; throws (failing the test) if the remap
	// does not succeed.
	int
	create_mapping()
	{
		kern_return_t kr = remap_fixed();
		if (kr != KERN_SUCCESS) {
			throw std::runtime_error("mach_vm_remap failed: " + std::string(mach_error_string(kr)) + "\n");
		}
		return 0;
	}

	// Records the mapping this one was remapped from (its "parent").
	void
	set_src_mapping(
		Mapping &other)
	{
		src_mapping = other;
	}

	// Operations to be done by the ran threads:

	// Caller must hold `lock` exclusively (see op_dealloc()).
	// NOTE(review): passes arg_src_task with arg_target_address; both tasks
	// default to mach_task_self() here, so this works -- confirm intent if
	// remote tasks are ever used.
	kern_return_t
	deallocate_no_lock()
	{
		is_mapped = false;
		kern_return_t kr = mach_vm_deallocate(args.arg_src_task, args.arg_target_address, args.arg_mapping_size);
		return kr;
	}

	// Re-map when there is no parent mapping: only our own lock is needed.
	bool
	realloc_no_parent()
	{
		std::unique_lock<std::shared_mutex> my_unique(*lock);

		kern_return_t kr = remap_fixed();
		if (kr != KERN_SUCCESS) {
			return false;
		}
		return true;
	}

	/**
	 * Re-map when the source is another mapping: acquire both locks via
	 * std::scoped_lock (deadlock-avoiding order) so the parent cannot be
	 * torn down mid-remap.
	 */
	bool
	realloc_with_parent()
	{
		std::unique_lock<std::shared_mutex> my_unique(*lock, std::defer_lock);
		std::unique_lock<std::shared_mutex> parent_unique(*(src_mapping->get().lock), std::defer_lock);
		std::scoped_lock l{my_unique, parent_unique};

		kern_return_t kr = remap_fixed();
		if (kr != KERN_SUCCESS) {
			return false;
		}
		return true;
	}

	// Tears the mapping down under the exclusive lock.
	bool
	op_dealloc()
	{
		std::unique_lock<std::shared_mutex> my_unique(*lock);

		kern_return_t kr = deallocate_no_lock();
		if (kr != KERN_SUCCESS) {
			return false;
		}
		return true;
	}

	// Re-establishes the mapping, taking the parent's lock too when one exists.
	bool
	op_realloc()
	{
		// std::this_thread::sleep_for(std::chrono::microseconds(50));
		if (src_mapping) {
			return realloc_with_parent();
		} else {
			return realloc_no_parent();
		}
	}

	// Re-protects the first num_pages/op_denom pages RW.
	// NOTE(review): runs without taking `lock` or checking is_mapped --
	// presumably intentional for a stress test; confirm.
	bool
	op_protect()
	{
		kern_return_t kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t)args.arg_target_address,
		    (num_pages / op_denom) * PAGE_SIZE, 0, VM_PROT_READ | VM_PROT_WRITE);
		if (kr != KERN_SUCCESS) {
			return false;
		}
		return true;
	}

	// Wires the first num_pages/op_denom pages (unlocked, like op_protect).
	bool
	op_wire()
	{
		std::this_thread::sleep_for(std::chrono::microseconds(50));
		uint32_t err = mlock((void *)args.arg_target_address, (num_pages / op_denom) * PAGE_SIZE);
		if (err) {
			return false;
		}
		return true;
	}

	// Dirties the covered pages; shared lock + is_mapped check guard
	// against touching a torn-down mapping.
	bool
	op_write()
	{
		std::shared_lock<std::shared_mutex> my_shared(*lock);
		if (!is_mapped) {
			return false;
		}
		// Modify only the last byte of each page.
		for (uint64_t i = 1; i <= num_pages / op_denom; i++) {
			((char *)args.arg_target_address)[i * PAGE_SIZE - 1] = 'M'; // M marks it was written via the mapping (for debugging purposes)
		}

		// No need to sync to the file. It will be written when paged-out (which happens all the time).

		return true;
	}

	// Undoes op_wire() on the same range.
	bool
	op_unwire()
	{
		uint32_t err = munlock((void *)args.arg_target_address, (num_pages / op_denom) * PAGE_SIZE);
		if (err) {
			return false;
		}
		return true;
	}

	/**
	 * Dirties the covered pages, then writes them straight to the backing
	 * file with caching disabled (F_NOCACHE), forcing uncached I/O against
	 * the mapped file. No-op for anonymous objects (fd == 0).
	 * Currently unused -- see the note at the bottom of `ops`.
	 */
	bool
	op_write_direct()
	{
		std::this_thread::sleep_for(std::chrono::microseconds(50));

		if (!fd) {
			return false; // Return early if no file descriptor (no file-backed mapping)
		}

		std::shared_lock<std::shared_mutex> my_shared(*lock);
		if (!is_mapped) {
			return false;
		}

		// Modify only the last byte of each page.
		for (uint64_t i = 1; i <= num_pages / op_denom; i++) {
			((char *)args.arg_target_address)[i * PAGE_SIZE - 1] = 'D'; // D marks it was written using op_write_Direct (for debugging purposes)
		}

		if (fcntl(fd, F_NOCACHE, true)) {
			auto err = errno;
			throw std::runtime_error("fcntl failed. err=" + std::to_string(err) + "\n");
		}
		if (lseek(fd, 0, SEEK_SET) == -1) {
			throw std::runtime_error("lseek failed to move cursor to beginning. err=" + std::to_string(errno));
		}

		int num_bytes = write(fd, (void *)(args.arg_target_address), (num_pages / op_denom) * PAGE_SIZE);

		if (num_bytes == -1) {
			printf("num_bytes=%d", num_bytes);
			return false;
		}

		return true;
	}

	// Hints the kernel to page out the covered range.
	bool
	op_pageout()
	{
		if (madvise((void *)args.arg_target_address, (num_pages / op_denom) * PAGE_SIZE, MADV_PAGEOUT)) {
			return false;
		}
		return true;
	}

	/**
	 * Runs one operation on this mapping, then doubles op_denom (mod
	 * num_pages) so successive calls cover different-sized chunks.
	 * @return the operation's success/failure, fed into the counters.
	 */
	bool
	run_op(const std::pair<vm_op, std::string> *op)
	{
		bool ret = false;
		ret = op->first(this);

		/* Never let the denominator be zero. */
		uint32_t new_denom = (op_denom * 2) % num_pages;
		op_denom = new_denom > 0 ? new_denom : 1;

		return ret;
	}

	// Miscellaneous:

	// Punches a one-page hole just before the mapping so it stays a
	// distinct vm_map entry after an overwrite (see fix_overwritten_mapping).
	void
	create_gap_before()
	{
		mach_vm_address_t to_dealloc = args.arg_target_address - PAGE_SIZE;
		kern_return_t kr = mach_vm_deallocate(mach_task_self(), to_dealloc, PAGE_SIZE);
		if (kr != KERN_SUCCESS) {
			throw std::runtime_error("mach_vm_deallocate failed: " + std::string(mach_error_string(kr)) + "\n");
		}
	}

	// Slides the source/target addresses and object offset forward past the
	// detached prefix.
	void
	adjust_addresses_and_offset(
		uint64_t detached_num_pages, uint64_t detached_size)
	{
		args.arg_src_address += detached_size;
		args.arg_target_address += detached_size;
		offset_in_pages += detached_num_pages;
	}

	// Shrinks the wrapper's notion of its size by the detached prefix.
	void
	shrink_size(
		uint64_t detached_num_pages, uint64_t detached_size)
	{
		num_pages -= detached_num_pages;
		args.arg_mapping_size -= detached_size;
	}

	/* Fix the wrapper of the mapping after overwriting a part of it, to keep it aligned to real vmmap_entry */
	void
	fix_overwritten_mapping(
		uint64_t detached_num_pages)
	{
		uint64_t detached_size = detached_num_pages * PAGE_SIZE;
		id *= 2; // overwriting mappings take the odd ids (2*i + 1); survivors become even
		shrink_size(detached_num_pages, detached_size);
		adjust_addresses_and_offset(detached_num_pages, detached_size);
		create_gap_before();
	}

	void
	print_mapping()
	{
		T_LOG("\tMAPPING #%2d, from address: %llx, to address: %llx, offset: %2llu, size: %4llu "
		    "pages\n",
		    id, args.arg_src_address, args.arg_target_address, offset_in_pages, num_pages);
	}

	// Last page index (inclusive) this mapping covers within the object.
	uint64_t
	get_end()
	{
		return offset_in_pages + args.arg_mapping_size / PAGE_SIZE - 1;
	}

	void
	add_child(Mapping *other)
	{
		children.emplace_back(other);
	}

	// Recursively logs the remap tree rooted at this mapping.
	void
	print_as_tree(const std::string &prefix = "", bool isLast = true)
	{
		T_LOG("%s%s%d", prefix.c_str(), (isLast ? "└── " : "├── "), id);

		std::string newPrefix = prefix + (isLast ? "    " : "│   ");

		for (uint32_t i = 0; i < children.size(); i++) {
			children[i]->print_as_tree(newPrefix, i == children.size() - 1);
		}
	}

	// Data members:

	uint32_t id = 0;                // 1-based; doubled by fix_overwritten_mapping()
	uint64_t offset_in_pages = 0;   // offset of this mapping within the source object
	MappingArgs args;
	uint64_t num_pages = 0;
	std::vector<Mapping *> children; // remap-tree children (Topology policy)
	uint32_t fd = 0;                // backing file, 0 for anonymous objects
	std::shared_ptr<std::shared_mutex> lock;
	// NOTE(review): reference into the owning std::vector<Mapping> --
	// stays valid only because the container is reserve()d / re-set after
	// sorting; verify on any new policy.
	std::optional<std::reference_wrapper<Mapping> > src_mapping;
	bool is_mapped; // set on remap() and cleared on deallocate().

	/**
	 * Regarding the locks: (reasoning for shared_ptr)
	 * In some cases (MappingsManager::policy==MappingPolicy::Topology), the source for this mapping is another mapping.
	 * This case requires, in certain ops (op_de_re_allocate()), to also hold the source's lock.
	 * That means lock is going to be under shared ownership and therefore the locks should be in a shared_ptr.
	 */
	uint32_t op_denom = 1; // tells the various operations what part of num_pages to include.
	static inline std::vector<std::pair<vm_op, const std::string> > ops = {
		{&Mapping::op_protect, "protect"},
		{&Mapping::op_wire, "wire"},
		{&Mapping::op_write, "write"},
		{&Mapping::op_unwire, "unwire"},
		{&Mapping::op_pageout, "pageout"}};
	/*
	 * The following is disabled due to a deadlock it causes in the kernel too frequently
	 * (and we want a running stress test). See rdar://146761078
	 * Once this deadlock is solved, we should uncomment it.
	 */
	// {&Mapping::op_write_direct, "write_direct"},
};
469 
470 /**
471  * Creates and wraps the memory object
472  */
473 class Object
474 {
475 public:
476 	// Default constructor:
Object()477 	Object() : id(0), num_pages(0)
478 	{
479 	}
480 
481 	// Constructor:
Object(uint32_t _id,uint32_t num_pages)482 	Object(
483 		uint32_t _id, uint32_t num_pages)
484 		: id(_id), num_pages(num_pages)
485 	{
486 	}
487 
488 	// Memeber functions:
489 
490 	// Creation:
491 
492 	int
open_file_slow_paging()493 	open_file_slow_paging()
494 	{
495 		std::string slow_file = std::string(slow_dmg_path) + "/file.txt";
496 		fd = open(slow_file.c_str(), O_CREAT | O_RDWR, S_IWUSR | S_IRUSR);
497 		if (fd < 0) {
498 			throw std::runtime_error("open() failed. err=" + std::to_string(errno) + "\n");
499 		}
500 
501 		T_LOG("File created in slow ramdisk: %s\n", slow_file.c_str());
502 
503 		return fd;
504 	}
505 
506 	int
open_file()507 	open_file()
508 	{
509 		std::string template_str = "/tmp/some_file_" + std::to_string(id) + "XXXXXX";
510 		char template_filename[template_str.size() + 1];
511 		strcpy(template_filename, template_str.c_str());
512 
513 		fd = mkstemp(template_filename);
514 		if (fd == -1) {
515 			throw std::runtime_error("mkstemp failed. err=" + std::to_string(errno) + "\n");
516 		}
517 
518 		T_LOG("Temporary file created: %s\n", template_filename);
519 
520 		return fd;
521 	}
522 
523 	void
close_file()524 	close_file()
525 	{
526 		close(fd);
527 		fd = 0;
528 	}
529 
530 	int
create_source_from_file(bool slow_paging)531 	create_source_from_file(bool slow_paging)
532 	{
533 		// File opening/creation:
534 		int fd = 0;
535 		struct stat st;
536 
537 		if (slow_paging) {
538 			fd = open_file_slow_paging();
539 		} else {
540 			fd = open_file();
541 		}
542 
543 		if (fd < 0) {
544 			return fd;
545 		}
546 
547 		if (ftruncate(fd, num_pages * PAGE_SIZE) < 0) {
548 			throw std::runtime_error("ftruncate failed. err=" + std::to_string(errno) + "\n");
549 		}
550 
551 		// Mapping file to memory:
552 		src = (mach_vm_address_t)mmap(NULL, num_pages * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
553 		if ((void *)src == MAP_FAILED) {
554 			throw std::runtime_error("mmap failed. err=" + std::to_string(errno) + "\n");
555 		}
556 
557 		return 0;
558 	}
559 
560 	int
create_source_anon()561 	create_source_anon()
562 	{
563 		uint32_t anywhere_flag = TRUE;
564 		kern_return_t kr = mach_vm_allocate(mach_task_self(), &src, num_pages * PAGE_SIZE, anywhere_flag);
565 		if (kr != KERN_SUCCESS) {
566 			throw std::runtime_error("mach_vm_allocate failed: " + std::string(mach_error_string(kr)) + "\n");
567 		}
568 		return 0;
569 	}
570 
571 	int
create_source(bool is_file,bool slow_paging)572 	create_source(
573 		bool is_file, bool slow_paging)
574 	{
575 		if (is_file) {
576 			return create_source_from_file(slow_paging);
577 		} else {
578 			return create_source_anon();
579 		}
580 	}
581 
582 	static uint64_t
random_object_size(uint64_t obj_size)583 	random_object_size(
584 		uint64_t obj_size)
585 	{
586 		uint32_t min_obj_size = 16; // (in pages)
587 		return random_between(min_obj_size, obj_size);
588 	}
589 
590 	// Miscellaneous:
591 
592 	void
print_object()593 	print_object()
594 	{
595 		T_LOG(" -----------------------------------------------------------------------------");
596 		T_LOG(" OBJECT #%d, size: %llu pages, object address: %llx\n", id, num_pages, src);
597 	}
598 
599 	// Data members:
600 	uint32_t id = 0;
601 	uint64_t num_pages = 0;
602 	mach_vm_address_t src = 0;
603 	int fd = 0;
604 	static inline char slow_dmg_path[] = "/Volumes/apfs-slow";
605 };
606 
607 /**
608  * Creates and manages the different mappings of an object.
609  */
610 class MappingsManager
611 {
612 public:
613 	// Constructor:
MappingsManager(const Object & _obj,MappingPolicy _policy)614 	MappingsManager(
615 		const Object &_obj, MappingPolicy _policy)
616 		: obj(_obj), policy(_policy)
617 	{
618 	}
619 
620 	// Destructor:
~MappingsManager()621 	~MappingsManager()
622 	{
623 		for (uint32_t i = 0; i < ranges.size(); i++) {
624 			if (buffers[i]) {
625 				mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)buffers[i], ranges[i].second - ranges[i].first + 2);
626 				buffers[i] = nullptr;
627 			}
628 		}
629 	}
630 
631 	enum topology {
632 		chain,
633 		star,
634 		ternary,
635 		random
636 	};
637 
638 	// Member functions:
639 
640 	std::string
topo_to_string()641 	topo_to_string()
642 	{
643 		switch (topo) {
644 		case chain:
645 			return "chain";
646 		case star:
647 			return "star";
648 		case ternary:
649 			return "ternary";
650 		case random:
651 			return "random";
652 		default:
653 			return "unknown";
654 		}
655 	}
656 
657 	// Partition stuff:
658 
659 	void
create_general_borders(std::vector<uint64_t> & general_borders)660 	create_general_borders(
661 		std::vector<uint64_t> &general_borders)
662 	{
663 		uint64_t gap = obj.num_pages / (num_mappings);
664 		general_borders.emplace_back(1);
665 		for (uint32_t i = 1; i < (num_mappings); i++) {
666 			general_borders.emplace_back(gap * i);
667 		}
668 	}
669 
670 	void
create_borders(std::vector<uint64_t> & borders)671 	create_borders(
672 		std::vector<uint64_t> &borders)
673 	{
674 		std::vector<uint64_t> general_borders;
675 		create_general_borders(general_borders);
676 		borders.emplace_back(0);
677 
678 		for (uint32_t i = 0; i < general_borders.size() - 1; i++) {
679 			borders.emplace_back(
680 				random_between(general_borders[i], general_borders[i + 1] - 1));
681 		}
682 		borders.emplace_back(obj.num_pages);
683 	}
684 
685 	void
convert_borders_to_ranges(std::vector<uint64_t> & borders)686 	convert_borders_to_ranges(
687 		std::vector<uint64_t> &borders)
688 	{
689 		for (uint32_t i = 0; i < borders.size() - 1; ++i) {
690 			ranges.emplace_back(borders[i], borders[i + 1] - 1);
691 		}
692 	}
693 
694 	void
make_random_partition()695 	make_random_partition()
696 	{
697 		std::vector<uint64_t> borders;
698 		create_borders(borders);
699 		convert_borders_to_ranges(borders);
700 	}
701 
702 	void
print_partition()703 	print_partition()
704 	{
705 		printf("| PARTITION:\t| ");
706 		for (const auto &range : ranges) {
707 			printf("%3d -- %3d", range.first, range.second);
708 		}
709 		printf("%*s|\n", 30, "");
710 		for (auto &m : mappings) {
711 			m.print_mapping();
712 		}
713 	}
714 
715 	// Creation:
716 
717 	void
create_seq(std::vector<uint32_t> & seq)718 	create_seq(std::vector<uint32_t> &seq)
719 	{
720 		seq.emplace_back(0);
721 		for (uint32_t i = 1; i < num_mappings; i++) {
722 			switch (topo) {
723 			case chain:
724 				seq.emplace_back(i);
725 				break;
726 
727 			case random:
728 				seq.emplace_back(random_between(0, i));
729 				break;
730 
731 			case star:
732 				seq.emplace_back(0);
733 				break;
734 
735 			case ternary:
736 				seq.emplace_back(i / 3);
737 				break;
738 
739 			default:
740 				throw std::runtime_error("create_seq: topology undefined");
741 				break;
742 			}
743 		}
744 		T_LOG("topology: %s", topo_to_string().c_str());
745 	}
746 
747 	void
allocate_buffer(uint64_t num_pages_to_alloc)748 	allocate_buffer(
749 		uint64_t num_pages_to_alloc)
750 	{
751 		// buffers.emplace_back((char *)malloc((obj.num_pages + 1) * PAGE_SIZE)); // One extra page for a gap
752 		mach_vm_address_t buff;
753 		kern_return_t kr = mach_vm_allocate(mach_task_self(), &buff, num_pages_to_alloc * PAGE_SIZE, TRUE);
754 		if (kr != KERN_SUCCESS) {
755 			throw std::runtime_error("Failed to allocate buffer in object #" + std::to_string(obj.id) + "\n");
756 		}
757 		buffers.push_back((char *)buff);
758 	}
759 
760 	void
initialize_partition_buffers()761 	initialize_partition_buffers()
762 	{
763 		for (auto &range : ranges) {
764 			allocate_buffer(range.second - range.first + 2);
765 		}
766 	}
767 
768 	MappingArgs
initialize_basic_args()769 	initialize_basic_args()
770 	{
771 		MappingArgs args;
772 		args.arg_src_address = obj.src;
773 		args.arg_copy = is_cow;
774 		args.arg_flags = mpng_flags;
775 		return args;
776 	}
777 
778 	void
map_by_seq(std::vector<uint32_t> & seq)779 	map_by_seq(std::vector<uint32_t> &seq)
780 	{
781 		// First mapping of the source object:
782 		MappingArgs args = initialize_basic_args();
783 		allocate_buffer(obj.num_pages + 1);
784 		args.arg_target_address = (mach_vm_address_t)(buffers[0] + PAGE_SIZE);
785 		args.arg_mapping_size = obj.num_pages * PAGE_SIZE;
786 		mappings.emplace_back(Mapping(1, 0, args, obj.fd));
787 
788 		// Re-mappings of the first mappings, according to the given seqence:
789 		for (uint32_t i = 1; i < num_mappings; i++) {
790 			allocate_buffer(obj.num_pages + 1);
791 			args.arg_src_address = mappings[seq[i - 1]].args.arg_target_address;
792 			args.arg_target_address = (mach_vm_address_t)(buffers[i]);
793 			mappings.emplace_back(Mapping(i + 1, 0, args, obj.fd));
794 			mappings[seq[i - 1]].add_child(&mappings[i]);
795 			mappings[i].set_src_mapping(mappings[seq[i - 1]]);
796 		}
797 		mappings[0].print_as_tree();
798 	}
799 
800 	/* Mode 1 - maps parts of the object to parts of the (only) buffer. Every page is mapped exactly once. */
801 	void
map_by_random_partition()802 	map_by_random_partition()
803 	{
804 		make_random_partition();
805 		initialize_partition_buffers();
806 		MappingArgs args = initialize_basic_args();
807 		for (uint32_t i = 0; i < num_mappings; i++) {
808 			args.arg_target_address = (mach_vm_address_t)(buffers[i] + PAGE_SIZE);
809 			args.arg_mapping_size = (ranges[i].second - ranges[i].first + 1) * PAGE_SIZE;
810 			mappings.emplace_back(Mapping(i + 1, ranges[i].first, args, obj.fd));
811 		}
812 	}
813 
814 	/* Modes 2,4 - maps the entire object to different buffers (which all have the same size as the object). */
815 	void
map_one_to_many(bool extra)816 	map_one_to_many(
817 		bool extra)
818 	{
819 		uint32_t num_pages_for_gaps = extra ? 2 : 1;
820 		MappingArgs args = initialize_basic_args();
821 		for (uint32_t i = 0; i < num_mappings; i++) {
822 			allocate_buffer(obj.num_pages + num_pages_for_gaps);
823 			args.arg_target_address = (mach_vm_address_t)(buffers[i] + PAGE_SIZE * num_pages_for_gaps);
824 			args.arg_mapping_size = obj.num_pages * PAGE_SIZE;
825 			mappings.emplace_back(Mapping(i + 1, 0, args, obj.fd));
826 		}
827 	}
828 
829 	/* Mode 3 - maps the source object in a certain CoW-topology, based on the given sequence. */
830 	void
map_topo()831 	map_topo()
832 	{
833 		std::vector<uint32_t> seq;
834 		create_seq(seq);
835 		map_by_seq(seq);
836 	}
837 
838 	void
map()839 	map()
840 	{
841 		switch (policy) {
842 		case MappingPolicy::RandomPartition:
843 			map_by_random_partition();
844 			break;
845 		case MappingPolicy::OneToMany:
846 			map_one_to_many(false);
847 			break;
848 		case MappingPolicy::Overwrite:
849 			map_one_to_many(true);
850 			break;
851 		case MappingPolicy::Topology:
852 			num_mappings *= 4;
853 			mappings.reserve(num_mappings);
854 			topo = static_cast<topology>((obj.id - 1) % 4); // Each object (out of every 4 consecutive objects) will be remapped in a different CoW topology.
855 			map_topo();
856 			break;
857 		default:
858 			break;
859 		}
860 	}
861 
862 	void
set_srcs()863 	set_srcs()
864 	{
865 		for (uint32_t i = 1; i < mappings.size(); i++) {
866 			mappings[i].set_src_mapping(mappings[i - 1]);
867 		}
868 	}
869 
870 	/* Overwrites the first n/x pages of each mapping */
871 	void
overwrite_mappings()872 	overwrite_mappings()
873 	{
874 		uint64_t num_pages_to_overwrite = obj.num_pages / overwrite_denom;
875 		MappingArgs args = initialize_basic_args();
876 		for (uint32_t i = 0; i < num_mappings; i++) {
877 			args.arg_target_address = (mach_vm_address_t)(buffers[i] + PAGE_SIZE);
878 			args.arg_mapping_size = num_pages_to_overwrite * PAGE_SIZE;
879 			mappings.emplace_back(Mapping(2 * i + 1, 0, args, obj.fd));
880 			mappings[i].fix_overwritten_mapping(num_pages_to_overwrite);
881 		}
882 		std::sort(mappings.begin(), mappings.end(), Mapping::compare_by_id);
883 		set_srcs(); // set the src (parent) lock for each newly created mapping to facilitate op_de_re_allocate().
884 	}
885 
886 	// "User space" validation:
887 
888 	bool
validate_sum()889 	validate_sum()
890 	{
891 		uint64_t sum = 0;
892 
893 		for (const auto &mapping : mappings) {
894 			sum += mapping.num_pages;
895 		}
896 		if (sum != obj.num_pages) {
897 			return false;
898 		}
899 		return true;
900 	}
901 
902 	bool
validate_consecutiveness()903 	validate_consecutiveness()
904 	{
905 		for (int i = 0; i < mappings.size() - 1; i++) {
906 			if (mappings[i].offset_in_pages + mappings[i].num_pages !=
907 			    mappings[i + 1].offset_in_pages) {
908 				return false;
909 			}
910 		}
911 		return true;
912 	}
913 
914 	bool
validate_start_and_end()915 	validate_start_and_end()
916 	{
917 		for (int i = 0; i < mappings.size() - 1; i++) {
918 			if (mappings[i].offset_in_pages + mappings[i].num_pages !=
919 			    mappings[i + 1].offset_in_pages) {
920 				return false;
921 			}
922 		}
923 		return true;
924 	}
925 
926 	bool
validate_all_sizes()927 	validate_all_sizes()
928 	{
929 		for (const auto &mapping : mappings) {
930 			if (mapping.num_pages != obj.num_pages) {
931 				return false;
932 			}
933 		}
934 		return true;
935 	}
936 
937 	bool
validate_partition()938 	validate_partition()
939 	{
940 		return validate_sum() && validate_consecutiveness() && validate_start_and_end();
941 	}
942 
943 	bool
validate_one_to_many()944 	validate_one_to_many()
945 	{
946 		return validate_all_sizes();
947 	}
948 
949 	bool
validate_user_space()950 	validate_user_space()
951 	{
952 		switch (policy) {
953 		case MappingPolicy::RandomPartition:
954 			return validate_partition();
955 			break;
956 		case MappingPolicy::OneToMany:
957 			return validate_one_to_many();
958 			break;
959 		default:
960 			return true;
961 			break;
962 		}
963 	}
964 
965 	// Miscellaneous:
966 
967 	void
set_flags(uint32_t flags)968 	set_flags(
969 		uint32_t flags)
970 	{
971 		mpng_flags = flags;
972 	}
973 
974 	void
set_is_cow(bool _is_cow)975 	set_is_cow(
976 		bool _is_cow)
977 	{
978 		is_cow = _is_cow;
979 	}
980 
981 	void
print_all_mappings()982 	print_all_mappings()
983 	{
984 		for (auto &mpng : mappings) {
985 			mpng.print_mapping();
986 		}
987 	}
988 
989 	// Data members:
990 	uint32_t num_mappings = 4;
991 	static inline uint32_t overwrite_denom = 2;
992 	/**
993 	 * Sets the part to overwrite in case MappingsManager::policy==MappingPolicy::Overwrite.
994 	 * It's the same for all of the mappings and has to be visible outside of the class for logging purposes. Therefore it's static.
995 	 */
996 	Object obj;
997 	std::vector<Mapping> mappings;
998 	MappingPolicy policy = MappingPolicy::OneToMany;
999 	std::vector<char *> buffers;
1000 	std::vector<std::pair<uint32_t, uint32_t> > ranges;
1001 	uint32_t mpng_flags = 0;
1002 	bool is_cow = false;
1003 	topology topo = topology::random;
1004 };
1005 
1006 class Memory
1007 {
1008 	using vm_op = std::function<bool (Mapping *)>;
1009 
1010 public:
1011 	// Member functions:
1012 
1013 	// Creation:
1014 
1015 	int
create_objects(uint32_t num_objects,uint64_t obj_size,MappingPolicy policy,bool is_file,bool is_cow,bool slow_paging)1016 	create_objects(
1017 		uint32_t num_objects, uint64_t obj_size, MappingPolicy policy, bool is_file, bool is_cow, bool slow_paging)
1018 	{
1019 		for (uint32_t i = 1; i <= num_objects; i++) {
1020 			Object o(i, obj_size);
1021 			if (o.create_source(is_file, slow_paging) == 0) {
1022 				managers.emplace_back(std::make_unique<MappingsManager>(o, policy));
1023 			} else {
1024 				throw std::runtime_error("Error creating source object #" + std::to_string(i) + "\n");
1025 			}
1026 		}
1027 		return 0;
1028 	}
1029 
1030 	void
create_mappings(uint32_t flags,bool is_cow)1031 	create_mappings(
1032 		uint32_t flags, bool is_cow)
1033 	{
1034 		for (auto &mngr : managers) {
1035 			mngr->set_flags(flags);
1036 			mngr->set_is_cow(is_cow);
1037 			mngr->map();
1038 		}
1039 	}
1040 
1041 	void
close_all_files()1042 	close_all_files()
1043 	{
1044 		for (auto &mngr : managers) {
1045 			mngr->obj.close_file();
1046 		}
1047 	}
1048 
1049 	// Thread-related operations:
1050 
	/**
	 * Applies `op` once to every mapping of every object, tallying the
	 * results into op_status_counters[op_idx].
	 * NOTE(review): several worker threads of the same op type share
	 * op_idx, and these increments are unsynchronized -- if the counter
	 * fields are plain integers this is a data race; confirm, or make the
	 * counters atomic.
	 * @return always true (feeds the worker's keep-going check in
	 *         start_thread()).
	 */
	bool
	run_op_on_all_mappings(
		const std::pair<vm_op, std::string> *op, uint32_t op_idx)
	{
		for (auto &mngr : managers) {
			for (auto &m : mngr->mappings) {
				if (m.run_op(op)) {
					op_status_counters[op_idx].success++;
				} else {
					op_status_counters[op_idx].fail++;
				}
			}
		}
		return true;
	}
1066 
1067 	void
num2op(std::pair<vm_op,std::string> * op,uint32_t thread_number)1068 	num2op(
1069 		std::pair<vm_op, std::string> *op, uint32_t thread_number)
1070 	{
1071 		op->first  = Mapping::ops[thread_number % Mapping::ops.size()].first;
1072 		op->second = Mapping::ops[thread_number % Mapping::ops.size()].second;
1073 	}
1074 
1075 	void
print_thread_started(uint32_t thread_number,std::string thread_name)1076 	print_thread_started(
1077 		uint32_t thread_number, std::string thread_name)
1078 	{
1079 		uint32_t allowed_prints = Mapping::ops.size() * 3;
1080 		if (thread_number < allowed_prints) {
1081 			T_LOG("Starting thread: %s", thread_name.c_str());
1082 		} else if (thread_number == allowed_prints) {
1083 			T_LOG("...\n");
1084 		}
1085 		// Else: we've printed enough, don't make a mess on the console
1086 	}
1087 
	/**
	 * Launches one async worker that repeatedly applies its assigned
	 * operation to every mapping, until the global `runner` leaves the
	 * paused/running states.
	 * @return the future the caller stores and later get()s in
	 *         join_threads() (which also propagates worker exceptions).
	 */
	std::future<void>
	start_thread(
		uint32_t thread_number)
	{
		uint32_t op_name_length = 16; // Just the length of the longest op name, for nicer printing of op_count
		std::pair<vm_op, std::string> operation;
		std::string thread_name;
		// Threads cycle through the ops table; this is the per-op serial number.
		uint32_t thread_number_remainder = thread_number / Mapping::ops.size();
		num2op(&operation, thread_number);
		std::string operation_name_aligned = operation.second; // For nice printing only
		if (operation_name_aligned.length() < op_name_length) {
			operation_name_aligned = operation_name_aligned + std::string(op_name_length - operation_name_aligned.length(), ' '); // Pad if shorter than op_name_length
		}
		thread_name = operation_name_aligned + " #" + std::to_string(thread_number_remainder + 1);

		print_thread_started(thread_number, thread_name);

		// `operation` is captured by copy so it outlives this frame.
		return std::async(std::launch::async, [this, operation, thread_name, thread_number]() { /* lambda: */
			while (runner.state != TestRuntime::error &&
			runner.state != TestRuntime::complete) {
			        if (runner.state == TestRuntime::running) {
			                bool running = this->run_op_on_all_mappings(&operation, thread_number % Mapping::ops.size());
			                if (!running) {
			                        break;
					}
				}
			}
		});
	}
1117 
1118 	void
start_ops(uint32_t num_threads)1119 	start_ops(
1120 		uint32_t num_threads)
1121 	{
1122 		for (uint32_t i = 0; i < Mapping::ops.size(); i++) {
1123 			op_status_counters.emplace_back(0, 0);
1124 		}
1125 
1126 		for (uint32_t i = 0; i < num_threads * Mapping::ops.size(); i++) {
1127 			futures.emplace_back(start_thread(i));
1128 		}
1129 	}
1130 
1131 	void
join_threads()1132 	join_threads()
1133 	{
1134 		for (auto &f : futures) {
1135 			f.get(); // This replaces thread.join() in order to propogate the exceptions raised from non main threads
1136 		}
1137 	}
1138 
1139 	// Miscellaneous:
1140 
1141 	void
print_mem_layout()1142 	print_mem_layout()
1143 	{
1144 		T_LOG("\nmemory layout:");
1145 		uint32_t allowed_prints = 3;
1146 		for (uint32_t i = 0; i < managers.size() && i < allowed_prints; i++) {
1147 			managers[i]->obj.print_object();
1148 			managers[i]->print_all_mappings();
1149 		}
1150 		T_LOG(" -----------------------------------------------------------------------------");
1151 		T_LOG("...\n");
1152 	}
1153 
1154 	void
print_op_counts()1155 	print_op_counts()
1156 	{
1157 		for (uint32_t i = 0; i < Mapping::ops.size(); i++) {
1158 			T_LOG("%16s: successes %7d :|: fails: %7d", Mapping::ops[i].second.c_str(), op_status_counters[i].success, op_status_counters[i].fail);
1159 		}
1160 	}
1161 
1162 	void
overwrite_all()1163 	overwrite_all()
1164 	{
1165 		for (auto &mngr : managers) {
1166 			mngr->overwrite_mappings();
1167 		}
1168 	}
1169 
1170 	bool
validate()1171 	validate()
1172 	{
1173 		for (auto &mngr : managers) {
1174 			if (!mngr->validate_user_space()) {
1175 				return false;
1176 			}
1177 		}
1178 		return true;
1179 	}
1180 
1181 	void
print_test_result()1182 	print_test_result()
1183 	{
1184 		T_LOG("\ninner validation: OBJECTS AND MAPPINGS APPEAR %s", validate() ? "AS EXPECTED" : "*NOT* AS EXPECTED");
1185 	}
1186 
	// Data members:

	// One manager (a source object plus all of its mappings) per created object.
	std::vector<std::unique_ptr<MappingsManager> > managers;
	// One future per worker thread started by start_ops(); drained by join_threads().
	std::vector<std::future<void> > futures;
	// Shared across all instances (static inline): per-op success/fail tallies,
	// indexed in the same order as Mapping::ops.
	static inline std::vector<struct status_counters> op_status_counters;
};
1193 
1194 uint32_t
run_test(const TestParams & tp)1195 run_test(
1196 	const TestParams &tp)
1197 {
1198 	Memory memory;
1199 	uint32_t status;
1200 
1201 	int src_created_successfully = memory.create_objects(tp.num_objects, tp.obj_size, tp.policy, tp.is_file, tp.is_cow, tp.slow_paging);
1202 	if (src_created_successfully != 0) {
1203 		throw std::runtime_error("problem with creating source objects\n");
1204 	}
1205 
1206 	memory.create_mappings(tp.mpng_flags, tp.is_cow);
1207 	memory.print_mem_layout();
1208 
1209 	if (tp.policy == MappingPolicy::Overwrite) {
1210 		memory.overwrite_all();
1211 		T_LOG("1 / %d of each mapping got overwritten\n", MappingsManager::overwrite_denom);
1212 		memory.print_mem_layout();
1213 	}
1214 
1215 	memory.start_ops(tp.num_threads);
1216 
1217 	status = runner.wait_for_status(tp.runtime_secs);
1218 
1219 	memory.join_threads();
1220 	memory.print_op_counts();
1221 	memory.close_all_files();
1222 	memory.print_test_result();
1223 
1224 	T_LOG("test finished\n");
1225 	return status;
1226 }
1227 
1228 void
try_catch_test(TestParams & tp)1229 try_catch_test(TestParams &tp)
1230 {
1231 	try
1232 	{
1233 		if (run_test(tp)) {
1234 			T_FAIL("Test failed");
1235 		} else {
1236 			T_PASS("Test passed");
1237 		}
1238 	}
1239 
1240 	catch (const std::runtime_error &e)
1241 	{
1242 		T_FAIL("Caught a runtime error: %s", e.what());
1243 	}
1244 }
1245 
void
print_help()
{
	// Argument reference for the `config` test. Parameters are listed in the
	// same order as the usage line (the old text listed them out of order and
	// never documented the optional -s flag).
	printf("\n\nUsage: <path_to_executable>/vm_stress config -- <mapping_policy> <num_objects> <obj_size> <runtime_secs> <num_threads> <is_cow> <is_file> [-s]\n\n");

	printf("  <mapping_policy>   Policy for mapping (part/one_to_many/over/topo)\n");
	printf("  <num_objects>      Number of objects the test will create and work on\n");
	printf("  <obj_size>         Size of each object (>=16)\n");
	printf("  <runtime_secs>     Test duration in seconds\n");
	printf("  <num_threads>      Number of threads to use for each operation\n");
	printf("  <is_cow>           Copy-on-write flag (0 or 1)\n");
	printf("  <is_file>          File flag (0 or 1)\n");
	printf("  -s                 Optional: enable slow paging\n\n");
}
1259 
1260 void
string_to_policy(MappingPolicy & policy,std::string policy_str)1261 string_to_policy(
1262 	MappingPolicy &policy, std::string policy_str)
1263 {
1264 	const std::map<std::string, MappingPolicy> string_to_policy =
1265 	{
1266 		{"part", MappingPolicy::RandomPartition},
1267 		{"one_to_many", MappingPolicy::OneToMany},
1268 		{"over", MappingPolicy::Overwrite},
1269 		{"topo", MappingPolicy::Topology},
1270 	};
1271 
1272 	auto it = string_to_policy.find(policy_str);
1273 
1274 	if (it != string_to_policy.end()) {
1275 		policy = it->second;
1276 	} else {
1277 		throw std::runtime_error("Invalid policy string: \"" + policy_str + "\"\n");
1278 	}
1279 }
1280 
1281 T_DECL(config, "configurable", T_META_ENABLED(false) /* rdar://142726486 */)
1282 {
1283 	bool slow_paging = false;
1284 	int opt;
1285 
1286 	for (int i = 0; i < argc; i++) {
1287 		if (strcmp(argv[i], "-s") == 0) {
1288 			slow_paging = true;
1289 		} else if (strcmp(argv[i], "-h") == 0) {
1290 			print_help();
1291 			T_PASS("help configs");
1292 			return;
1293 		}
1294 	}
1295 
1296 	if (argc == 0) {
1297 		printf("\n\n\nNo arguments for configurable test, assuming intention was to skip it.\n\n\n");
1298 		T_PASS("config - no args given");
1299 		return;
1300 	}
1301 
1302 	if (argc != 7 && argc != 8) {
1303 		printf("\n\n\nWrong number of arguments.\n");
1304 		printf("Usage: <path_to_executable>/vm_stress config -- <mapping_policy> <num_objects> <obj_size> <runtime_secs> <num_threads> <is_cow> <is_file>\nPolicies: part/one_to_many/over/topo\n\n");
1305 		printf("Run \"<path_to_executable>/vm_stress config -- -h\" for more info\n\n\n");
1306 		T_PASS("config - not enough/too many args");
1307 		return;
1308 	}
1309 
1310 	std::string policy_str(argv[0]);
1311 	MappingPolicy policy;
1312 	string_to_policy(policy, policy_str);
1313 
1314 	uint32_t num_objects = strtoul(argv[1], NULL, 0);
1315 
1316 	uint64_t obj_size = strtoull(argv[2], NULL, 0); // In pages
1317 
1318 	if (obj_size < 16) {
1319 		throw std::runtime_error("obj_size must be more than 16\n");
1320 	}
1321 
1322 	uint32_t runtime_secs = strtoul(argv[3], NULL, 0);
1323 
1324 	uint32_t num_threads = strtoul(argv[4], NULL, 0);
1325 
1326 	bool is_cow = strtoul(argv[5], NULL, 0);
1327 
1328 	bool is_file = strtoul(argv[6], NULL, 0);
1329 
1330 	TestParams params = {
1331 		.num_objects = num_objects,
1332 		.obj_size = obj_size,
1333 		.runtime_secs = runtime_secs,
1334 		.num_threads = num_threads,
1335 		.policy = policy,
1336 		.is_cow = is_cow,
1337 		.is_file = is_file,
1338 		.slow_paging = slow_paging};
1339 
1340 	try_catch_test(params);
1341 }
1342 
1343 T_DECL(vm_stress1, "partitions")
1344 {
1345 	TestParams params = {
1346 		.num_objects = 5,
1347 		.obj_size = 32,
1348 		.runtime_secs = 3,
1349 		.num_threads = 2,
1350 		.policy = MappingPolicy::RandomPartition,
1351 		.is_cow = true,
1352 		.is_file = true,
1353 		.slow_paging = false};
1354 
1355 	try_catch_test(params);
1356 }
1357 
1358 T_DECL(vm_stress2, "cow topologies")
1359 {
1360 	TestParams params = {
1361 		.num_objects = 10,
1362 		.obj_size = 32,
1363 		.runtime_secs = 4,
1364 		.num_threads = 4,
1365 		.policy = MappingPolicy::Topology,
1366 		.is_cow = true,
1367 		.is_file = true,
1368 		.slow_paging = false};
1369 
1370 	try_catch_test(params);
1371 }
1372 
1373 T_DECL(vm_stress3, "overwrite")
1374 {
1375 	TestParams params = {
1376 		.num_objects = 10,
1377 		.obj_size = 16,
1378 		.runtime_secs = 3,
1379 		.num_threads = 2,
1380 		.policy = MappingPolicy::Overwrite,
1381 		.is_cow = true,
1382 		.is_file = true,
1383 		.slow_paging = false};
1384 
1385 	try_catch_test(params);
1386 }
1387 
1388 T_DECL(vm_stress4, "partitions - not file-backed")
1389 {
1390 	TestParams params = {
1391 		.num_objects = 5,
1392 		.obj_size = 32,
1393 		.runtime_secs = 3,
1394 		.num_threads = 2,
1395 		.policy = MappingPolicy::RandomPartition,
1396 		.is_cow = true,
1397 		.is_file = false,
1398 		.slow_paging = false};
1399 
1400 	try_catch_test(params);
1401 }
1402 
1403 T_DECL(vm_stress5, "cow topologies - not file-backed")
1404 {
1405 	TestParams params = {
1406 		.num_objects = 10,
1407 		.obj_size = 32,
1408 		.runtime_secs = 4,
1409 		.num_threads = 4,
1410 		.policy = MappingPolicy::Topology,
1411 		.is_cow = true,
1412 		.is_file = false,
1413 		.slow_paging = false};
1414 
1415 	try_catch_test(params);
1416 }
1417 
1418 T_DECL(vm_stress6, "overwrite - not file-backed")
1419 {
1420 	TestParams params = {
1421 		.num_objects = 10,
1422 		.obj_size = 16,
1423 		.runtime_secs = 3,
1424 		.num_threads = 2,
1425 		.policy = MappingPolicy::Overwrite,
1426 		.is_cow = true,
1427 		.is_file = false,
1428 		.slow_paging = false};
1429 
1430 	try_catch_test(params);
1431 }
1432 
1433 T_DECL(vm_stress7, "one to many - not CoW and not file-backed")
1434 {
1435 	TestParams params = {
1436 		.num_objects = 5,
1437 		.obj_size = 100,
1438 		.runtime_secs = 10,
1439 		.num_threads = 3,
1440 		.policy = MappingPolicy::OneToMany,
1441 		.is_cow = false,
1442 		.is_file = false,
1443 		.slow_paging = false};
1444 
1445 	try_catch_test(params);
1446 }
1447 
1448 T_DECL(vm_stress_hole, "Test locking of ranges with holes in them.")
1449 {
1450 	uint32_t num_secs = 5;
1451 	uint32_t half_of_num_mappings = 5; // To ensure num_mappings is an even number.
1452 	std::vector<mach_vm_address_t> mappings;
1453 	mach_vm_address_t addr0;
1454 	mach_vm_allocate(mach_task_self(), &addr0, PAGE_SIZE, TRUE);
1455 	mappings.emplace_back(addr0);
1456 	for (uint32_t i = 1; i < half_of_num_mappings * 2; i++) {
1457 		mach_vm_address_t addri = addr0 + PAGE_SIZE * 2 * i;
1458 		mach_vm_allocate(mach_task_self(), &addri, PAGE_SIZE, FALSE);
1459 		mappings.emplace_back(addri);
1460 	}
1461 	auto start_time = std::chrono::steady_clock::now();
1462 	auto end_time = start_time + std::chrono::seconds(num_secs);
1463 	uint32_t inheritance = 1;
1464 	int err = 0;
1465 	while (std::chrono::steady_clock::now() < end_time) {
1466 		for (uint32_t i = 0; i < half_of_num_mappings * 2; i += 2) {
1467 			if ((err = minherit((void *)mappings[i], 2 * PAGE_SIZE, inheritance % 2)) != 0) {
1468 				break;
1469 			}
1470 		}
1471 		if (err < 0) {
1472 			break;
1473 		}
1474 		inheritance++;
1475 	}
1476 	T_QUIET;
1477 	T_ASSERT_EQ_INT(err, 0, "all calls to minherit returned successfully");
1478 	if (err == 0) {
1479 		T_PASS("HOLE LOCKING PASSED");
1480 	} else {
1481 		T_FAIL("SOME ERROR IN MINHERIT, err=%d", err);
1482 	}
1483 }
1484