xref: /xnu-12377.41.6/tests/unit/mocks/mock_thread.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2025 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include "std_safe.h"
30 #include "dt_proxy.h"
31 #include "mock_thread.h"
32 #include "unit_test_utils.h"
33 #include "mock_thread.h"
34 
35 #include "fibers/fibers.h"
36 #include "fibers/mutex.h"
37 #include "fibers/condition.h"
38 #include "fibers/rwlock.h"
39 #include "fibers/random.h"
40 #include "fibers/checker.h"
41 
42 #include <arm/cpu_data_internal.h> // for cpu_data
43 #include <kern/thread.h>
44 #include <kern/lock_mtx.h>
45 #include <kern/lock_group.h>
46 #include <kern/compact_id.h>
47 #include <kern/task.h>
48 #include <vm/vm_object_xnu.h>
49 
// Body for mocks that are declared but intentionally unimplemented:
// logs the offending function name and dumps a backtrace so the missing
// mock is easy to locate from test output.
#define UNDEFINED_MOCK \
	raw_printf("%s: WIP mock, this should not be called\n", __FUNCTION__); \
	print_current_backtrace();
53 
54 /*
55  * Unit tests that wants to use fibers must redefine this global with a value not 0.
56  * The test executable should not do this directly, instead it should call macro UT_USE_FIBERS in its global scope.
57  *
58  * We use a weak global and not a macro that defines a constructor to avoid initialization code running before such constructor to run
59  * with ut_mocks_use_fibers=0 before that the constructor change its value.
60  * Switching from the pthread mocks to fibers is not supported, we must be consistent from the very beginning.
61  */
62 int ut_mocks_use_fibers __attribute__((weak)) = 0;
63 
64 /*
65  * Unit tests that wants to use fibers with data race checking must redefine this global with a value not 0.
66  * FIBERS_CHECKER=1 as env var will do the same job too.
67  */
68 int ut_fibers_use_data_race_checker __attribute__((weak)) = 0;
69 
70 /*
71  * Unit tests can set this variable to force `lck_rw_lock_shared_to_exclusive` to fail.
72  *
73  * RANGELOCKINGTODO rdar://150846598 model when to return FALSE
74  */
75 bool ut_mocks_lock_upgrade_fail = 0;
76 
77 /*
78  * This constructor is used to set the configuration variables of the fibers using env vars.
79  * The main use case is fuzzing, unit tests should set the variables in the test function or
80  * by calling the correspondig macros (UT_FIBERS_*, see mock_thread.h) in their global scope.
81  */
82 __attribute__((constructor))
83 static void
initialize_fiber_settings(void)84 initialize_fiber_settings(void)
85 {
86 	const char *debug_env = getenv("FIBERS_DEBUG");
87 	if (debug_env != NULL) {
88 		fibers_debug = atoi(debug_env);
89 	}
90 
91 	const char *err_env = getenv("FIBERS_ABORT_ON_ERROR");
92 	if (err_env != NULL) {
93 		fibers_abort_on_error = atoi(err_env);
94 	}
95 
96 	const char *verbose_env = getenv("FIBERS_LOG");
97 	if (verbose_env != NULL) {
98 		fibers_log_level = atoi(verbose_env);
99 	}
100 
101 	const char *prob_env = getenv("FIBERS_MAY_YIELD_PROB");
102 	if (prob_env != NULL) {
103 		fibers_may_yield_probability = atoi(prob_env);
104 	}
105 
106 	const char *checker_env = getenv("FIBERS_CHECK_RACES");
107 	if (checker_env != NULL) {
108 #ifndef __BUILDING_WITH_SANCOV_LOAD_STORES__
109 		raw_printf("==== Fibers data race checker disabled ====\n");
110 		raw_printf("You cannot enable the data race checker if the FIBERS_PREEMPTION=1 flag was to not used as make parameter.");
111 		return;
112 #else
113 		if (!ut_mocks_use_fibers) {
114 			raw_printf("==== Fibers data race checker disabled ====\n");
115 			raw_printf("You cannot enable the data race checker if the test is not using fibers (see UT_USE_FIBERS in the readme).");
116 			return;
117 		}
118 		ut_fibers_use_data_race_checker = atoi(checker_env);
119 		if (ut_fibers_use_data_race_checker) {
120 			raw_printf("==== Fibers data race checker enabled ====\n");
121 		} else {
122 			raw_printf("==== Fibers data race checker disabled ====\n");
123 		}
124 #endif // __BUILDING_WITH_SANCOV_LOAD_STORES__
125 	}
126 }
127 
128 // --------------- proc and thread ------------------
129 
130 struct proc;
131 typedef struct proc * proc_t;
132 
133 extern void init_thread_from_template(thread_t thread);
134 extern void ctid_table_init(void);
135 extern void ctid_table_add(thread_t thread);
136 extern void ctid_table_remove(thread_t thread);
137 extern void thread_ro_create(task_t parent_task, thread_t th, thread_ro_t tro_tpl);
138 extern task_t proc_get_task_raw(proc_t proc);
139 extern void task_zone_init(void);
140 
141 extern struct compact_id_table ctid_table;
142 extern lck_grp_t thread_lck_grp;
143 extern size_t proc_struct_size;
144 extern proc_t kernproc;
145 
146 void mock_init_proc(proc_t p, void* (*calloc_call)(size_t, size_t));
147 
// a pointer to this object is kept per thread in thread-local-storage
struct mock_thread {
	struct thread th;              // the mocked XNU thread itself (overlaid, not zalloc'd)
	fiber_t fiber;                 // owning fiber, only set in fibers mode
	struct mock_thread* wq_next;   // presumably links waiters in the mock waitq lists — see mock_waitq_extra
	bool interrupts_disabled;      // per-thread interrupt state, used by the fibers backend only
};
155 
// One slot of the pthread-mode event table mapping an event_t to a condvar.
struct pthread_mock_event_table_entry {
	event_t ev;                // event this slot is bound to
	pthread_cond_t cond;
	// the condition variable is owned by the table and is initialized on the first use of the entry
	bool cond_inited;
};
#define PTHREAD_EVENTS_TABLE_SIZE 1000
163 
// Process-wide singleton state for the mocks; see get_proc_state().
struct mock_process_state {
	void *proctask; // buffer for proc and task
	struct proc *main_proc;
	struct task *main_task;
	struct cpu_data cpud;             // fake CPU data shared by every mock thread
	struct mock_thread *main_thread;
	uint64_t thread_unique_id;        // monotonically increasing source for thread_id
	uint64_t _faults;                 // backing storage for main_task->faults
	uint64_t _pageins;                // backing storage for main_task->pageins
	uint64_t _cow_faults;             // backing storage for main_task->cow_faults

	// pthread
	pthread_key_t tls_thread_key;     // TLS slot holding the per-thread mock_thread pointer
	pthread_mutex_t interrupts_mutex; // if this mutex is locked interrupts are disabled
	pthread_mutex_t events_mutex; // for all event condition variables
	struct pthread_mock_event_table_entry events[PTHREAD_EVENTS_TABLE_SIZE];
	// !pthread

	// fibers
	int interrupts_disabled; // NOTE(review): appears unused in this chunk; fibers track this per mock_thread — confirm against the rest of the file
	// !fibers
};
186 
187 static void
mock_destroy_thread(void * th_p)188 mock_destroy_thread(void *th_p)
189 {
190 	struct mock_thread *mth = (struct mock_thread *)th_p;
191 	// raw_printf("thread_t finished ctid=%u\n", mth->th.ctid);
192 
193 	ctid_table_remove(&mth->th);
194 
195 	free(mth->th.t_tro);
196 	free(mth);
197 }
198 
// Allocate and initialize a mock_thread for the calling pthread/fiber and
// register its cleanup (fiber cleanup routine or pthread TLS destructor).
// The initialization order below matters; do not reorder without care.
static struct mock_thread *
mock_init_new_thread(struct mock_process_state* s)
{
	struct mock_thread *new_mock_thread = calloc(1, sizeof(struct mock_thread));
	struct thread *new_thread = &new_mock_thread->th;

	if (ut_mocks_use_fibers) {
		// Attach to the current fiber; fiber teardown calls mock_destroy_thread.
		new_mock_thread->fiber = fibers_current;
		fibers_current->extra = new_mock_thread;
		fibers_current->extra_cleanup_routine = &mock_destroy_thread;
	} else {
		// TLS destructor (registered in mock_init_threads_state) frees us.
		pthread_setspecific(s->tls_thread_key, new_mock_thread);
	}

	// One-time ctid table setup, done lazily when the first thread is created.
	static int mock_init_new_thread_first_call = 1;
	if (mock_init_new_thread_first_call) {
		mock_init_new_thread_first_call = 0;
		compact_id_table_init(&ctid_table);
		ctid_table_init();
	}

	init_thread_from_template(new_thread);

	// maybe call thread_create_internal() ?
	// machine is needed by _enable_preemption_write_count()
	machine_thread_create(new_thread, s->main_task, true);
	new_thread->machine.CpuDatap = &s->cpud;
	new_thread->thread_id = ++s->thread_unique_id;
	//new_thread->ctid = (uint32_t)new_thread->thread_id;
	ctid_table_add(new_thread);

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	fake_init_lock(&new_thread->mutex);

	// Hand-rolled thread_ro; freed in mock_destroy_thread().
	new_thread->t_tro = calloc(1, sizeof(struct thread_ro));
	new_thread->t_tro->tro_owner = new_thread;
	new_thread->t_tro->tro_task = s->main_task;
	new_thread->t_tro->tro_proc = s->main_proc;

	// for the main thread this happens before zalloc init so don't do the following which uses zalloc
	//struct thread_ro tro_tpl = { };
	//thread_ro_create(&s->main_task, new_thread, &tro_tpl);

	new_thread->state = TH_RUN;

	// raw_printf("thread_t created ctid=%u\n", new_thread->ctid);
	return new_mock_thread;
}
249 
250 void
fake_init_task(task_t new_task)251 fake_init_task(task_t new_task)
252 {
253 	// can't call task_create_internal() since it does zalloc
254 	fake_init_lock(&new_task->lock);
255 	fake_init_lock(&new_task->task_objq_lock);
256 	queue_init(&new_task->task_objq);
257 	queue_init(&new_task->threads);
258 	new_task->suspend_count = 0;
259 	new_task->thread_count = 0;
260 	new_task->active_thread_count = 0;
261 	new_task->user_stop_count = 0;
262 	new_task->legacy_stop_count = 0;
263 	new_task->active = TRUE;
264 	new_task->halting = FALSE;
265 	new_task->priv_flags = 0;
266 	new_task->t_flags = 0;
267 	new_task->t_procflags = 0;
268 	new_task->t_returnwaitflags = 0;
269 	new_task->importance = 0;
270 	new_task->crashed_thread_id = 0;
271 	new_task->watchports = NULL;
272 	new_task->t_rr_ranges = NULL;
273 
274 	new_task->bank_context = NULL;
275 
276 	new_task->pageins = calloc(1, sizeof(uint64_t));
277 
278 	fake_init_lock(&new_task->task_objq_lock);
279 	queue_init(&new_task->task_objq);
280 }
281 
282 static void
mock_init_threads_state(struct mock_process_state * s)283 mock_init_threads_state(struct mock_process_state* s)
284 {
285 	//task_zone_init();
286 	s->proctask = calloc(1, proc_struct_size + sizeof(struct task));
287 	s->main_proc = (proc_t)s->proctask;
288 	s->main_task = proc_get_task_raw(s->main_proc);
289 
290 	memset(s->main_proc, 0, proc_struct_size);
291 	mock_init_proc(s->main_proc, calloc);
292 	kernproc = s->main_proc; // set global variable
293 
294 	memset(s->main_task, 0, sizeof(*s->main_task));
295 	fake_init_task(s->main_task);
296 	s->_faults = 0;
297 	s->main_task->faults = &s->_faults;
298 	s->_pageins = 0;
299 	s->main_task->pageins = &s->_pageins;
300 	s->_cow_faults = 0;
301 	s->main_task->cow_faults = &s->_cow_faults;
302 
303 	kernel_task = s->main_task; // without this machine_thread_create allocates
304 
305 	cpu_data_init(&s->cpud);
306 	s->thread_unique_id = 100;
307 
308 	if (!ut_mocks_use_fibers) {
309 		int ret = pthread_key_create(&s->tls_thread_key, &mock_destroy_thread);
310 		if (ret != 0) {
311 			raw_printf("failed pthread_key_create");
312 			exit(1);
313 		}
314 
315 		pthread_mutexattr_t attr;
316 		pthread_mutexattr_init(&attr);
317 		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
318 		ret = pthread_mutex_init(&s->interrupts_mutex, &attr);
319 		if (ret != 0) {
320 			raw_printf("failed pthread_key_create");
321 			exit(1);
322 		}
323 		pthread_mutexattr_destroy(&attr);
324 
325 		ret = pthread_mutex_init(&s->events_mutex, NULL);
326 		if (ret != 0) {
327 			raw_printf("failed pthread_key_create");
328 			exit(1);
329 		}
330 		memset(&s->events, 0, sizeof(s->events));
331 	}
332 
333 	s->main_thread = mock_init_new_thread(s);
334 }
335 
336 struct mock_process_state *
get_proc_state(void)337 get_proc_state(void)
338 {
339 	static struct mock_process_state s;
340 	static bool initialized = false;
341 	if (!initialized) { // TODO move to fake_kinit.c ?
342 		initialized = true;
343 		mock_init_threads_state(&s);
344 	}
345 	return &s;
346 }
347 
348 struct mock_thread *
get_mock_thread(void)349 get_mock_thread(void)
350 {
351 	struct mock_process_state *s = get_proc_state();
352 
353 	struct mock_thread *mth;
354 	if (ut_mocks_use_fibers) {
355 		mth = (struct mock_thread *)fibers_current->extra;
356 	} else {
357 		mth = pthread_getspecific(s->tls_thread_key);
358 	}
359 
360 	if (mth == NULL) {
361 		mth = mock_init_new_thread(s);
362 	}
363 	return mth;
364 }
365 
// Returns the mock thread_t for the calling pthread/fiber (created lazily).
T_MOCK(thread_t,
current_thread_fast, (void))
{
	return &get_mock_thread()->th;
}

// Credentials are not mocked: every caller appears to be uid 0.
T_MOCK(uint32_t,
kauth_cred_getuid, (void* cred))
{
	return 0;
}
377 
378 // --------------- interrupts disable (spl) ---------------------
379 
// Report whether mocked interrupts are enabled for the current thread.
T_MOCK(boolean_t,
ml_get_interrupts_enabled, (void))
{
	if (ut_mocks_use_fibers) {
		// Fibers mode tracks interrupt state per mock thread.
		return get_mock_thread()->interrupts_disabled == 0;
	} else {
		// Pthread mode: the recursive interrupts_mutex is held while
		// interrupts are disabled (see ml_set_interrupts_enabled).
		pthread_mutex_t *m = &get_proc_state()->interrupts_mutex;
		int r = pthread_mutex_trylock(m);
		if (r == 0) {
			// NOTE(review): with a recursive mutex, trylock also succeeds
			// when no thread holds it at all; that case is reported here as
			// "interrupts disabled" — confirm this is the intended model.
			// it's locked, meaning interrupts are disabled
			pthread_mutex_unlock(m);
			return false;
		}
		// EBUSY: held by another thread, so this thread's interrupts are on.
		PT_QUIET; PT_ASSERT_TRUE(r == EBUSY, "unexpected value in get_interrupts_enabled");
		return true;
	}
}
397 
398 // original calls DAIF
399 // interupts disable is mocked by disabling context switches with fiber_t.may_yield_disabled
// Set the mocked interrupt-enable state; returns the previous state in
// fibers mode (pthread mode always returns TRUE — previous state untracked).
T_MOCK(boolean_t,
ml_set_interrupts_enabled, (boolean_t enable))
{
	if (ut_mocks_use_fibers) {
		bool prev_interrupts_disabled = get_mock_thread()->interrupts_disabled;

		FIBERS_LOG(FIBERS_LOG_DEBUG, "ml_set_interrupts_enabled: enable=%d, previous state=%d, may_yield_disabled=%d", enable, !get_mock_thread()->interrupts_disabled, fibers_current->may_yield_disabled);

		// Yield point before the transition; flags an error when the call
		// does not actually change the state (e.g. enable while enabled).
		fibers_may_yield_internal_with_reason(
			(enable ? FIBERS_YIELD_REASON_PREEMPTION_WILL_ENABLE : FIBERS_YIELD_REASON_PREEMPTION_WILL_DISABLE) |
			FIBERS_YIELD_REASON_ERROR_IF(enable != prev_interrupts_disabled));

		// Track the interrupt state per fiber through yield_disabled
		if (enable && prev_interrupts_disabled) {
			get_mock_thread()->interrupts_disabled = false;
			fibers_current->may_yield_disabled--;
		} else if (!enable && !prev_interrupts_disabled) {
			get_mock_thread()->interrupts_disabled = true;
			fibers_current->may_yield_disabled++;
		}

		FIBERS_LOG(FIBERS_LOG_DEBUG, "ml_set_interrupts_enabled exit: enable=%d, state=%d, may_yield_disabled=%d", enable, !get_mock_thread()->interrupts_disabled, fibers_current->may_yield_disabled);

		// Matching yield point after the transition.
		fibers_may_yield_internal_with_reason(
			(enable ? FIBERS_YIELD_REASON_PREEMPTION_DID_ENABLE : FIBERS_YIELD_REASON_PREEMPTION_DID_DISABLE) |
			FIBERS_YIELD_REASON_ERROR_IF(enable != prev_interrupts_disabled));

		// TRUE iff interrupts were enabled before this call.
		return !prev_interrupts_disabled;
	} else {
		// Pthread mode: disabled == interrupts_mutex held (recursive).
		pthread_mutex_t *m = &get_proc_state()->interrupts_mutex;
		if (enable) {
			int ret = pthread_mutex_unlock(m);
			PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "interrupts pthread_mutex_unlock");
		} else {
			// disable interrupts locks
			int ret = pthread_mutex_lock(m);
			PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "interrupts pthread_mutex_lock");
		}
	}
	return true;
}

// The debug flag has no meaning in the mock; delegate to the plain variant.
T_MOCK(boolean_t,
ml_set_interrupts_enabled_with_debug, (boolean_t enable, boolean_t __unused debug))
{
	return MOCK_ml_set_interrupts_enabled(enable);
}
447 
// Disable preemption for the current thread. In fibers mode this suppresses
// context switches via may_yield_disabled; in pthread mode it takes the
// (recursive) interrupts mutex.
T_MOCK(void,
_disable_preemption, (void))
{
	if (ut_mocks_use_fibers) {
		// Yield point before disabling; errors if preemption is already off
		// (nesting is not expected through this path).
		fibers_may_yield_internal_with_reason(
			FIBERS_YIELD_REASON_PREEMPTION_WILL_DISABLE |
			FIBERS_YIELD_REASON_ERROR_IF(fibers_current->may_yield_disabled != 0));

		fibers_current->may_yield_disabled++;

		FIBERS_LOG(FIBERS_LOG_DEBUG, "disable_preemption: may_yield_disabled=%d", fibers_current->may_yield_disabled);

		// Mirror the real implementation's per-thread preemption count.
		thread_t thread = MOCK_current_thread_fast();
		unsigned int count = thread->machine.preemption_count;
		os_atomic_store(&thread->machine.preemption_count, count + 1, compiler_acq_rel);

		fibers_may_yield_internal_with_reason(
			FIBERS_YIELD_REASON_PREEMPTION_DID_DISABLE |
			FIBERS_YIELD_REASON_ERROR_IF(fibers_current->may_yield_disabled != 1));
	} else {
		pthread_mutex_t *m = &get_proc_state()->interrupts_mutex;

		int ret = pthread_mutex_lock(m);
		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "_disable_preemption pthread_mutex_lock");

		thread_t thread = MOCK_current_thread_fast();
		unsigned int count = thread->machine.preemption_count;
		os_atomic_store(&thread->machine.preemption_count, count + 1, compiler_acq_rel);
	}
}

// The "without measurements" distinction is irrelevant for the mock.
T_MOCK(void,
_disable_preemption_without_measurements, (void))
{
	MOCK__disable_preemption();
}

// The thread argument is ignored: only the current thread is meaningful here.
T_MOCK(void,
lock_disable_preemption_for_thread, (thread_t t))
{
	MOCK__disable_preemption();
}
490 
// Re-enable preemption; inverse of MOCK__disable_preemption.
T_MOCK(void,
_enable_preemption, (void))
{
	if (ut_mocks_use_fibers) {
		// Yield point before re-enabling; errors unless preemption was
		// disabled exactly once (mirrors _disable_preemption's checks).
		fibers_may_yield_internal_with_reason(
			FIBERS_YIELD_REASON_PREEMPTION_WILL_ENABLE |
			FIBERS_YIELD_REASON_ERROR_IF(fibers_current->may_yield_disabled != 1));

		fibers_current->may_yield_disabled--;

		FIBERS_LOG(FIBERS_LOG_DEBUG, "enable_preemption: may_yield_disabled=%d", fibers_current->may_yield_disabled);

		thread_t thread = current_thread();
		unsigned int count = thread->machine.preemption_count;
		os_atomic_store(&thread->machine.preemption_count, count - 1, compiler_acq_rel);

		fibers_may_yield_internal_with_reason(
			FIBERS_YIELD_REASON_PREEMPTION_DID_ENABLE |
			FIBERS_YIELD_REASON_ERROR_IF(fibers_current->may_yield_disabled != 0));
	} else {
		// Drop the count first, then release the mutex taken by
		// _disable_preemption.
		thread_t thread = current_thread();
		unsigned int count  = thread->machine.preemption_count;
		os_atomic_store(&thread->machine.preemption_count, count - 1, compiler_acq_rel);

		pthread_mutex_t *m = &get_proc_state()->interrupts_mutex;

		int ret = pthread_mutex_unlock(m);
		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "_enable_preemption pthread_mutex_unlock");
	}
}
521 
522 // --------------- mutex ------------------
523 
/*
 * Overlay for lck_mtx_t: the first word holds a pointer to the backing
 * mutex (pthread or fibers, depending on mode); lck_mtx_state_t keeps the
 * owner field in its usual place. Size-checked below.
 */
struct mock_lck_mtx_t {
	union {
		pthread_mutex_t *pt_m; // pthread mode backing mutex
		fibers_mutex_t *f_m;   // fibers mode backing mutex
	};
	lck_mtx_state_t lck_mtx;
};
static_assert(sizeof(struct mock_lck_mtx_t) == sizeof(lck_mtx_t));
532 
533 void
fake_init_lock(lck_mtx_t * lck)534 fake_init_lock(lck_mtx_t * lck)
535 {
536 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lck;
537 	if (ut_mocks_use_fibers) {
538 		mlck->f_m = calloc(1, sizeof(fibers_mutex_t));
539 		fibers_mutex_init(mlck->f_m);
540 	} else {
541 		mlck->pt_m = calloc(1, sizeof(pthread_mutex_t));
542 		int ret = pthread_mutex_init(mlck->pt_m, NULL);
543 		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "pthread_mutex_init");
544 	}
545 }
546 
547 T_MOCK(void,
548 lck_mtx_init, (lck_mtx_t * lck, lck_grp_t * grp, lck_attr_t * attr))
549 {
550 	fake_init_lock(lck);
551 }
552 
553 T_MOCK(void,
554 lck_mtx_destroy, (lck_mtx_t * lck, lck_grp_t * grp))
555 {
556 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lck;
557 	if (ut_mocks_use_fibers) {
558 		fibers_mutex_destroy(mlck->f_m);
559 		free(mlck->f_m);
560 		mlck->f_m = NULL;
561 	} else {
562 		int ret = pthread_mutex_destroy(mlck->pt_m);
563 		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "pthread_mutex_destroy");
564 		free(mlck->pt_m);
565 		mlck->pt_m = NULL;
566 	}
567 }
568 
569 T_MOCK(void,
570 lck_mtx_lock, (lck_mtx_t * lock))
571 {
572 	uint32_t ctid = MOCK_current_thread_fast()->ctid;
573 
574 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lock;
575 	if (ut_mocks_use_fibers) {
576 		fibers_mutex_lock(mlck->f_m, true);
577 	} else {
578 		int ret = pthread_mutex_lock(mlck->pt_m);
579 		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "pthread_mutex_lock");
580 	}
581 	mlck->lck_mtx.owner = ctid;
582 }
583 
584 T_MOCK(void,
585 lck_mtx_lock_spin, (lck_mtx_t * lock))
586 {
587 	uint32_t ctid = MOCK_current_thread_fast()->ctid;
588 
589 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lock;
590 	if (ut_mocks_use_fibers) {
591 		fibers_mutex_lock(mlck->f_m, false); // do not check for disabled preemption if spinlock
592 	} else {
593 		int ret = pthread_mutex_lock(mlck->pt_m);
594 		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "pthread_mutex_lock");
595 	}
596 	mlck->lck_mtx.owner = ctid;
597 }
598 
599 T_MOCK(boolean_t,
600 lck_mtx_try_lock, (lck_mtx_t * lock))
601 {
602 	uint32_t ctid = MOCK_current_thread_fast()->ctid;
603 
604 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lock;
605 	int ret;
606 	if (ut_mocks_use_fibers) {
607 		ret = fibers_mutex_try_lock(mlck->f_m);
608 	} else {
609 		int ret = pthread_mutex_trylock(mlck->pt_m);
610 	}
611 	if (ret == 0) {
612 		mlck->lck_mtx.owner = ctid;
613 		return TRUE;
614 	} else {
615 		return FALSE;
616 	}
617 }
618 
619 T_MOCK(void,
620 lck_mtx_unlock, (lck_mtx_t * lock))
621 {
622 	struct mock_lck_mtx_t* mlck = (struct mock_lck_mtx_t*)lock;
623 	mlck->lck_mtx.owner = 0;
624 	if (ut_mocks_use_fibers) {
625 		fibers_mutex_unlock(mlck->f_m);
626 	} else {
627 		int ret = pthread_mutex_unlock(mlck->pt_m);
628 		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "pthread_mutex_unlock");
629 	}
630 }
631 
// Back-off helper used by contended lock paths.
T_MOCK(void,
mutex_pause, (uint32_t collisions))
{
	if (ut_mocks_use_fibers) {
		// we can't sleep to not break determinism, trigger a ctxswitch instead
		fibers_yield();
	} else {
		// Calls through to the real implementation — presumably T_MOCK
		// resolves the unmocked symbol here (same pattern as the lck_rw_*
		// mocks below); confirm against the T_MOCK definition.
		mutex_pause(collisions);
	}
}
642 
643 // --------------- rwlocks ------------------
644 
/*
 * Overlay for lck_rw_t (fibers mode only); size-checked below. The
 * LCK_RW_ASSERT_* values are asserted to match the FIBERS_RWLOCK_ASSERT_*
 * values so assertion types can be passed through unchanged.
 */
struct mock_lck_rw_t {
	fibers_rwlock_t *rw;     // backing fibers rwlock, allocated by fake_init_rwlock()
	// lck_rw_word_t   lck_rw; // RANGELOCKINGTODO rdar://150846598
	uint32_t lck_rw_owner;   // ctid of the exclusive holder, 0 otherwise
};
static_assert(sizeof(struct mock_lck_rw_t) == sizeof(lck_rw_t));

static_assert(LCK_RW_ASSERT_SHARED == FIBERS_RWLOCK_ASSERT_SHARED);
static_assert(LCK_RW_ASSERT_EXCLUSIVE == FIBERS_RWLOCK_ASSERT_EXCLUSIVE);
static_assert(LCK_RW_ASSERT_HELD == FIBERS_RWLOCK_ASSERT_HELD);
static_assert(LCK_RW_ASSERT_NOTHELD == FIBERS_RWLOCK_ASSERT_NOTHELD);
656 
657 void
fake_init_rwlock(struct mock_lck_rw_t * mlck)658 fake_init_rwlock(struct mock_lck_rw_t *mlck)
659 {
660 	mlck->rw = calloc(1, sizeof(fibers_rwlock_t));
661 	fibers_rwlock_init(mlck->rw);
662 }
663 
664 static boolean_t
fake_rw_try_lock(struct mock_lck_rw_t * mlck,lck_rw_type_t lck_rw_type)665 fake_rw_try_lock(struct mock_lck_rw_t *mlck, lck_rw_type_t lck_rw_type)
666 {
667 	int ret;
668 	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
669 	lck_rw_lock_count_inc(MOCK_current_thread_fast(), (const void*)mlck);
670 
671 	if (lck_rw_type == LCK_RW_TYPE_SHARED) {
672 		ret = fibers_rwlock_try_rdlock(mlck->rw);
673 	} else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) {
674 		ret = fibers_rwlock_try_wrlock(mlck->rw);
675 		if (ret == 0) {
676 			mlck->lck_rw_owner = MOCK_current_thread_fast()->ctid;
677 		}
678 	} else {
679 		PT_FAIL("lck_rw_try_lock: Invalid lock type");
680 	}
681 
682 	if (ret != 0) {
683 		// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
684 		lck_rw_lock_count_dec(MOCK_current_thread_fast(), (const void*)mlck);
685 	}
686 	return ret == 0;
687 }
688 
689 static bool
fake_rw_lock_would_yield_exclusive(struct mock_lck_rw_t * mlck,lck_rw_yield_t mode)690 fake_rw_lock_would_yield_exclusive(struct mock_lck_rw_t *mlck, lck_rw_yield_t mode)
691 {
692 	fibers_rwlock_assert(mlck->rw, FIBERS_RWLOCK_ASSERT_EXCLUSIVE);
693 
694 	bool yield = false;
695 	if (mode == LCK_RW_YIELD_ALWAYS) {
696 		yield = true;
697 	} else {
698 		if (mlck->rw->writer_wait_queue.count > 0) {
699 			yield = true;
700 		} else if (mode == LCK_RW_YIELD_ANY_WAITER) {
701 			yield = (mlck->rw->reader_wait_queue.count != 0);
702 		}
703 	}
704 	return yield;
705 }
706 
707 T_MOCK(void,
708 lck_rw_init, (
709 	lck_rw_t * lck,
710 	lck_grp_t * grp,
711 	lck_attr_t * attr))
712 {
713 	if (!ut_mocks_use_fibers) {
714 		lck_rw_init(lck, grp, attr);
715 		return;
716 	}
717 
718 	// RANGELOCKINGTODO rdar://150846598 mock attr, especially lck_rw_can_sleep
719 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
720 	fake_init_rwlock(mlck);
721 }
722 
723 T_MOCK(void,
724 lck_rw_destroy, (lck_rw_t * lck, lck_grp_t * grp))
725 {
726 	if (!ut_mocks_use_fibers) {
727 		lck_rw_destroy(lck, grp);
728 		return;
729 	}
730 
731 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
732 	fibers_rwlock_destroy(mlck->rw);
733 	free(mlck->rw);
734 	mlck->rw = NULL;
735 }
736 
// Release a mocked rwlock held in either mode; lck_rw_type is ignored in
// fibers mode (the backing lock knows how it is held).
T_MOCK(void,
lck_rw_unlock, (lck_rw_t * lck, lck_rw_type_t lck_rw_type))
{
	if (!ut_mocks_use_fibers) {
		lck_rw_unlock(lck, lck_rw_type);
		return;
	}

	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
	// Sample writer_active before unlocking: clear the owner tag only when
	// a writer (necessarily us) holds the lock.
	if (mlck->rw->writer_active) {
		mlck->lck_rw_owner = 0;
	}
	fibers_rwlock_unlock(mlck->rw);

	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
	lck_rw_lock_count_dec(MOCK_current_thread_fast(), (const void*)mlck);
}
754 
755 static void
lck_rw_old_mock_unlock_shared(lck_rw_t * lck)756 lck_rw_old_mock_unlock_shared(lck_rw_t * lck)
757 {
758 	if (!ut_mocks_use_fibers) {
759 		lck_rw_unlock_shared(lck);
760 		return;
761 	}
762 
763 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
764 	fibers_rwlock_rdunlock(mlck->rw);
765 
766 	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
767 	lck_rw_lock_count_dec(MOCK_current_thread_fast(), (const void*)mlck);
768 }
769 
770 T_MOCK(void,
771 lck_rw_unlock_shared, (lck_rw_t * lck))
772 {
773 	lck_rw_old_mock_unlock_shared(lck);
774 }
775 
776 T_MOCK(void,
777 lck_rw_unlock_exclusive, (lck_rw_t * lck))
778 {
779 	if (!ut_mocks_use_fibers) {
780 		lck_rw_unlock_exclusive(lck);
781 		return;
782 	}
783 
784 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
785 	mlck->lck_rw_owner = 0;
786 	fibers_rwlock_wrunlock(mlck->rw);
787 
788 	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
789 	lck_rw_lock_count_dec(MOCK_current_thread_fast(), (const void*)mlck);
790 }
791 
792 T_MOCK(void,
793 lck_rw_lock_exclusive, (lck_rw_t * lck))
794 {
795 	if (!ut_mocks_use_fibers) {
796 		lck_rw_lock_exclusive(lck);
797 		return;
798 	}
799 
800 	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
801 	lck_rw_lock_count_inc(MOCK_current_thread_fast(), (const void*)lck);
802 
803 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
804 	fibers_rwlock_wrlock(mlck->rw, true);
805 	mlck->lck_rw_owner = MOCK_current_thread_fast()->ctid;
806 }
807 
808 T_MOCK(void,
809 lck_rw_lock_shared, (lck_rw_t * lck))
810 {
811 	if (!ut_mocks_use_fibers) {
812 		lck_rw_lock_shared(lck);
813 		return;
814 	}
815 
816 	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
817 	lck_rw_lock_count_inc(MOCK_current_thread_fast(), (const void*)lck);
818 
819 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
820 	fibers_rwlock_rdlock(mlck->rw, true);
821 }
822 
823 T_MOCK(boolean_t,
824 lck_rw_try_lock, (lck_rw_t * lck, lck_rw_type_t lck_rw_type))
825 {
826 	if (!ut_mocks_use_fibers) {
827 		return lck_rw_try_lock(lck, lck_rw_type);
828 	}
829 
830 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
831 	return fake_rw_try_lock(mlck, lck_rw_type);
832 }
833 
834 T_MOCK(boolean_t,
835 lck_rw_try_lock_exclusive, (lck_rw_t * lck))
836 {
837 	if (!ut_mocks_use_fibers) {
838 		return lck_rw_try_lock_exclusive(lck);
839 	}
840 
841 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
842 	return fake_rw_try_lock(mlck, LCK_RW_TYPE_EXCLUSIVE);
843 }
844 
845 T_MOCK(boolean_t,
846 lck_rw_try_lock_shared, (lck_rw_t * lck))
847 {
848 	if (!ut_mocks_use_fibers) {
849 		return lck_rw_try_lock_shared(lck);
850 	}
851 
852 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
853 	return fake_rw_try_lock(mlck, LCK_RW_TYPE_SHARED);
854 }
855 
// Release a mocked rwlock, reporting which mode it was held in.
T_MOCK(lck_rw_type_t,
lck_rw_done, (lck_rw_t * lck))
{
	if (!ut_mocks_use_fibers) {
		return lck_rw_done(lck);
	}

	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
	mlck->lck_rw_owner = 0;
	// If there is a writer locking it must be the current fiber or will trigger an assertion in fibers_rwlock_wrunlock
	// writer_active must be sampled before the unlock below to report the held mode.
	lck_rw_type_t ret = mlck->rw->writer_active ? LCK_RW_TYPE_EXCLUSIVE : LCK_RW_TYPE_SHARED;
	fibers_rwlock_unlock(mlck->rw);

	// RANGELOCKINGTODO rdar://150846598 handle old lock can_sleep
	lck_rw_lock_count_dec(MOCK_current_thread_fast(), (const void*)mlck);

	return ret;
}
874 
// Upgrade a shared hold to exclusive. Like the real primitive, a failed
// upgrade drops the shared hold before returning FALSE.
T_MOCK(boolean_t,
lck_rw_lock_shared_to_exclusive, (lck_rw_t * lck))
{
	// Test hook: force the upgrade to fail (see ut_mocks_lock_upgrade_fail).
	if (ut_mocks_lock_upgrade_fail) {
		lck_rw_old_mock_unlock_shared(lck);
		return false;
	}

	if (!ut_mocks_use_fibers) {
		return lck_rw_lock_shared_to_exclusive(lck);
	}

	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
	// NOTE(review): on a successful upgrade lck_rw_owner is not tagged with
	// the current ctid (unlike lck_rw_lock_exclusive) — confirm intended.
	return fibers_rwlock_upgrade(mlck->rw);
}
890 
891 T_MOCK(void,
892 lck_rw_lock_exclusive_to_shared, (lck_rw_t * lck))
893 {
894 	if (!ut_mocks_use_fibers) {
895 		lck_rw_lock_exclusive_to_shared(lck);
896 		return;
897 	}
898 
899 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
900 	fibers_rwlock_downgrade(mlck->rw);
901 }
902 
903 T_MOCK(void,
904 lck_rw_assert, (
905 	lck_rw_t * lck,
906 	unsigned int type))
907 {
908 	if (!ut_mocks_use_fibers) {
909 		lck_rw_assert(lck, type);
910 		return;
911 	}
912 
913 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
914 	fibers_rwlock_assert(mlck->rw, type);
915 }
916 
917 T_MOCK(bool,
918 lck_rw_lock_would_yield_exclusive, (
919 	lck_rw_t * lck,
920 	lck_rw_yield_t mode))
921 {
922 	if (!ut_mocks_use_fibers) {
923 		return lck_rw_lock_would_yield_exclusive(lck, mode);
924 	}
925 
926 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
927 	return fake_rw_lock_would_yield_exclusive(mlck, mode);
928 }
929 
930 T_MOCK(bool,
931 lck_rw_lock_would_yield_shared, (lck_rw_t * lck))
932 {
933 	if (!ut_mocks_use_fibers) {
934 		return lck_rw_lock_would_yield_shared(lck);
935 	}
936 
937 	struct mock_lck_rw_t* mlck = (struct mock_lck_rw_t*)lck;
938 	fibers_rwlock_assert(mlck->rw, FIBERS_RWLOCK_ASSERT_SHARED);
939 	return mlck->rw->writer_wait_queue.count != 0;
940 }
941 
942 // Note: No need to mock lck_rw_sleep as it uses lck_rw_* API and waitq, we already mock everything the function uses
943 
944 // --------------- waitq ------------------
945 
946 /*
947  *   If the 4 bytes of mock_waitq.mock_magic are not matching MOCK_WAITQ_MAGIC
948  *   it means the waitq comes from an unsupported location and was not created with mock_waitq_init().
949  */
950 #define MOCK_WAITQ_MAGIC 0xb60d0d8f
951 
952 struct mock_waitq_extra {
953 	bool valid;
954 	fibers_condition_t cond;
955 	fibers_mutex_t mutex;
956 
957 	struct mock_thread *waiting_threads;
958 	int waiting_thread_count; // Count of waiting threads
959 };
960 
961 struct mock_waitq { // 24 bytes
962 	WAITQ_FLAGS(waitq, waitq_eventmask:_EVENT_MASK_BITS);
963 	unsigned int mock_magic;
964 	event64_t current_event; // delete when every waiting thread is removed
965 	struct mock_waitq_extra *extra;
966 };
967 
968 static_assert(sizeof(struct waitq) == sizeof(struct mock_waitq));
969 
970 #define MWQCAST(xnu_wq) ((struct mock_waitq *)(xnu_wq).wq_q)
971 
972 static bool
waitq_use_real_impl(waitq_t wq)973 waitq_use_real_impl(waitq_t wq)
974 {
975 	return !ut_mocks_use_fibers || waitq_type(wq) != WQT_QUEUE;
976 }
977 
978 int
mock_waitq_init(struct mock_waitq * wq)979 mock_waitq_init(struct mock_waitq *wq)
980 {
981 	if (!wq) {
982 		return EINVAL;
983 	}
984 	wq->mock_magic = MOCK_WAITQ_MAGIC;
985 	wq->current_event = 0;
986 
987 	wq->extra = calloc(sizeof(struct mock_waitq_extra), 1);
988 	wq->extra->valid = true;
989 	fibers_mutex_init(&wq->extra->mutex);
990 
991 	return 0;
992 }
993 
994 int
mock_waitq_destroy(struct mock_waitq * wq)995 mock_waitq_destroy(struct mock_waitq *wq)
996 {
997 	if (!wq) {
998 		return EINVAL;
999 	}
1000 	PT_QUIET; PT_ASSERT_TRUE(wq->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1001 
1002 	fibers_condition_destroy(&wq->extra->cond);
1003 	fibers_mutex_destroy(&wq->extra->mutex);
1004 	free(wq->extra);
1005 	wq->extra = NULL;
1006 
1007 	return 0;
1008 }
1009 
1010 static inline bool
waitq_should_unlock(waitq_wakeup_flags_t flags)1011 waitq_should_unlock(waitq_wakeup_flags_t flags)
1012 {
1013 	return (flags & (WAITQ_UNLOCK | WAITQ_KEEP_LOCKED)) == WAITQ_UNLOCK;
1014 }
1015 
1016 static inline bool
waitq_should_enable_interrupts(waitq_wakeup_flags_t flags)1017 waitq_should_enable_interrupts(waitq_wakeup_flags_t flags)
1018 {
1019 	return (flags & (WAITQ_UNLOCK | WAITQ_KEEP_LOCKED | WAITQ_ENABLE_INTERRUPTS)) == (WAITQ_UNLOCK | WAITQ_ENABLE_INTERRUPTS);
1020 }
1021 
1022 
/*
 * Mock of waitq_init: queue-type waitqs become fibers-backed mock waitqs;
 * port waitqs (and non-fibers runs) keep the real implementation.
 */
T_MOCK(void,
waitq_init, (waitq_t wq, waitq_type_t type, int policy))
{
	if (!ut_mocks_use_fibers || type == WQT_PORT) {
		waitq_init(wq, type, policy);
		return;
	}

	// Reset the overlay header in place; FIFO unless SYNC_POLICY_REVERSED.
	*wq.wq_q = (struct waitq){
		.waitq_type  = type,
		.waitq_fifo  = ((policy & SYNC_POLICY_REVERSED) == 0),
	};

	// RANGELOCKINGTODO rdar://150846598
	PT_QUIET; PT_ASSERT_TRUE(type == WQT_QUEUE, "invalid waitq type");
	mock_waitq_init(MWQCAST(wq));

	// SYNC_POLICY_INIT_LOCKED: hand the waitq back already locked.
	if (policy & SYNC_POLICY_INIT_LOCKED) {
		fibers_mutex_lock(&MWQCAST(wq)->extra->mutex, false);
	}
}
1044 
1045 T_MOCK(void,
1046 waitq_deinit, (waitq_t wq))
1047 {
1048 	if (waitq_use_real_impl(wq)) {
1049 		waitq_deinit(wq);
1050 		return;
1051 	}
1052 
1053 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1054 	mock_waitq_destroy(MWQCAST(wq));
1055 }
1056 
1057 T_MOCK(void,
1058 waitq_lock, (waitq_t wq))
1059 {
1060 	if (waitq_use_real_impl(wq)) {
1061 		waitq_lock(wq);
1062 		return;
1063 	}
1064 
1065 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1066 	fibers_mutex_lock(&MWQCAST(wq)->extra->mutex, false);
1067 }
1068 
1069 T_MOCK(void,
1070 waitq_unlock, (waitq_t wq))
1071 {
1072 	if (waitq_use_real_impl(wq)) {
1073 		waitq_unlock(wq);
1074 		return;
1075 	}
1076 
1077 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1078 	fibers_mutex_unlock(&MWQCAST(wq)->extra->mutex);
1079 }
1080 
1081 T_MOCK(bool,
1082 waitq_is_valid, (waitq_t wq))
1083 {
1084 	if (waitq_use_real_impl(wq)) {
1085 		return waitq_is_valid(wq);
1086 	}
1087 
1088 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1089 	return MWQCAST(wq)->extra->valid;
1090 }
1091 
1092 T_MOCK(void,
1093 waitq_invalidate, (waitq_t wq))
1094 {
1095 	if (waitq_use_real_impl(wq)) {
1096 		return waitq_invalidate(wq);
1097 	}
1098 
1099 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1100 	MWQCAST(wq)->extra->valid = false;
1101 }
1102 
1103 T_MOCK(bool,
1104 waitq_held, (waitq_t wq))
1105 {
1106 	if (waitq_use_real_impl(wq)) {
1107 		return waitq_held(wq);
1108 	}
1109 
1110 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1111 	return MWQCAST(wq)->extra->mutex.holder != NULL;
1112 }
1113 
// The ticket-based lock handoff is not modeled by the mock; a plain lock
// acquisition preserves the blocking behavior (the ticket is ignored).
T_MOCK(void,
waitq_lock_wait, (waitq_t wq, uint32_t ticket))
{
	MOCK_waitq_lock(wq);
}
1119 
1120 T_MOCK(bool,
1121 waitq_lock_try, (waitq_t wq))
1122 {
1123 	if (waitq_use_real_impl(wq)) {
1124 		return waitq_lock_try(wq);
1125 	}
1126 
1127 	PT_QUIET; PT_ASSERT_TRUE(MWQCAST(wq)->mock_magic == MOCK_WAITQ_MAGIC, "missing mock_waitq magic");
1128 	return fibers_mutex_try_lock(&MWQCAST(wq)->extra->mutex) == 0;
1129 }
1130 
1131 // --------------- events ------------------
1132 
// Pool of mock waitqs backing _global_eventq(); a slot is bound to an event
// while current_event != 0 and released when its last waiter is removed.
#define MOCK_WAITQS_NUM 4096
static struct mock_waitq global_mock_waitqs[MOCK_WAITQS_NUM];
static int global_mock_waitqs_inited = 0; // lazy-init flag set by global_mock_waitqs_init()
1136 
1137 static void
global_mock_waitqs_init(void)1138 global_mock_waitqs_init(void)
1139 {
1140 	for (int i = 0; i < MOCK_WAITQS_NUM; ++i) {
1141 		MOCK_waitq_init((struct waitq*)&global_mock_waitqs[i], WQT_QUEUE, SYNC_POLICY_FIFO);
1142 	}
1143 	global_mock_waitqs_inited = 1;
1144 }
1145 
1146 struct mock_waitq*
find_mock_waitq(event64_t event)1147 find_mock_waitq(event64_t event)
1148 {
1149 	if (!global_mock_waitqs_inited) {
1150 		global_mock_waitqs_init();
1151 	}
1152 	for (int i = 0; i < MOCK_WAITQS_NUM; ++i) {
1153 		if (global_mock_waitqs[i].current_event == event) {
1154 			return &global_mock_waitqs[i];
1155 		}
1156 	}
1157 	return NULL;
1158 }
1159 
1160 struct mock_waitq*
find_or_alloc_mock_waitq(event64_t event)1161 find_or_alloc_mock_waitq(event64_t event)
1162 {
1163 	if (!global_mock_waitqs_inited) {
1164 		global_mock_waitqs_init();
1165 	}
1166 	int first_free = -1;
1167 	for (int i = 0; i < MOCK_WAITQS_NUM; ++i) {
1168 		if (global_mock_waitqs[i].current_event == event) {
1169 			return &global_mock_waitqs[i];
1170 		} else if (first_free < 0 && global_mock_waitqs[i].current_event == 0) {
1171 			first_free = i;
1172 		}
1173 	}
1174 	PT_QUIET; PT_ASSERT_TRUE(first_free >= 0, "no more space in global_mock_waitqs");
1175 	global_mock_waitqs[first_free].current_event = event;
1176 	return &global_mock_waitqs[first_free];
1177 }
1178 
1179 // --------------- waitq mocks ------------------
1180 
1181 // pthread mocks
1182 
1183 struct pthread_mock_event_table_entry*
find_pthread_mock_event_entry(struct mock_process_state * s,event_t ev)1184 find_pthread_mock_event_entry(struct mock_process_state *s, event_t ev)
1185 {
1186 	for (int i = 0; i < PTHREAD_EVENTS_TABLE_SIZE; ++i) {
1187 		if (s->events[i].ev == ev) {
1188 			return &s->events[i];
1189 		}
1190 	}
1191 	return NULL;
1192 }
1193 
/*
 * Mock of thread_wakeup_prim for pthread mode: wakes waiter(s) registered
 * on `event` in the process-wide events table by signaling/broadcasting
 * the entry's condvar (the waiting side is pthread_mock_thread_block_reason).
 * Returns KERN_NOT_WAITING when nobody is registered for the event.
 * In fibers mode the real function is used (the waitq layer is mocked instead).
 */
T_MOCK_DYNAMIC(kern_return_t,
    thread_wakeup_prim, (
	    event_t event,
	    boolean_t one_thread,
	    wait_result_t result),
    (event, one_thread, result),
{
	if (ut_mocks_use_fibers) {
	        // fibers is mocking waitq apis, go forward calling the real thread_wakeup_prim
	        return thread_wakeup_prim(event, one_thread, result);
	}

	kern_return_t kr = KERN_SUCCESS;

	struct mock_process_state *s = get_proc_state();
	int ret = pthread_mutex_lock(&s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_wakeup pthread_mutex_lock");

	// No table entry means no thread is currently waiting on this event.
	struct pthread_mock_event_table_entry* event_entry = find_pthread_mock_event_entry(s, event);
	if (event_entry == NULL) {
	        kr = KERN_NOT_WAITING;
	        goto done;
	}
	if (one_thread) {
	        ret = pthread_cond_signal(&event_entry->cond);
	        PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_wakeup pthread_cond_signal");
	} else {
	        ret = pthread_cond_broadcast(&event_entry->cond);
	        PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_wakeup pthread_cond_broadcast");
	}
	done:
	pthread_mutex_unlock(&s->events_mutex);
	return kr;
});
1228 
/*
 * pthread-backed implementation of thread_block_reason (fibers disabled).
 * Registers the current thread's wait_event in the process events table and
 * sleeps on that entry's condvar until thread_wakeup_prim signals it.
 * Only the bare form is supported (no continuation, no AST reason).
 * Always returns THREAD_AWAKENED.
 */
wait_result_t
pthread_mock_thread_block_reason(
	thread_continue_t continuation,
	void *parameter,
	ast_t reason)
{
	PT_QUIET; PT_ASSERT_TRUE(continuation == THREAD_CONTINUE_NULL && parameter == NULL && reason == AST_NONE, "thread_block argument");

	struct mock_process_state *s = get_proc_state();
	int ret = pthread_mutex_lock(&s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_block pthread_mutex_lock");

	// find empty entry in table
	struct pthread_mock_event_table_entry *event_entry = find_pthread_mock_event_entry(s, 0);
	PT_QUIET; PT_ASSERT_NOTNULL(event_entry, "empty entry not found");

	// register the entry to this event
	event_entry->ev = (event_t)MOCK_current_thread_fast()->wait_event;

	// if it doesn't have a condition variable yet, create one (condvars are
	// initialized lazily and reused for the lifetime of the table)
	if (!event_entry->cond_inited) {
		ret = pthread_cond_init(&event_entry->cond, NULL);
		PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_block pthread_cond_init");
		event_entry->cond_inited = true;
	}

	// wait on variable. This releases the mutex, waits and reacquires it before returning
	ret = pthread_cond_wait(&event_entry->cond, &s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_block pthread_cond_wait");

	// reset the entry so that it can be reused (will be done by all waiters that woke up)
	event_entry->ev = 0;

	ret = pthread_mutex_unlock(&s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "thread_block pthread_mutex_unlock");

	return THREAD_AWAKENED;
}
1267 
/*
 * pthread-backed implementation of clear_wait (fibers disabled).
 *
 * NOTE(review): this looks up an EMPTY table entry (ev == 0) and re-zeroes
 * it, leaving the waiting thread's actual entry untouched and its condvar
 * unsignaled -- presumably it should find the entry matching `thread`'s
 * wait_event instead; confirm against callers.  `thread` and `result` are
 * currently unused.
 */
kern_return_t
pthread_mock_clear_wait(
	thread_t thread,
	wait_result_t result)
{
	struct mock_process_state *s = get_proc_state();
	int ret = pthread_mutex_lock(&s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "clear_wait pthread_mutex_lock");

	struct pthread_mock_event_table_entry *event_entry = find_pthread_mock_event_entry(s, 0);
	PT_QUIET; PT_ASSERT_NOTNULL(event_entry, "empty entry not found");

	event_entry->ev = 0;

	ret = pthread_mutex_unlock(&s->events_mutex);
	PT_QUIET; PT_ASSERT_POSIX_ZERO(ret, "clear_wait pthread_mutex_unlock");
	return KERN_SUCCESS;
}
1286 
1287 // fibers mocks
1288 
1289 T_MOCK(struct waitq *,
1290 _global_eventq, (event64_t event))
1291 {
1292 	if (!ut_mocks_use_fibers) {
1293 		return _global_eventq(event);
1294 	}
1295 
1296 	struct waitq *ret = (struct waitq *)find_or_alloc_mock_waitq(event);
1297 	return ret;
1298 }
1299 
/*
 * Mock of waitq_assert_wait64_locked (fibers mode): registers `thread` as a
 * waiter on the mock waitq and marks it TH_WAIT.  The thread does not block
 * here; blocking happens later in thread_block_reason.
 * Caller must hold the waitq lock.
 */
T_MOCK(wait_result_t,
waitq_assert_wait64_locked, (
	waitq_t waitq,
	event64_t wait_event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway,
	thread_t thread))
{
	if (waitq_use_real_impl(waitq)) {
		return waitq_assert_wait64_locked(waitq, wait_event, interruptible, urgency, deadline, leeway, thread);
	}

	struct mock_waitq *wq = MWQCAST(waitq);

	// First waiter binds the waitq to the event; later waiters must match it.
	if (wq->current_event == 0) {
		wq->current_event = wait_event;
	}

	PT_QUIET; PT_ASSERT_TRUE(wq->current_event == wait_event, "waitq_assert_wait64_locked another event queue");

	// Push onto the head of the waiter list.
	struct mock_thread * mock_thread = (struct mock_thread*)thread; // !!! ASSUME every thread_t is created from mock_thread
	mock_thread->wq_next = wq->extra->waiting_threads;
	wq->extra->waiting_threads = mock_thread;
	wq->extra->waiting_thread_count++;

	// NOTE: interruptible/urgency/deadline/leeway are ignored by the mock.
	thread->wait_event = wait_event; // Store waiting event in thread context
	thread->state |= TH_WAIT; // Set thread state to waiting
	thread->waitq = waitq;

	return THREAD_WAITING; // Indicate thread is now waiting, but not blocked yet
}
1333 
1334 T_MOCK(wait_result_t,
1335 waitq_assert_wait64, (
1336 	struct waitq *waitq,
1337 	event64_t wait_event,
1338 	wait_interrupt_t interruptible,
1339 	uint64_t deadline))
1340 {
1341 	if (waitq_use_real_impl(waitq)) {
1342 		return waitq_assert_wait64(waitq, wait_event, interruptible, deadline);
1343 	}
1344 
1345 	thread_t thread = MOCK_current_thread_fast();
1346 
1347 	MOCK_waitq_lock(waitq);
1348 	wait_result_t res = MOCK_waitq_assert_wait64_locked(waitq, wait_event, interruptible,
1349 	    TIMEOUT_URGENCY_SYS_NORMAL, deadline, TIMEOUT_NO_LEEWAY, thread);
1350 	MOCK_waitq_unlock(waitq);
1351 	return res;
1352 }
1353 
1354 static void
mock_waitq_clear_wait(struct mock_thread * thread,struct mock_waitq * wq)1355 mock_waitq_clear_wait(struct mock_thread * thread, struct mock_waitq *wq)
1356 {
1357 	struct mock_thread ** mock_thread = &wq->extra->waiting_threads;
1358 	int removed = 0;
1359 	while (*mock_thread) {
1360 		if (*mock_thread == thread) {
1361 			*mock_thread = (*mock_thread)->wq_next;
1362 			removed = 1;
1363 			break;
1364 		}
1365 		mock_thread = &(*mock_thread)->wq_next;
1366 	}
1367 	PT_QUIET; PT_ASSERT_TRUE(removed, "thread_block thread not in wq");
1368 	thread->wq_next = NULL;
1369 
1370 	wq->extra->waiting_thread_count--;
1371 	if (wq->extra->waiting_thread_count == 0) {
1372 		wq->current_event = 0; // reset current_event
1373 	}
1374 	PT_QUIET; PT_ASSERT_TRUE(wq->extra->waiting_thread_count >= 0, "something bad");
1375 }
1376 
1377 static struct mock_thread *
mock_waitq_pop_wait(struct mock_waitq * wq)1378 mock_waitq_pop_wait(struct mock_waitq *wq)
1379 {
1380 	if (wq->extra->waiting_thread_count == 0) {
1381 		return NULL;
1382 	}
1383 
1384 	struct mock_thread * thread = wq->extra->waiting_threads;
1385 	wq->extra->waiting_threads = thread->wq_next;
1386 	thread->wq_next = NULL;
1387 
1388 	wq->extra->waiting_thread_count--;
1389 	if (wq->extra->waiting_thread_count == 0) {
1390 		wq->current_event = 0; // reset current_event
1391 	}
1392 	PT_QUIET; PT_ASSERT_TRUE(wq->extra->waiting_thread_count >= 0, "something bad");
1393 
1394 	return thread;
1395 }
1396 
/*
 * Mock of thread_block_reason.  In pthread mode it defers to
 * pthread_mock_thread_block_reason; in fibers mode it blocks the current
 * fiber on its waitq's condition until a wakeup path has set TH_WAKING.
 * Only the bare form (no continuation, AST_NONE) is supported.
 * Returns the wait result stored by the waker.
 */
T_MOCK_DYNAMIC(wait_result_t,
    thread_block_reason, (
	    thread_continue_t continuation,
	    void *parameter,
	    ast_t reason), (
	    continuation,
	    parameter,
	    reason),
{
	if (!ut_mocks_use_fibers) {
	        return pthread_mock_thread_block_reason(continuation, parameter, reason);
	}

	PT_QUIET; PT_ASSERT_TRUE(continuation == THREAD_CONTINUE_NULL && parameter == NULL && reason == AST_NONE, "thread_block argument");

	thread_t thread = current_thread();
	PT_QUIET; PT_ASSERT_TRUE(thread->state & TH_WAIT, "thread_block called but thread state is not TH_WAIT");

	/*
	 * In case of a window between assert_wait and thread_block
	 * another thread could wake up the current thread after being added to the waitq
	 * but before the block.
	 * In this case, the thread will still be TH_WAIT but without an assigned waitq.
	 * TH_WAKING must be set.
	 */
	struct mock_waitq *wq = MWQCAST(thread->waitq);
	if (wq == NULL) {
	        PT_QUIET; PT_ASSERT_TRUE(thread->state & TH_WAKING, "with waitq == NULL there must be TH_WAKING set");
	        thread->state &= ~TH_WAKING;
	        goto awake_thread;
	}

	// Block this fiber until a wakeup path signals the waitq's condition.
	fibers_condition_wait(&wq->extra->cond);

	if (thread->state & TH_WAKING) {
	        thread->state &= ~TH_WAKING;
	} else {
	        // is this possible? TH_WAKING is always set ATM in the mocks, keep this code to be more robust
	        thread->waitq.wq_q = NULL;
	        mock_waitq_clear_wait((struct mock_thread *)thread, wq);
	}

	awake_thread:
	thread->state &= ~TH_WAIT;
	thread->state |= TH_RUN;

	return thread->wait_result;
});
1445 
/*
 * Mock of clear_wait (fibers mode): detaches `thread` from its waitq,
 * clears TH_WAIT and stores `wresult` as its wait result.
 * NOTE(review): unlike the wakeup paths above, this does not set TH_WAKING
 * nor wake the blocked fiber -- confirm callers only use it on threads
 * that have not yet reached thread_block.
 */
T_MOCK(kern_return_t,
clear_wait, (thread_t thread, wait_result_t wresult))
{
	if (!ut_mocks_use_fibers) {
		return pthread_mock_clear_wait(thread, wresult);
	}

	struct mock_waitq *wq = MWQCAST(thread->waitq);
	PT_QUIET; PT_ASSERT_TRUE(wq != NULL, "thread->waitq is NULL");

	thread->state &= ~TH_WAIT;
	thread->waitq.wq_q = NULL;
	thread->wait_result = wresult;

	mock_waitq_clear_wait((struct mock_thread *)thread, wq);

	return KERN_SUCCESS;
}
1464 
// Arguments threaded through waitq_wakeup_fiber_callback() during a wakeup.
typedef struct {
	wait_result_t wait_result; // result to deliver to each awakened thread
} waitq_wakeup_args_t;
1468 
/*
 * Invoked by fibers_condition_wakeup_some() for each fiber it wakes:
 * stores the wakeup result in the thread, marks it TH_WAKING (cleared
 * later in thread_block_reason) and detaches it from its waitq.
 */
static void
waitq_wakeup_fiber_callback(void *arg, fiber_t target)
{
	waitq_wakeup_args_t *wakeup_args = (waitq_wakeup_args_t*)arg;
	struct mock_thread *thread = (struct mock_thread *)target->extra;
	assert(thread);

	struct mock_waitq *wq = MWQCAST(thread->th.waitq);
	assert(wq);

	thread->th.state |= TH_WAKING;
	thread->th.waitq.wq_q = NULL;
	thread->th.wait_result = wakeup_args->wait_result;

	mock_waitq_clear_wait(thread, wq);
}
1485 
// Called from thread_wakeup_nthreads_prim
/*
 * Mock: wakes up to `nthreads` fibers blocked on the waitq's condition,
 * delivering `result` via waitq_wakeup_fiber_callback, then sweeps any
 * waiters that were queued but had not yet reached thread_block.
 * Honors WAITQ_UNLOCK / WAITQ_ENABLE_INTERRUPTS on exit and returns the
 * number of threads woken.
 */
T_MOCK(uint32_t,
waitq_wakeup64_nthreads_locked, (
	waitq_t waitq,
	event64_t wake_event,
	wait_result_t result,
	waitq_wakeup_flags_t flags,
	uint32_t nthreads))
{
	if (waitq_use_real_impl(waitq)) {
		return waitq_wakeup64_nthreads_locked(waitq, wake_event, result, flags, nthreads);
	}

	// RANGELOCKINGTODO rdar://150846598 flags
	waitq_wakeup_args_t wakeup_args = {
		.wait_result = result
	};

	struct mock_waitq *wq = MWQCAST(waitq);
	PT_QUIET; PT_ASSERT_TRUE(wq->current_event == wake_event, "waitq_wakeup64_nthreads current_event is wrong");

	// Avoid to trigger a switch in fibers_condition_wakeup_some before a valid state in the waitq
	fibers_current->may_yield_disabled++;

	FIBERS_LOG(FIBERS_LOG_DEBUG, "waitq_wakeup64_nthreads_locked nthreads=%u wake_event=%lld", nthreads, wake_event);

	int count = fibers_condition_wakeup_some(&wq->extra->cond, nthreads, &waitq_wakeup_fiber_callback, &wakeup_args);

	/*
	 * In case of a window in which a thread is pushed to the waitq but thread_block was still not called
	 * when another thread wakes up the threads in the waitq here.
	 * fibers_condition_wakeup_some will not find these fibers as they are not waiting on the condition,
	 * In this case these fibers must be in FIBER_STOP that means that they are ready to be scheduled,
	 * but we still need to take action here to remove them from the waitq and clear the state.
	 */
	while (wq->extra->waiting_thread_count && count < nthreads) {
		struct mock_thread *thread = mock_waitq_pop_wait(wq);
		PT_QUIET; PT_ASSERT_TRUE(thread->fiber->state & FIBER_STOP, "leftover fiber in waitq not in FIBER_STOP");
		thread->th.state |= TH_WAKING;
		thread->th.waitq.wq_q = NULL;
		thread->th.wait_result = result;
		++count;
	}

	fibers_current->may_yield_disabled--;

	if (waitq_should_unlock(flags)) {
		MOCK_waitq_unlock(waitq);
	}
	if (waitq_should_enable_interrupts(flags)) {
		MOCK_ml_set_interrupts_enabled(1);
	}

	return (uint32_t)count;
}
1541 
/*
 * Mock: identifies the head waiter (if any), marks it TH_WAKING with
 * THREAD_AWAKENED and removes it from the waitq WITHOUT making its fiber
 * runnable; the caller later resumes it via waitq_resume_identified_thread.
 * Preemption stays disabled on the identified path until that resume.
 * Returns THREAD_NULL when the waitq has no waiters.
 */
T_MOCK(thread_t,
waitq_wakeup64_identify_locked, (
	waitq_t waitq,
	event64_t wake_event,
	waitq_wakeup_flags_t flags))
{
	if (waitq_use_real_impl(waitq)) {
		return waitq_wakeup64_identify_locked(waitq, wake_event, flags);
	}

	// RANGELOCKINGTODO rdar://150846598 flags

	struct mock_waitq *wq = MWQCAST(waitq);
	PT_QUIET; PT_ASSERT_TRUE(wq->current_event == wake_event, "waitq_wakeup64_identify_locked current_event is wrong");

	// RANGELOCKINGTODO rdar://150845975 for fuzzing select random, not the top of the queue
	struct mock_thread * mock_thread = wq->extra->waiting_threads;
	if (mock_thread == NULL) {
		return THREAD_NULL;
	}

	// Preemption will be re-enabled when the thread is resumed in `waitq_resume_identify_thread`
	MOCK__disable_preemption();

	mock_thread->th.state |= TH_WAKING;
	mock_thread->th.waitq.wq_q = NULL;
	mock_thread->th.wait_result = THREAD_AWAKENED;

	mock_waitq_clear_wait(mock_thread, wq);

	FIBERS_LOG(FIBERS_LOG_DEBUG, "waitq_wakeup64_identify_locked identified fiber %d", mock_thread->fiber->id);

	if (waitq_should_unlock(flags)) {
		MOCK_waitq_unlock(waitq);
	}
	if (waitq_should_enable_interrupts(flags)) {
		MOCK_ml_set_interrupts_enabled(1);
	}

	fibers_may_yield_internal();

	return &mock_thread->th;
}
1585 
1586 T_MOCK(void,
1587 waitq_resume_identified_thread, (
1588 	waitq_t waitq,
1589 	thread_t thread,
1590 	wait_result_t result,
1591 	waitq_wakeup_flags_t flags))
1592 {
1593 	if (waitq_use_real_impl(waitq)) {
1594 		return waitq_resume_identified_thread(waitq, thread, result, flags);
1595 	}
1596 
1597 	// RANGELOCKINGTODO rdar://150846598 other flags
1598 
1599 	struct mock_thread * mock_thread = (struct mock_thread*)thread; // !!! ASSUME every thread_t is created from mock_thread
1600 	struct mock_waitq *wq = MWQCAST(waitq);
1601 
1602 	bool found = fibers_condition_wakeup_identified(&wq->extra->cond, mock_thread->fiber);
1603 	if (!found) {
1604 		/*
1605 		 * In case of a window in which a thread is pushed to the waitq but thread_block was still not called
1606 		 * when the thread is identified by another one and resumed, we pop it from the waitq in waitq_wakeup64_identify_locked
1607 		 * but we will not find it in wq->cond.wait_queue.
1608 		 * In this case it is not needed any action as the fiber must be in FIBER_STOP and can already be scheduled.
1609 		 */
1610 		PT_QUIET; PT_ASSERT_TRUE(mock_thread->fiber->state & FIBER_STOP, "waitq_resume_identified_thread fiber not found in condition and not in FIBER_STOP");
1611 	}
1612 
1613 	// Paired with the call to `waitq_wakeup64_identify_locked`
1614 	MOCK__enable_preemption();
1615 
1616 	fibers_may_yield_internal_with_reason(
1617 		FIBERS_YIELD_REASON_WAKEUP |
1618 		FIBERS_YIELD_REASON_ERROR_IF(!found));
1619 }
1620 
1621 // Allow to cause a context switch from a function that can be called from XNU
1622 T_MOCK(void,
1623 ut_fibers_ctxswitch, (void))
1624 {
1625 	if (ut_mocks_use_fibers) {
1626 		fibers_yield();
1627 	}
1628 }
1629 
1630 // Allow to cause a context switch to a specific fiber from a function that can be called from XNU
1631 T_MOCK(void,
1632 ut_fibers_ctxswitch_to, (int fiber_id))
1633 {
1634 	if (ut_mocks_use_fibers) {
1635 		fibers_yield_to(fiber_id);
1636 	}
1637 }
1638 
1639 // Get the current fiber id from a function that can be called from XNU
1640 T_MOCK(int,
1641 ut_fibers_current_id, (void))
1642 {
1643 	if (ut_mocks_use_fibers) {
1644 		return fibers_current->id;
1645 	}
1646 	return -1;
1647 }
1648 
1649 // --------------- preemption ------------------
1650 
1651 #ifdef __BUILDING_WITH_SANCOV_LOAD_STORES__
1652 // Optional: uncomment to enable yield at every basic block entry
1653 /*
1654  *  T_MOCK(void,
1655  *  __sanitizer_cov_trace_pc_guard, (uint32_t * guard))
1656  *  {
1657  *	   fibers_may_yield();
1658  *  }
1659  */
1660 
// True when `ptr` is aligned to `size` (size must be a power of two).
#define IS_ALIGNED(ptr, size) ( (((uintptr_t)(ptr)) & (((uintptr_t)(size)) - 1)) == 0 )
// Treat naturally-aligned accesses of at most 8 bytes as atomic (skipped by the checker).
#define IS_ATOMIC(ptr, size) ( (size) <= sizeof(uint64_t) && IS_ALIGNED(ptr, size) )
1663 
1664 // These functions can be called from XNU to enter/exit atomic regions in which the data checker is disabled
T_MOCK(void,
data_race_checker_atomic_begin, (void))
{
	// Enter an atomic region: the data race checker is disabled until _end.
	fibers_checker_atomic_begin();
}
T_MOCK(void,
data_race_checker_atomic_end, (void))
{
	// Leave the atomic region entered by data_race_checker_atomic_begin.
	fibers_checker_atomic_end();
}
1675 
1676 /*
1677  * Detecting data races on memory operations:
1678  * Memory operation functions are used to check for data races using the fibers checkers API, a software implementation of DataCollider.
1679  * The idea is to set a watchpoint before context switching and report a data race every time a concurrent access (watchpoint hit) is in between a write or a write in between a load.
1680  * To be more robust, we also check that the value pointed the memory operation address before the context switch is still the same after the context switch.
1681  * If not, very likely it is a data race. Atomic memory operations should be excluded from this, we use the IS_ATOMIC macro to filter memory loads.
1682  * Note: atomic_fetch_add_explicit() et al. on ARM64 are compiled to LDADD et al. that seem to not be supported by __sanitizer_cov_loadX, ok for us we want to skip atomic operations.
1683  */
/*
 * Core of the data-race checker for one instrumented load/store: when the
 * scheduler decides to yield, arm a watchpoint on [addr, addr+size), snapshot
 * the value, context-switch, then compare the value after the switch --
 * a changed value is reported as a race.  Expects `addr` (type*) in scope
 * at the expansion site.  No-op while may_yield is disabled.
 */
#define SANCOV_LOAD_STORE_DATA_CHECKER(type, size, access_type) do {                            \
	    if (fibers_current->may_yield_disabled) {                                               \
	        return;                                                                             \
	    }                                                                                       \
	    if (fibers_scheduler->fibers_should_yield(fibers_scheduler_context,                     \
	        fibers_may_yield_probability, FIBERS_YIELD_REASON_PREEMPTION_TRIGGER)) {            \
	        volatile type before = *addr;                                                       \
	        void *pc = __builtin_return_address(0);                                             \
	        bool has_wp = check_and_set_watchpoint(pc, (uintptr_t)addr, size, access_type);     \
                                                                                                \
	        fibers_queue_push(&fibers_run_queue, fibers_current);                               \
	        fibers_choose_next(FIBER_STOP);                                                     \
                                                                                                \
	        if (has_wp) {                                                                       \
	            post_check_and_remove_watchpoint((uintptr_t)addr, size, access_type);           \
	        }                                                                                   \
	        type after = *addr;                                                                 \
	        if (before != after) {                                                              \
	            report_value_race((uintptr_t)addr, size, access_type);                          \
	        }                                                                                   \
	    }                                                                                       \
	} while (0)
1706 
1707 /*
1708  * Mock the SanitizerCoverage load/store instrumentation callbacks (original in san_attached.c).
1709  * The functions are execute at every memory operations in libxnu and in the test binary, libmocks is excluded.
1710  * Functions and files in tools/sanitizers-ignorelist are excluded from instrumentation.
1711  */
/*
 * Expands to the __sanitizer_cov_load<size>/__sanitizer_cov_store<size> mock
 * pair for one access width.  Loads skip the checker for atomic (naturally
 * aligned, <= 8 byte) accesses; stores only skip it when the checker is
 * globally or per-fiber disabled.  Either way a preemption-point yield is
 * still offered.
 */
#define MOCK_SANCOV_LOAD_STORE(type, size)                                                                       \
	__attribute__((optnone))                                                                                     \
	T_MOCK(void,                                                                                                 \
	__sanitizer_cov_load##size, (type* addr))                                                                    \
	{                                                                                                            \
	    if (!ut_fibers_use_data_race_checker || IS_ATOMIC(addr, size) || fibers_current->disable_race_checker) { \
	        fibers_may_yield_with_reason(FIBERS_YIELD_REASON_PREEMPTION_TRIGGER);                                \
	        return;                                                                                              \
	    }                                                                                                        \
	    SANCOV_LOAD_STORE_DATA_CHECKER(type, size, ACCESS_TYPE_LOAD);                                            \
	}                                                                                                            \
                                                                                                                 \
	__attribute__((optnone))                                                                                     \
	T_MOCK(void,                                                                                                 \
	__sanitizer_cov_store##size, (type* addr))                                                                   \
	{   /* do not care about atomicity for stores */                                                             \
	    if (!ut_fibers_use_data_race_checker || fibers_current->disable_race_checker) {                          \
	        fibers_may_yield_with_reason(FIBERS_YIELD_REASON_PREEMPTION_TRIGGER);                                \
	        return;                                                                                              \
	    }                                                                                                        \
	    SANCOV_LOAD_STORE_DATA_CHECKER(type, size, ACCESS_TYPE_STORE);                                           \
	}

// Instantiate the mock pair for every access width SanitizerCoverage emits.
MOCK_SANCOV_LOAD_STORE(uint8_t, 1)
MOCK_SANCOV_LOAD_STORE(uint16_t, 2)
MOCK_SANCOV_LOAD_STORE(uint32_t, 4)
MOCK_SANCOV_LOAD_STORE(uint64_t, 8)
MOCK_SANCOV_LOAD_STORE(__uint128_t, 16)
1740 
#endif // __BUILDING_WITH_SANCOV_LOAD_STORES__
1742