xref: /xnu-8019.80.24/tests/turnstile_multihop.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * turnstile_multihop: Tests turnstile and multi hop priority propagation.
3  */
4 
5 #ifdef T_NAMESPACE
6 #undef T_NAMESPACE
7 #endif
8 
9 #include <darwintest.h>
10 #include <darwintest_multiprocess.h>
11 
12 #include <dispatch/dispatch.h>
13 #include <pthread.h>
14 #include <launch.h>
15 #include <mach/mach.h>
16 #include <mach/message.h>
17 #include <mach/mach_voucher.h>
18 #include <pthread/workqueue_private.h>
19 #include <voucher/ipc_pthread_priority_types.h>
20 #include <servers/bootstrap.h>
21 #include <stdlib.h>
22 #include <sys/event.h>
23 #include <unistd.h>
24 #include <crt_externs.h>
25 #include <signal.h>
26 #include <sys/types.h>
27 #include <sys/sysctl.h>
28 #include <libkern/OSAtomic.h>
29 #include <sys/wait.h>
30 
31 #include "turnstile_multihop_helper.h"
32 
33 T_GLOBAL_META(T_META_NAMESPACE("xnu.turnstile_multihop"));
34 
/* Generous per-helper timeout: the test sleeps repeatedly and runs under load. */
#define HELPER_TIMEOUT_SECS (3000)

/* Wire format of the test message: a complex mach message carrying exactly
 * one port descriptor (the peer connection port). */
struct test_msg {
	mach_msg_header_t header;
	mach_msg_body_t body;
	mach_msg_port_descriptor_t port_descriptor;
};

/* When true, the pri-60 thread also creates default-QoS CPU load before
 * contending on the UI lock (set by the SPIN test variant). */
static boolean_t spin_for_ever = false;

/* When true, client sends add MACH_SEND_NOIMPORTANCE (noimportance variant). */
static boolean_t test_noimportance = false;

/* msgh_id stamped on every client message. */
#define EXPECTED_MESSAGE_ID 0x100

/* Forward declarations. */
static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *));
static uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds);
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token);
static int
sched_terminate_load(void *load_token) __unused;
static void do_work(int num);
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos);

static void *sched_load_thread(void *);

/* Bookkeeping for the CPU-load generator: one spinner thread per CPU.
 * threads_should_exit is volatile because spinners poll it from other threads. */
struct load_token_context {
	volatile int threads_should_exit;
	int thread_count;
	qos_class_t qos;
	pthread_t *threads;
};

/* Mach timebase ratio, populated once via pthread_once. */
static struct mach_timebase_info sched_mti;
static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT;
72 
73 static void
sched_mti_init(void)74 sched_mti_init(void)
75 {
76 	mach_timebase_info(&sched_mti);
77 }
78 uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds)79 nanoseconds_to_absolutetime(uint64_t nanoseconds)
80 {
81 	pthread_once(&sched_mti_once_control, sched_mti_init);
82 
83 	return (uint64_t)(nanoseconds * (((double)sched_mti.denom) / ((double)sched_mti.numer)));
84 }
85 
86 static int
sched_create_load_at_qos(qos_class_t qos,void ** load_token)87 sched_create_load_at_qos(qos_class_t qos, void **load_token)
88 {
89 	struct load_token_context *context = NULL;
90 	int ret;
91 	int ncpu;
92 	size_t ncpu_size = sizeof(ncpu);
93 	int nthreads;
94 	int i;
95 	pthread_attr_t attr;
96 
97 	ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
98 	if (ret == -1) {
99 		T_LOG("sysctlbyname(hw.ncpu)");
100 		return errno;
101 	}
102 
103 	T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu);
104 
105 	nthreads = ncpu;
106 	T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads);
107 
108 	ret = pthread_attr_init(&attr);
109 	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init");
110 
111 	if (&pthread_attr_set_qos_class_np) {
112 		ret = pthread_attr_set_qos_class_np(&attr, qos, 0);
113 		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_set_qos_class_np");
114 	}
115 
116 	context = calloc(1, sizeof(*context));
117 	if (context == NULL) {
118 		T_QUIET; T_LOG("calloc returned error"); return ENOMEM;
119 	}
120 
121 	context->threads_should_exit = 0;
122 	context->thread_count = nthreads;
123 	context->qos = qos;
124 	context->threads = calloc((unsigned int)nthreads, sizeof(pthread_t));
125 
126 	OSMemoryBarrier();
127 
128 	for (i = 0; i < nthreads; i++) {
129 		ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context);
130 		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create");
131 		T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
132 	}
133 
134 	ret = pthread_attr_destroy(&attr);
135 	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_destroy");
136 
137 	*load_token = context;
138 
139 	return 0;
140 }
141 
142 static void *
sched_load_thread(void * arg)143 sched_load_thread(void *arg)
144 {
145 	struct load_token_context *context = (struct load_token_context *)arg;
146 
147 	T_QUIET; T_LOG("%s: Thread started %p\n", __FUNCTION__, (void *)pthread_self());
148 
149 	while (!context->threads_should_exit) {
150 		uint64_t start = mach_absolute_time();
151 		uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC);
152 
153 		while ((mach_absolute_time() < end) && !context->threads_should_exit) {
154 			;
155 		}
156 	}
157 
158 	T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self());
159 
160 	return NULL;
161 }
162 
163 static int
sched_terminate_load(void * load_token)164 sched_terminate_load(void *load_token)
165 {
166 	int ret;
167 	int i;
168 	struct load_token_context *context = (struct load_token_context *)load_token;
169 
170 	context->threads_should_exit = 1;
171 	OSMemoryBarrier();
172 
173 	for (i = 0; i < context->thread_count; i++) {
174 		T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
175 		ret = pthread_join(context->threads[i], NULL);
176 		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join");
177 	}
178 
179 	free(context->threads);
180 	free(context);
181 
182 	return 0;
183 }
184 
185 
// Find the first num primes, simply as a means of doing work.
// `volatile` keeps the trial-division loop from being optimized away.
static void
do_work(int num)
{
	volatile int candidate = 3, found, divisor;

	found = 2;
	while (found <= num) {
		for (divisor = 2; divisor <= candidate; divisor++) {
			if (candidate % divisor == 0) {
				break;
			}
		}
		if (divisor == candidate) {
			found++;
		}
		candidate++;
	}
}
204 
205 #pragma mark pthread callbacks
206 
/*
 * Workqueue "new worker thread" callback. The test routes everything through
 * the workloop handler, so creation of a plain worker thread is a failure.
 */
static void
worker_cb(pthread_priority_t __unused priority)
{
	T_FAIL("a worker thread was created");
}
212 
/*
 * Legacy kevent callback. Events must be delivered to the workloop handler
 * instead, so reaching this path is a failure.
 */
static void
event_cb(void ** __unused events, int * __unused nevents)
{
	T_FAIL("a kevent routine was called instead of workloop");
}
218 
219 static uint32_t
get_user_promotion_basepri(void)220 get_user_promotion_basepri(void)
221 {
222 	mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
223 	struct thread_policy_state thread_policy;
224 	boolean_t get_default = FALSE;
225 	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
226 
227 	kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
228 	    (thread_policy_t)&thread_policy, &count, &get_default);
229 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
230 	return thread_policy.thps_user_promotion_basepri;
231 }
232 
233 static uint32_t
get_thread_base_priority(void)234 get_thread_base_priority(void)
235 {
236 	kern_return_t kr;
237 	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
238 
239 	policy_timeshare_info_data_t timeshare_info;
240 	mach_msg_type_number_t count = POLICY_TIMESHARE_INFO_COUNT;
241 
242 	kr = thread_info(thread_port, THREAD_SCHED_TIMESHARE_INFO,
243 	    (thread_info_t)&timeshare_info, &count);
244 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
245 
246 	return (uint32_t)timeshare_info.base_priority;
247 }
248 
249 
/* kevent_id workloop identifiers: one for the bootstrap listener port,
 * one for the peer connection port received in-transit. */
#define LISTENER_WLID  0x100
#define CONN_WLID      0x200
252 
253 static uint32_t
register_port_options(void)254 register_port_options(void)
255 {
256 	return MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
257 	       MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
258 	       MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
259 	       MACH_RCV_VOUCHER;
260 }
261 
/*
 * Attach `port` to the workloop identified by `wlid` as a one-shot
 * (EV_DISPATCH) EVFILT_MACHPORT knote requesting MAINTENANCE QoS, so any
 * higher servicer priority observed later must come from a kernel boost.
 */
static void
register_port(uint64_t wlid, mach_port_t port)
{
	int r;

	struct kevent_qos_s kev = {
		.ident  = port,
		.filter = EVFILT_MACHPORT,
		.flags  = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED,
		.fflags = register_port_options(),
		.data   = 1,
		.qos    = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0)
	};

	struct kevent_qos_s kev_err = { 0 };

	/* Setup workloop for mach msg rcv */
	r = kevent_id(wlid, &kev, 1, &kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id");
	T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id");
}
285 
/*
 * Basic WL handler callback, it checks the
 * effective Qos of the servicer thread.
 *
 * Shared by both workloops: LISTENER_WLID (the bootstrap service port) and
 * CONN_WLID (the peer connection port registered from the first message).
 */
static void
workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events)
{
	/* Tracks whether the listener already saw the peer check-in message. */
	static bool got_peer;

	struct kevent_qos_s *kev = eventslist[0];
	mach_msg_header_t *hdr;
	struct test_msg *tmsg;

	T_LOG("Workloop handler %s called. Received message on 0x%llx",
	    __func__, *workloop_id);

	/* Skip the test if we can't check Qos */
	if (geteuid() != 0) {
		T_SKIP("kevent_qos test requires root privileges to run.");
	}

	T_QUIET; T_ASSERT_EQ(*events, 1, "should have one event");

	/* The port was registered at MAINTENANCE; any higher effective priority
	 * below must come from a boost, not from the requested QoS. */
	T_EXPECT_REQUESTED_QOS_EQ(QOS_CLASS_MAINTENANCE, "message handler should have MT requested QoS");

	/* With direct delivery, ext[0] carries the received message's address. */
	hdr = (mach_msg_header_t *)kev->ext[0];
	T_ASSERT_NOTNULL(hdr, "has a message");
	T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size");
	tmsg = (struct test_msg *)hdr;

	switch (*workloop_id) {
	case LISTENER_WLID:
		T_LOG("Registering peer connection");
		T_QUIET; T_ASSERT_FALSE(got_peer, "Should not have seen peer yet");
		got_peer = true;
		break;

	case CONN_WLID:
		T_LOG("Received message on peer");
		break;

	default:
		T_FAIL("???");
	}

	/* Give the client-side chain time to form, then burn CPU so the
	 * priority checks below observe a settled state. */
	sleep(5);
	T_LOG("Do some CPU work.");
	do_work(5000);

	/* Check if the override now is IN + 60 boost */
	T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED,
	    "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED");
	T_EXPECT_EQ(get_user_promotion_basepri(), 60u,
	    "dispatch_source event handler should be overridden at 60");

	T_EXPECT_EQ(get_thread_base_priority(), 60u,
	    "dispatch_source event handler should have base pri at 60");

	if (*workloop_id == LISTENER_WLID) {
		/* First message: register the in-transit connection port with the
		 * CONN workloop, then re-arm this knote by handing it back. */
		register_port(CONN_WLID, tmsg->port_descriptor.name);

		kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED;
		kev->fflags = register_port_options();
		kev->ext[0] = kev->ext[1] = kev->ext[2] = kev->ext[3] = 0;
		*events = 1;
	} else {
		/* this will unblock the waiter */
		mach_msg_destroy(hdr);
		*events = 0;

		/*
		 * Destroying the message will send a send-once notification for reply port, once the
		 * send-once notification is consumed (by the waiting thread), only then the actual
		 * send-once right is destroyed and only then the push will go away.
		 */
		T_LOG("Sleeping for 5 seconds so waiting thread is unblocked\n");
		sleep(5);

		/* now that the message is destroyed, the priority should be gone */
		T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_MAINTENANCE,
		    "dispatch_source event handler QoS should be QOS_CLASS_MAINTENANCE after destroying message");
		T_EXPECT_LE(get_user_promotion_basepri(), 0u,
		    "dispatch_source event handler should not be overridden after destroying message");
		T_EXPECT_LE(get_thread_base_priority(), 4u,
		    "dispatch_source event handler should have base pri at 4 or less after destroying message");
	}
}
373 
/*
 * Launch the server helper via launchd (system domain, using the test's
 * plist) and the client helper as a forked process, then wait for both
 * to finish within HELPER_TIMEOUT_SECS.
 */
static void
run_client_server(const char *server_name, const char *client_name)
{
	dt_helper_t helpers[] = {
		dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist",
	    server_name, NULL, LAUNCH_SYSTEM_DOMAIN),
		dt_fork_helper(client_name)
	};
	dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS);
}
384 
385 #pragma mark Mach receive
386 
/* Bootstrap service name the launchd plist registers for the server. */
#define TURNSTILE_MULTIHOP_SERVICE_NAME "com.apple.xnu.test.turnstile_multihop"

/*
 * Check in with launchd for the test service and return the receive right
 * for its service port. Asserts on failure.
 */
static mach_port_t
get_server_port(void)
{
	mach_port_t port;
	kern_return_t kr = bootstrap_check_in(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
	return port;
}
398 
/*
 * Build a voucher carrying a PTHPRIORITY attribute encoding `qos`, used to
 * communicate the sender's priority on sync IPC. Caller owns the returned
 * voucher reference.
 */
static mach_voucher_t
create_pthpriority_voucher(mach_msg_priority_t qos)
{
	/* Buffer holds one recipe header plus its inline payload. */
	char voucher_buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t)];

	mach_voucher_t voucher = MACH_PORT_NULL;
	kern_return_t ret;
	ipc_pthread_priority_value_t ipc_pthread_priority_value =
	    (ipc_pthread_priority_value_t)qos;

	mach_voucher_attr_raw_recipe_array_t recipes;
	mach_voucher_attr_raw_recipe_size_t recipe_size = 0;
	mach_voucher_attr_recipe_t recipe =
	    (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size];

	recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY;
	recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE;
	recipe->previous_voucher = MACH_VOUCHER_NULL;
	/* Payload is the encoded QoS value, copied inline after the header. */
	memcpy((char *)&recipe->content[0], &ipc_pthread_priority_value, sizeof(ipc_pthread_priority_value));
	recipe->content_size = sizeof(ipc_pthread_priority_value_t);
	recipe_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;

	recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0];

	ret = host_create_mach_voucher(mach_host_self(),
	    recipes,
	    recipe_size,
	    &voucher);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher");
	return voucher;
}
431 
/*
 * Send a complex mach message to send_port, optionally carrying msg_port as
 * a moved receive right and requesting a send-once reply right on reply_port.
 * Honors the global test_noimportance flag by adding MACH_SEND_NOIMPORTANCE.
 * Asserts on any send failure (10s send timeout).
 *
 * NOTE(review): this file-local `send` shadows the libc send(2) name; it is
 * static, so external linkage is unaffected.
 */
static void
send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port,
	mach_msg_priority_t qos,
	mach_msg_option_t options)
{
	kern_return_t ret = 0;

	struct test_msg send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port  = reply_port,
			.msgh_bits        = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
	    MACH_MSG_TYPE_MOVE_SEND,
	    MACH_MSGH_BITS_COMPLEX),
			.msgh_id          = EXPECTED_MESSAGE_ID,
			.msgh_size        = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name        = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type        = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	/* Sync sends carry a pthread-priority voucher encoding `qos`. */
	if (options & MACH_SEND_SYNC_USE_THRPRI) {
		send_msg.header.msgh_voucher_port = create_pthpriority_voucher(qos);
	}

	/* No port to move: drop the descriptor so the message is simple-bodied. */
	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE |
	    (test_noimportance ? MACH_SEND_NOIMPORTANCE : 0) |
	    options,
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,
	    0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}
485 
/*
 * Block in a sync receive (MACH_RCV_SYNC_WAIT) on rcv_port, linking this
 * thread's wait to notify_port so the kernel can propagate the turnstile
 * push. Returns the received message's msgh_id (the test expects a
 * MACH_NOTIFY_SEND_ONCE notification here). Asserts on receive failure.
 */
static mach_msg_id_t
receive(
	mach_port_t rcv_port,
	mach_port_t notify_port)
{
	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header =
		{
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port  = rcv_port,
			.msgh_size        = sizeof(rcv_msg),
		},
	};

	T_LOG("Client: Starting sync receive\n");

	kern_return_t kr;
	kr = mach_msg(&(rcv_msg.header),
	    MACH_RCV_MSG |
	    MACH_RCV_SYNC_WAIT,
	    0,
	    rcv_msg.header.msgh_size,
	    rcv_port,
	    0,
	    notify_port);

	T_ASSERT_MACH_SUCCESS(kr, "mach_msg rcv");

	return rcv_msg.header.msgh_id;
}
520 
/* ulocks forming the priority-inheritance chain, one per QoS hop. */
static lock_t lock_DEF;
static lock_t lock_IN;
static lock_t lock_UI;

/* Mach thread ports for each participant; each thread publishes its own
 * port as it starts, and later threads poll/boost them by port. */
static mach_port_t main_thread_port;
static mach_port_t def_thread_port;
static mach_port_t in_thread_port;
static mach_port_t ui_thread_port;
static mach_port_t sixty_thread_port;

/* Address/value pair handed to EVFILT_WORKLOOP to fake a dispatch_sync
 * owner (see dispatch_sync_wait/dispatch_sync_cancel). */
static uint64_t dispatch_sync_owner;
532 
533 static int
get_pri(thread_t thread_port)534 get_pri(thread_t thread_port)
535 {
536 	kern_return_t kr;
537 
538 	thread_extended_info_data_t extended_info;
539 	mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
540 	kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
541 	    (thread_info_t)&extended_info, &count);
542 
543 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
544 
545 	return extended_info.pth_curpri;
546 }
547 
/*
 * Name the calling thread "<fn_name> at pri NN" so priorities are visible
 * in debuggers and spindumps.
 */
static void
set_thread_name(const char *fn_name)
{
	char label[50] = "";
	int cur_pri = get_pri(pthread_mach_thread_np(pthread_self()));

	snprintf(label, sizeof(label), "%s at pri %2d", fn_name, cur_pri);
	pthread_setname_np(label);
}
560 
561 static void
thread_wait_to_block(mach_port_t thread_port)562 thread_wait_to_block(mach_port_t thread_port)
563 {
564 	thread_extended_info_data_t extended_info;
565 	kern_return_t kr;
566 
567 	while (1) {
568 		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
569 		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
570 		    (thread_info_t)&extended_info, &count);
571 
572 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
573 
574 		if (extended_info.pth_run_state == TH_STATE_WAITING) {
575 			T_LOG("Target thread blocked\n");
576 			break;
577 		}
578 		thread_switch(thread_port, SWITCH_OPTION_DEPRESS, 0);
579 	}
580 }
581 
582 static void
thread_wait_to_boost(mach_port_t thread_port,mach_port_t yield_thread,int priority)583 thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int priority)
584 {
585 	thread_extended_info_data_t extended_info;
586 	kern_return_t kr;
587 
588 	while (1) {
589 		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
590 		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
591 		    (thread_info_t)&extended_info, &count);
592 
593 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
594 
595 		if (extended_info.pth_priority >= priority) {
596 			T_LOG("Target thread boosted\n");
597 			break;
598 		}
599 		thread_switch(yield_thread, SWITCH_OPTION_DEPRESS, 0);
600 	}
601 }
602 
/*
 * Fake a dispatch_sync() wait on a workloop: issue an EVFILT_WORKLOOP
 * NOTE_WL_SYNC_WAIT kevent naming owner_thread as the workloop owner via
 * the (&dispatch_sync_owner, value) pair, so the kernel can propagate
 * promote_qos from this thread to the owner. Blocks until a matching
 * dispatch_sync_cancel() wakes this waiter.
 */
static void
dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_ADD | EV_DISABLE;
	fflags = NOTE_WL_SYNC_WAIT | NOTE_WL_DISCOVER_OWNER;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] =  {{
					      .ident = mach_thread_self(),
					      .filter = EVFILT_WORKLOOP,
					      .flags = action,
					      .fflags = fflags,
					      .udata = (uintptr_t) &def_thread_port,
					      .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
					      .ext[EV_EXTIDX_WL_MASK] = mask,
					      .ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
					      .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
				      }};

	/* Setup workloop to fake dispatch sync wait on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	/* NOTE(review): r is intentionally not asserted here; the call is
	 * expected to block until canceled — confirm. */
	T_QUIET; T_LOG("dispatch_sync_wait returned\n");
}
634 
/*
 * Counterpart to dispatch_sync_wait(): issue a NOTE_WL_SYNC_WAKE with
 * NOTE_WL_END_OWNERSHIP for the waiter identified by def_thread_port on
 * the same workloop (id 30), waking it and ending the fake ownership.
 */
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_DELETE | EV_ENABLE;
	fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] =  {{
					      .ident = def_thread_port,
					      .filter = EVFILT_WORKLOOP,
					      .flags = action,
					      .fflags = fflags,
					      .udata = (uintptr_t) &def_thread_port,
					      .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
					      .ext[EV_EXTIDX_WL_MASK] = mask,
					      .ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
					      .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
				      }};

	/* Setup workloop to fake dispatch sync wake on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	/* NOTE(review): r is intentionally not asserted here — confirm. */
	T_QUIET; T_LOG("dispatch_sync_cancel returned\n");
}
666 
/*
 * Top of the chain: a fixed-priority-60 thread that blocks on the UI ulock,
 * pushing its priority down the whole chain (UI -> IN -> DEF -> sync IPC)
 * to the server's workloop servicer. Ends the test via T_END.
 */
static void *
thread_at_sixty(void *arg __unused)
{
	int policy;
	struct sched_param param;
	int ret;
	void *load_token;
	uint64_t before_lock_time, after_lock_time;

	sixty_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Change our priority to 60 */
	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	param.sched_priority = 60;

	ret = pthread_setschedparam(pthread_self(), policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_setschedparam");

	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	T_LOG("My priority is %d", param.sched_priority);

	/* Don't contend until the IN thread has been boosted to 46 by UI. */
	thread_wait_to_boost(in_thread_port, ui_thread_port, 46);

	if (spin_for_ever) {
		/* Schedule load at Default */
		sched_create_load_at_qos(QOS_CLASS_DEFAULT, &load_token);
	}

	T_LOG("Thread at priority 60 trying to acquire UI lock");

	before_lock_time = mach_absolute_time();
	ull_lock(&lock_UI, 3, UL_UNFAIR_LOCK, 0);
	after_lock_time = mach_absolute_time();

	T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu \n",
	    (after_lock_time - before_lock_time));

	T_LOG("Wait for 5 seconds for the server to terminate\n");
	sleep(5);
	/* NOTE(review): T_END is expected to terminate the test here, so no
	 * return statement follows — confirm darwintest semantics. */
	T_END;
}
714 
/*
 * UI hop: owns lock_UI, then blocks on lock_IN so the IN thread inherits
 * this thread's priority. Spawns the pri-60 thread once the DEF thread has
 * been boosted to 37, proving the chain below is already propagating.
 */
static void *
thread_at_ui(void *arg __unused)
{
	ui_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);

	thread_wait_to_boost(def_thread_port, in_thread_port, 37);
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_sixty);

	T_EXPECT_GE(get_thread_base_priority(), 46u,
	    "thread_at_ui should have base pri 46 or greater");

	T_LOG("Thread at UI priority trying to acquire IN lock");
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
736 
/*
 * IN hop: owns lock_IN, then blocks on lock_DEF so the DEF thread inherits
 * this thread's priority. Spawns the UI thread once the maintenance (main)
 * thread has been boosted to 31.
 */
static void *
thread_at_in(void *arg __unused)
{
	in_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at IN priority got first lock ");

	thread_wait_to_boost(main_thread_port, def_thread_port, 31);

	/* Create a new thread at QOS_CLASS_USER_INTERACTIVE qos */
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_ui);

	T_LOG("Thread at IN priority trying to acquire default lock");
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
759 
/*
 * DEF hop: owns lock_DEF, then blocks in a fake dispatch_sync wait naming
 * the maintenance (main) thread as owner, linking the ulock chain to the
 * sync-IPC wait. Spawns the IN thread once the main thread has blocked.
 */
static void *
thread_at_default(void *arg __unused)
{
	def_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at DEFAULT priority got first lock ");

	thread_wait_to_block(main_thread_port);

	/* Create a new thread at QOS_CLASS_USER_INITIATED qos */
	thread_create_at_qos(QOS_CLASS_USER_INITIATED, thread_at_in);

	T_LOG("Thread at Default priority trying to wait on dispatch sync for maintenance thread");
	dispatch_sync_wait(main_thread_port, QOS_CLASS_DEFAULT);
	ull_unlock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	return NULL;
}
782 
/*
 * Bottom of the chain: a maintenance-QoS thread that sends a sync message
 * to its own connection port, checks the connection port in with the server
 * (moving the receive right in-transit), and blocks in a sync receive on
 * its special reply port — the hop the server's boost flows through.
 */
static void *
thread_at_maintenance(void *arg __unused)
{
	mach_port_t service_port;
	mach_port_t conn_port;
	mach_port_t special_reply_port;
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT,
	};

	main_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &service_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* Become the dispatch sync owner, dispatch_sync_owner will be set in dispatch_sync_wait function */

	/* Send a sync message */
	send(conn_port, special_reply_port, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Send an async checkin message */
	send(service_port, MACH_PORT_NULL, conn_port,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Create a new thread at QOS_CLASS_DEFAULT qos */
	thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default);

	/* Block on Sync IPC */
	mach_msg_id_t message_id = receive(special_reply_port, conn_port);

	/* The server destroys the message, which delivers a send-once
	 * notification back to this waiter rather than a real reply. */
	T_ASSERT_EQ(message_id, MACH_NOTIFY_SEND_ONCE, "got the expected send-once notification");

	T_LOG("received reply");

	dispatch_sync_cancel(def_thread_port, QOS_CLASS_DEFAULT);
	return NULL;
}
830 
/*
 * Client helper: builds the multi-hop chain (maintenance thread plus the
 * DEF/IN/UI/pri-60 threads it transitively spawns) and parks the helper's
 * main thread forever.
 */
T_HELPER_DECL(three_ulock_sync_ipc_hop,
    "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos")
{
	thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
	sigsuspend(0);
}

/* Same as above, but all client sends carry MACH_SEND_NOIMPORTANCE. */
T_HELPER_DECL(three_ulock_sync_ipc_hop_noimportance,
    "Create chain of 4 threads with 3 ulocks and 1 no-importance sync IPC at different qos")
{
	test_noimportance = true;
	thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
	sigsuspend(0);
}
845 
846 
847 static void
thread_create_at_qos(qos_class_t qos,void * (* function)(void *))848 thread_create_at_qos(qos_class_t qos, void * (*function)(void *))
849 {
850 	qos_class_t qos_thread;
851 	pthread_t thread;
852 	pthread_attr_t attr;
853 	int ret;
854 
855 	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
856 	if (ret != 0) {
857 		T_LOG("set priority failed\n");
858 	}
859 
860 	pthread_attr_init(&attr);
861 	pthread_attr_set_qos_class_np(&attr, qos, 0);
862 	pthread_create(&thread, &attr, function, NULL);
863 
864 	T_LOG("pthread created\n");
865 	pthread_get_qos_class_np(thread, &qos_thread, NULL);
866 }
867 
868 #pragma mark Mach receive - kevent_qos
869 
/*
 * Server helper: routes all workqueue activity into
 * workloop_cb_test_intransit (plain worker/kevent paths T_FAIL), registers
 * the bootstrap service port on the LISTENER workloop, and parks. The
 * sigsuspend should never return normally.
 */
T_HELPER_DECL(server_kevent_id,
    "Reply with the QoS that a dispatch source event handler ran with")
{
	T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop(
		    worker_cb, event_cb,
		    (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL);

	register_port(LISTENER_WLID, get_server_port());
	sigsuspend(0);
	T_ASSERT_FAIL("should receive a message");
}
881 
/* Declare a root-only T_DECL that runs `server_name` against `client_name`. */
#define TEST_MULTIHOP(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	                "Event delivery using a kevent_id", \
	                T_META_ASROOT(YES)) \
	{ \
	        run_client_server(server_name, client_name); \
	}

/* Variant that additionally enables spin_for_ever (default-QoS CPU load
 * before the pri-60 lock acquire). Disabled via T_META_ENABLED(FALSE). */
#define TEST_MULTIHOP_SPIN(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	                "Event delivery using a kevent_id", \
	                T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \
	{ \
	        spin_for_ever = true; \
	        run_client_server(server_name, client_name); \
	        spin_for_ever = false; \
	}
899 
/*
 * Test 1: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS that acquire a ulock and block on the next
 * ulock/dispatch sync, creating a sync chain. The last hop of the chain is
 * blocked on sync IPC.
 */
TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop)

TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop_noimportance", three_ulock_sync_ipc_hop_noimportance)

/*
 * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Same chain as Test 1, except that before the final priority 60 thread
 * blocks on its ulock it also starts CPU load at default QoS
 * (see spin_for_ever / TEST_MULTIHOP_SPIN; currently disabled).
 */
TEST_MULTIHOP_SPIN("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop_spin)
918