xref: /xnu-12377.41.6/bsd/pthread/pthread_shims.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define PTHREAD_INTERNAL 1
30 
31 #include <stdatomic.h>
32 #include <kern/debug.h>
33 #include <kern/mach_param.h>
34 #include <kern/sched_prim.h>
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/affinity.h>
38 #include <kern/zalloc.h>
39 #include <kern/policy_internal.h>
40 #include <kern/sync_sema.h>
41 #include <kern/cpu_data.h>
42 
43 #include <machine/machine_routines.h>
44 #include <mach/task.h>
45 #include <mach/thread_act.h>
46 #include <sys/param.h>
47 #include <sys/eventvar.h>
48 #include <sys/pthread_shims.h>
49 #include <pthread/workqueue_internal.h>
50 #include <sys/cdefs.h>
51 #include <sys/proc_info.h>
52 #include <sys/proc_internal.h>
53 #include <sys/sysproto.h>
54 #include <sys/systm.h>
55 #include <sys/ulock.h>
56 #include <vm/vm_map_xnu.h>
57 #include <vm/vm_protos.h>
58 #include <kern/kcdata.h>
59 
/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/* Anchor member for the pthread_callbacks_s padding assert below. */
#define PTHREAD_CALLBACK_MEMBER kevent_workq_internal

/* compile time asserts to check the length of structures in pthread_shims.h */
/* Each structure must end with exactly 100 pointer-sized slots after the
 * anchor member — presumably reserved padding for future growth; confirm
 * against pthread_shims.h if either assert fires. */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
68 
69 /* old pthread code had definitions for these as they don't exist in headers */
70 extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
71 extern void thread_deallocate_safe(thread_t thread);
72 
/*
 * Generate a file-static getter/setter pair for one struct member:
 * `get(x)` returns (x)->member and `set(x, y)` assigns it.
 */
#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
	        return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
	        (x)->member = y; \
	}

/* Accessors for the pthread-related fields hanging off struct proc. */
PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);

/* All-ones sentinel pointer; name suggests a workqueue pointer that is
 * still being initialized — not referenced in this file. */
#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)
92 
93 static void
proc_set_dispatchqueue_offset(struct proc * p,uint64_t offset)94 proc_set_dispatchqueue_offset(struct proc *p, uint64_t offset)
95 {
96 	p->p_dispatchqueue_offset = offset;
97 }
98 
99 static void
proc_set_workqueue_quantum_offset(struct proc * p,uint64_t offset)100 proc_set_workqueue_quantum_offset(struct proc *p, uint64_t offset)
101 {
102 	p->p_pthread_wq_quantum_offset = offset;
103 }
104 
105 static void
proc_set_return_to_kernel_offset(struct proc * p,uint64_t offset)106 proc_set_return_to_kernel_offset(struct proc *p, uint64_t offset)
107 {
108 	p->p_return_to_kernel_offset = offset;
109 }
110 
111 static user_addr_t
proc_get_user_stack(struct proc * p)112 proc_get_user_stack(struct proc *p)
113 {
114 	return p->user_stack;
115 }
116 
117 static void
uthread_set_returnval(struct uthread * uth,int retval)118 uthread_set_returnval(struct uthread *uth, int retval)
119 {
120 	uth->uu_rval[0] = retval;
121 }
122 
/* Noreturn shim: hand control back to userspace via the exception-return path. */
__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}
129 
/* Noreturn shim: hand control back to userspace via the bootstrap-return path. */
__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}
136 
137 static uint32_t
get_task_threadmax(void)138 get_task_threadmax(void)
139 {
140 	return task_threadmax;
141 }
142 
143 static uint64_t
proc_get_register(struct proc * p)144 proc_get_register(struct proc *p)
145 {
146 	return p->p_lflag & P_LREGISTER;
147 }
148 
/* Forward to proc_setregister() (sets P_LREGISTER under the proc lock). */
static void
proc_set_register(struct proc *proc)
{
	proc_setregister(proc);
}
154 
155 static void*
uthread_get_uukwe(struct uthread * t)156 uthread_get_uukwe(struct uthread *t)
157 {
158 	return &t->uu_save.uus_kwe;
159 }
160 
161 static int
uthread_is_cancelled(struct uthread * t)162 uthread_is_cancelled(struct uthread *t)
163 {
164 	return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
165 }
166 
167 static vm_map_t
_current_map(void)168 _current_map(void)
169 {
170 	return current_map();
171 }
172 
173 static boolean_t
qos_main_thread_active(void)174 qos_main_thread_active(void)
175 {
176 	return TRUE;
177 }
178 
179 static int
proc_usynch_get_requested_thread_qos(struct uthread * uth)180 proc_usynch_get_requested_thread_qos(struct uthread *uth)
181 {
182 	thread_t thread = uth ? get_machthread(uth) : current_thread();
183 	int      requested_qos;
184 
185 	requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);
186 
187 	/*
188 	 * For the purposes of userspace synchronization, it doesn't make sense to
189 	 * place an override of UNSPECIFIED on another thread, if the current thread
190 	 * doesn't have any QoS set. In these cases, upgrade to
191 	 * THREAD_QOS_USER_INTERACTIVE.
192 	 */
193 	if (requested_qos == THREAD_QOS_UNSPECIFIED) {
194 		requested_qos = THREAD_QOS_USER_INTERACTIVE;
195 	}
196 
197 	return requested_qos;
198 }
199 
200 static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task,struct uthread * uth,uint64_t tid,int override_qos,boolean_t first_override_for_resource,user_addr_t resource,int resource_type)201 proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
202     uint64_t tid, int override_qos, boolean_t first_override_for_resource,
203     user_addr_t resource, int resource_type)
204 {
205 	thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;
206 
207 	return proc_thread_qos_add_override(task, thread, tid, override_qos,
208 	           first_override_for_resource, resource, resource_type) == 0;
209 }
210 
211 static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,struct uthread * uth,uint64_t tid,user_addr_t resource,int resource_type)212 proc_usynch_thread_qos_remove_override_for_resource(task_t task,
213     struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
214 {
215 	thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;
216 
217 	return proc_thread_qos_remove_override(task, thread, tid, resource,
218 	           resource_type) == 0;
219 }
220 
221 
/*
 * Prepare the current thread to wait on a pthread kernel wait queue (kwq).
 *
 * If a turnstile store is supplied (tstore != NULL), a turnstile is
 * prepared for the kwq, the (possibly NULL) owner is installed as its
 * inheritor (lazily, TURNSTILE_DELAYED_UPDATE), and the wait is asserted
 * on the turnstile's waitq.  Otherwise a plain event wait on the kwq
 * address is asserted.  Either way the wait is THREAD_ABORTSAFE with the
 * caller's deadline, and the block hint is recorded for stackshots.
 *
 * Returns the wait_result_t from the assert-wait call; the caller is
 * expected to block afterwards.
 */
static wait_result_t
psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore,
    thread_t owner, block_hint_t block_hint, uint64_t deadline)
{
	struct turnstile *ts;
	wait_result_t wr;

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);

		turnstile_update_inheritor(ts, owner,
		    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq,
		    THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	} else {
		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE,
		    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	}

	return wr;
}
249 
250 static void
psynch_wait_update_complete(struct turnstile * ts)251 psynch_wait_update_complete(struct turnstile *ts)
252 {
253 	assert(ts);
254 	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
255 }
256 
257 static void
psynch_wait_complete(uintptr_t kwq,struct turnstile ** tstore)258 psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore)
259 {
260 	assert(tstore);
261 	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
262 }
263 
/*
 * Repoint the kwq's turnstile at `owner` so priority inheritance tracks
 * the new owner, pushing the update immediately (the caller holds the
 * interlock).  The turnstile is prepared and completed entirely within
 * this call; no waiter is woken or blocked here.
 */
static void
psynch_wait_update_owner(uintptr_t kwq, thread_t owner,
    struct turnstile **tstore)
{
	struct turnstile *ts;

	ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
	    TURNSTILE_PTHREAD_MUTEX);

	turnstile_update_inheritor(ts, owner,
	    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
}
278 
/* Drop any thread-local turnstile state left over from a psynch wait. */
static void
psynch_wait_cleanup(void)
{
	turnstile_cleanup();
}
284 
/*
 * Wake the specific waiter identified by `kwe` that is blocked on the kwq.
 *
 * When the kwq has a turnstile (tstore != NULL), the woken thread is first
 * installed as the turnstile inheritor (immediate update) and is woken
 * through the turnstile's waitq; the turnstile is then completed.
 * Otherwise a plain targeted event wakeup is issued.
 *
 * Returns the kern_return_t from the wakeup call.
 */
static kern_return_t
psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe,
    struct turnstile **tstore)
{
	struct thread *th;
	struct turnstile *ts;
	kern_return_t kr;

	/* Recover the owning thread: kwe is embedded in uthread.uu_save.uus_kwe. */
	th = get_machthread(__container_of(kwe, struct uthread, uu_save.uus_kwe));

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);
		turnstile_update_inheritor(ts, th,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));

		kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq, th,
		    THREAD_AWAKENED);

		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
	} else {
		kr = thread_wakeup_thread((event_t)kwq, th);
	}

	return kr;
}
312 
313 /* kernel (core) to kext shims */
314 
315 void
pthread_init(void)316 pthread_init(void)
317 {
318 	if (!pthread_functions) {
319 		panic("pthread kernel extension not loaded (function table is NULL).");
320 	}
321 	pthread_functions->pthread_init();
322 }
323 
324 void
pth_proc_hashinit(proc_t p)325 pth_proc_hashinit(proc_t p)
326 {
327 	pthread_functions->pth_proc_hashinit(p);
328 }
329 
330 void
pth_proc_hashdelete(proc_t p)331 pth_proc_hashdelete(proc_t p)
332 {
333 	pthread_functions->pth_proc_hashdelete(p);
334 }
335 
336 /* syscall shims */
337 int
bsdthread_create(struct proc * p,struct bsdthread_create_args * uap,user_addr_t * retval)338 bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
339 {
340 	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
341 }
342 
/*
 * Syscall shim: register the process's pthread runtime entry points
 * (thread start / workqueue thread trampolines, TSD offsets, etc.) with
 * the pthread kext.
 */
int
bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
{
	kern_return_t kr;
	/*
	 * threadstart and wqthread must be adjacent in the args struct:
	 * the conversion call below treats them as a 2-element array.
	 */
	static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) ==
	    offsetof(struct bsdthread_register_args, wqthread));
	/* Convert both user function pointers in place for kernel use. */
	kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2);
	assert(kr == KERN_SUCCESS);

	/* Version >= 1 kexts accept the TSD offset via bsdthread_register2. */
	if (pthread_functions->version >= 1) {
		return pthread_functions->bsdthread_register2(p, uap->threadstart,
		           uap->wqthread, uap->flags, uap->stack_addr_hint,
		           uap->targetconc_ptr, uap->dispatchqueue_offset,
		           uap->tsd_offset, retval);
	} else {
		return pthread_functions->bsdthread_register(p, uap->threadstart,
		           uap->wqthread, uap->flags, uap->stack_addr_hint,
		           uap->targetconc_ptr, uap->dispatchqueue_offset,
		           retval);
	}
}
364 
/*
 * Syscall shim: terminate the calling thread, freeing its user stack and
 * optionally signalling a semaphore / waking a join ulock on the way out.
 * `sema_or_ulock` may name either a Mach semaphore port or a ulock
 * address — see the compatibility note below for how they are told apart.
 */
int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	thread_t th = current_thread();
	uthread_t uth = current_uthread();
	struct _bsdthread_terminate *bts = &uth->uu_save.uus_bsdthread_terminate;
	mach_port_name_t sem = (mach_port_name_t)uap->sema_or_ulock;
	mach_port_name_t thp = uap->port;
	uint16_t tag = thread_get_tag(th);

	if (tag & THREAD_TAG_WORKQUEUE) {
		/* Workqueue threads get workqueue-side teardown first. */
		workq_thread_terminate(p, get_bsdthread_info(th));
	} else if (tag & THREAD_TAG_AIO_WORKQUEUE) {
		/* AIO workqueue threads may not terminate via this syscall. */
		return ENOTSUP;
	}

	/*
	 * Gross compatibility hack: ports end in 0x3 and ulocks are aligned.
	 * If the `semaphore` value doesn't look like a port, then it is
	 * a ulock address that will be woken by uthread_joiner_wake()
	 *
	 * We also need to delay destroying the thread port so that
	 * pthread_join()'s ulock_wait() can resolve the thread until
	 * uthread_joiner_wake() has run.
	 */
	if (uap->sema_or_ulock && uap->sema_or_ulock != ipc_entry_name_mask(sem)) {
		thread_set_tag(th, THREAD_TAG_USER_JOIN);
		bts->ulock_addr = uap->sema_or_ulock;
		bts->kport = thp;

		/* Handled via the joiner-wake path; don't pass them to the kext. */
		sem = thp = MACH_PORT_NULL;
	}

	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, thp, sem, retval);
}
400 
401 int
thread_selfid(struct proc * p,__unused struct thread_selfid_args * uap,uint64_t * retval)402 thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
403 {
404 	return pthread_functions->thread_selfid(p, retval);
405 }
406 
407 /* pthread synchroniser syscalls */
408 
409 int
psynch_mutexwait(proc_t p,struct psynch_mutexwait_args * uap,uint32_t * retval)410 psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
411 {
412 	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
413 }
414 
415 int
psynch_mutexdrop(proc_t p,struct psynch_mutexdrop_args * uap,uint32_t * retval)416 psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
417 {
418 	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
419 }
420 
421 int
psynch_cvbroad(proc_t p,struct psynch_cvbroad_args * uap,uint32_t * retval)422 psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
423 {
424 	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
425 }
426 
427 int
psynch_cvsignal(proc_t p,struct psynch_cvsignal_args * uap,uint32_t * retval)428 psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
429 {
430 	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
431 }
432 
433 int
psynch_cvwait(proc_t p,struct psynch_cvwait_args * uap,uint32_t * retval)434 psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
435 {
436 	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
437 }
438 
439 int
psynch_cvclrprepost(proc_t p,struct psynch_cvclrprepost_args * uap,int * retval)440 psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
441 {
442 	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
443 }
444 
445 int
psynch_rw_longrdlock(proc_t p,struct psynch_rw_longrdlock_args * uap,uint32_t * retval)446 psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
447 {
448 	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
449 }
450 
451 int
psynch_rw_rdlock(proc_t p,struct psynch_rw_rdlock_args * uap,uint32_t * retval)452 psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
453 {
454 	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
455 }
456 
457 int
psynch_rw_unlock(proc_t p,struct psynch_rw_unlock_args * uap,uint32_t * retval)458 psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
459 {
460 	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
461 }
462 
463 int
psynch_rw_unlock2(__unused proc_t p,__unused struct psynch_rw_unlock2_args * uap,__unused uint32_t * retval)464 psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
465 {
466 	return ENOTSUP;
467 }
468 
469 int
psynch_rw_wrlock(proc_t p,struct psynch_rw_wrlock_args * uap,uint32_t * retval)470 psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
471 {
472 	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
473 }
474 
475 int
psynch_rw_yieldwrlock(proc_t p,struct psynch_rw_yieldwrlock_args * uap,uint32_t * retval)476 psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
477 {
478 	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
479 }
480 
481 int
psynch_rw_upgrade(__unused proc_t p,__unused struct psynch_rw_upgrade_args * uap,__unused uint32_t * retval)482 psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
483 {
484 	return 0;
485 }
486 
487 int
psynch_rw_downgrade(__unused proc_t p,__unused struct psynch_rw_downgrade_args * uap,__unused int * retval)488 psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
489 {
490 	return 0;
491 }
492 
493 void
kdp_pthread_find_owner(thread_t thread,struct stackshot_thread_waitinfo * waitinfo)494 kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
495 {
496 	if (pthread_functions->pthread_find_owner) {
497 		pthread_functions->pthread_find_owner(thread, waitinfo);
498 	}
499 }
500 
501 void *
kdp_pthread_get_thread_kwq(thread_t thread)502 kdp_pthread_get_thread_kwq(thread_t thread)
503 {
504 	if (pthread_functions->pthread_get_thread_kwq) {
505 		return pthread_functions->pthread_get_thread_kwq(thread);
506 	}
507 
508 	return NULL;
509 }
510 
/*
 * Intentionally a no-op; kept so the callback-table slot below remains a
 * valid function pointer for the kext.
 */
void
thread_will_park_or_terminate(__unused thread_t thread)
{
}
515 
516 static bool
proc_get_jit_entitled(struct proc * t)517 proc_get_jit_entitled(struct proc *t)
518 {
519 	task_t task = proc_task(t);
520 	if (!task) {
521 		return false;
522 	}
523 
524 	pmap_t pmap = get_task_pmap(task);
525 	return pmap_get_jit_entitled(pmap);
526 }
527 
528 /*
529  * The callbacks structure (defined in pthread_shims.h) contains a collection
530  * of kernel functions that were not deemed sensible to expose as a KPI to all
531  * kernel extensions. So the kext is given them in the form of a structure of
532  * function pointers.
533  */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	/* struct proc accessors (generated above or defined in this file) */
	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_set_workqueue_quantum_offset = proc_set_workqueue_quantum_offset,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,
	.proc_get_jit_entitled = proc_get_jit_entitled,
	.proc_get_pthread_jit_allowlist2 = proc_get_pthread_jit_allowlist,

	/* kernel IPI interfaces */
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.ipc_port_copyout_send_pinned = ipc_port_copyout_send_pinned,
	.thread_set_wq_state32 = thread_set_wq_state32,
	.thread_set_wq_state64 = thread_set_wq_state64,

	/* uthread accessors */
	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	/* return-to-userspace paths (the first two do not return) */
	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.abandon_preemption_disable_measurement = abandon_preemption_disable_measurement,

	.get_bsdthread_info = get_bsdthread_info,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,

	.thread_create_immovable = thread_create_immovable,
	.thread_terminate_pinned = thread_terminate_immovable,
	.thread_resume = thread_resume,

	.kevent_workq_internal = kevent_workq_internal,

	.convert_thread_to_port_pinned = convert_thread_to_port_immovable,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	/* QoS / override plumbing */
	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,
	.thread_set_voucher_name = thread_set_voucher_name,

	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.proc_get_user_stack = proc_get_user_stack,
	.task_findtid = task_findtid,
	.thread_deallocate_safe = thread_deallocate_safe,

	/* psynch wait/turnstile helpers defined above */
	.psynch_wait_prepare = psynch_wait_prepare,
	.psynch_wait_update_complete = psynch_wait_update_complete,
	.psynch_wait_complete = psynch_wait_complete,
	.psynch_wait_cleanup = psynch_wait_cleanup,
	.psynch_wait_wakeup = psynch_wait_wakeup,
	.psynch_wait_update_owner = psynch_wait_update_owner,
};
623 
/* Kernel callback table handed to pthread.kext at registration. */
SECURITY_READ_ONLY_LATE(pthread_callbacks_t) pthread_kern = &pthread_callbacks;
/* Function table supplied by pthread.kext; NULL until the kext registers. */
SECURITY_READ_ONLY_LATE(pthread_functions_t) pthread_functions = NULL;
626 
627 /*
628  * pthread_kext_register is called by pthread.kext upon load, it has to provide
629  * us with a function pointer table of pthread internal calls. In return, this
630  * file provides it with a table of function pointers it needs.
631  */
632 
633 void
pthread_kext_register(pthread_functions_t fns,pthread_callbacks_t * callbacks)634 pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
635 {
636 	if (pthread_functions != NULL) {
637 		panic("Re-initialisation of pthread kext callbacks.");
638 	}
639 
640 	if (callbacks != NULL) {
641 		*callbacks = &pthread_callbacks;
642 	} else {
643 		panic("pthread_kext_register called without callbacks pointer.");
644 	}
645 
646 	if (fns) {
647 		pthread_functions = fns;
648 	}
649 }
650