1 /*
2 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define PTHREAD_INTERNAL 1
30
31 #include <stdatomic.h>
32 #include <kern/debug.h>
33 #include <kern/mach_param.h>
34 #include <kern/sched_prim.h>
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/affinity.h>
38 #include <kern/zalloc.h>
39 #include <kern/policy_internal.h>
40 #include <kern/sync_sema.h>
41
42 #include <machine/machine_routines.h>
43 #include <mach/task.h>
44 #include <mach/thread_act.h>
45 #include <sys/param.h>
46 #include <sys/eventvar.h>
47 #include <sys/pthread_shims.h>
48 #include <pthread/workqueue_internal.h>
49 #include <sys/cdefs.h>
50 #include <sys/proc_info.h>
51 #include <sys/proc_internal.h>
52 #include <sys/sysproto.h>
53 #include <sys/systm.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_protos.h>
56 #include <kern/kcdata.h>
57
/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/*
 * Select the anchor member used to size-check the callbacks structure:
 * on arm the slot at that position is a retired placeholder, elsewhere
 * it is kevent_workq_internal.
 */
#if defined(__arm__)
#define PTHREAD_CALLBACK_MEMBER __unused_was_map_is_1gb
#else
#define PTHREAD_CALLBACK_MEMBER kevent_workq_internal
#endif

/*
 * Compile-time checks that the structures shared with pthread.kext
 * (declared in pthread_shims.h) keep the expected number of pointer-sized
 * slots after the named anchor member, i.e. the kernel/kext ABI layout has
 * not grown or shrunk unexpectedly.
 */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
71
/*
 * Old pthread code had definitions for these as they don't exist in headers;
 * declared here so the callback table below can reference them.
 */
extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
extern void thread_deallocate_safe(thread_t thread);
75
/*
 * Generate a trivial getter/setter pair for one struct member, used to give
 * pthread.kext controlled access to selected struct proc fields without
 * exposing the struct layout itself.
 */
#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}

/* Accessor pairs for the struct proc fields handed to pthread.kext. */
PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);
93
/* All-ones sentinel pointer marking a workqueue pointer as mid-initialization. */
#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)
95
/* Record the userspace dispatch-queue TSD offset for process 'p'. */
static void
proc_set_dispatchqueue_offset(struct proc *p, uint64_t offset)
{
	p->p_dispatchqueue_offset = offset;
}
101
/* Record the userspace workqueue-quantum offset for process 'p'. */
static void
proc_set_workqueue_quantum_offset(struct proc *p, uint64_t offset)
{
	p->p_pthread_wq_quantum_offset = offset;
}
107
/* Record the return-to-kernel offset for process 'p'. */
static void
proc_set_return_to_kernel_offset(struct proc *p, uint64_t offset)
{
	p->p_return_to_kernel_offset = offset;
}
113
/* Return the recorded user stack address of process 'p'. */
static user_addr_t
proc_get_user_stack(struct proc *p)
{
	return p->user_stack;
}
119
/* Store a syscall return value into the uthread's first return-value slot. */
static void
uthread_set_returnval(struct uthread *uth, int retval)
{
	uth->uu_rval[0] = retval;
}
125
/* Return to userspace via the exception-return path; does not return. */
__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}
132
/* First return to userspace for a freshly set-up thread; does not return. */
__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}
139
/* Expose the kernel's per-task thread-count limit to the kext. */
static uint32_t
get_task_threadmax(void)
{
	return task_threadmax;
}
145
/* Return the P_LREGISTER bit of p->p_lflag (nonzero once the process has registered). */
static uint64_t
proc_get_register(struct proc *p)
{
	return p->p_lflag & P_LREGISTER;
}
151
/* Mark process 'p' as registered (wrapper around proc_setregister()). */
static void
proc_set_register(struct proc *p)
{
	proc_setregister(p);
}
157
/* Return a pointer to the uthread's embedded ksyn_waitq_element (uu_save.uus_kwe). */
static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_save.uus_kwe;
}
163
/*
 * True only when a cancel is pending: UT_CANCEL is set while neither
 * UT_CANCELDISABLE nor UT_CANCELED is.
 */
static int
uthread_is_cancelled(struct uthread *t)
{
	return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
}
169
/* Function wrapper around current_map() so it can live in the callback table. */
static vm_map_t
_current_map(void)
{
	return current_map();
}
175
/* QoS management of the main thread is always active on this platform. */
static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}
181
182 static int
proc_usynch_get_requested_thread_qos(struct uthread * uth)183 proc_usynch_get_requested_thread_qos(struct uthread *uth)
184 {
185 thread_t thread = uth ? get_machthread(uth) : current_thread();
186 int requested_qos;
187
188 requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);
189
190 /*
191 * For the purposes of userspace synchronization, it doesn't make sense to
192 * place an override of UNSPECIFIED on another thread, if the current thread
193 * doesn't have any QoS set. In these cases, upgrade to
194 * THREAD_QOS_USER_INTERACTIVE.
195 */
196 if (requested_qos == THREAD_QOS_UNSPECIFIED) {
197 requested_qos = THREAD_QOS_USER_INTERACTIVE;
198 }
199
200 return requested_qos;
201 }
202
203 static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task,struct uthread * uth,uint64_t tid,int override_qos,boolean_t first_override_for_resource,user_addr_t resource,int resource_type)204 proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
205 uint64_t tid, int override_qos, boolean_t first_override_for_resource,
206 user_addr_t resource, int resource_type)
207 {
208 thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;
209
210 return proc_thread_qos_add_override(task, thread, tid, override_qos,
211 first_override_for_resource, resource, resource_type) == 0;
212 }
213
214 static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,struct uthread * uth,uint64_t tid,user_addr_t resource,int resource_type)215 proc_usynch_thread_qos_remove_override_for_resource(task_t task,
216 struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
217 {
218 thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;
219
220 return proc_thread_qos_remove_override(task, thread, tid, resource,
221 resource_type) == 0;
222 }
223
224
/*
 * Prepare the current thread to wait on a pthread kernel wait queue (kwq).
 * When a turnstile store is supplied the wait is asserted on the turnstile's
 * waitq with 'owner' installed as a (delayed-update) inheritor so it can
 * receive the waiter's priority; otherwise a plain deadline assert-wait is
 * used. In both cases 'block_hint' is recorded for stackshot attribution.
 * Returns the wait_result_t from the assert-wait; the caller blocks later.
 */
static wait_result_t
psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore,
    thread_t owner, block_hint_t block_hint, uint64_t deadline)
{
	struct turnstile *ts;
	wait_result_t wr;

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);

		/* Delayed update: completed later via psynch_wait_update_complete(). */
		turnstile_update_inheritor(ts, owner,
		    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq,
		    THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	} else {
		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE,
		    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	}

	return wr;
}
252
/*
 * Finish the delayed inheritor update begun in psynch_wait_prepare().
 * Must be called with the turnstile interlock not held.
 */
static void
psynch_wait_update_complete(struct turnstile *ts)
{
	assert(ts);
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
}
259
/* Tear down the turnstile association for 'kwq' after a wait has completed. */
static void
psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore)
{
	assert(tstore);
	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
}
266
/*
 * Repoint the turnstile for 'kwq' at a new owner thread so that it inherits
 * the waiters' priority, completing the update immediately (interlock held),
 * then drop the turnstile reference taken by turnstile_prepare().
 */
static void
psynch_wait_update_owner(uintptr_t kwq, thread_t owner,
    struct turnstile **tstore)
{
	struct turnstile *ts;

	ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
	    TURNSTILE_PTHREAD_MUTEX);

	turnstile_update_inheritor(ts, owner,
	    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
}
281
/* Run deferred turnstile cleanup on behalf of the pthread kext. */
static void
psynch_wait_cleanup(void)
{
	turnstile_cleanup();
}
287
/*
 * Wake the waiter represented by 'kwe' on wait queue 'kwq'. The thread is
 * recovered from the ksyn_waitq_element embedded in its uthread. When a
 * turnstile store is supplied, the inheritor is briefly switched to the
 * woken thread before the turnstile is dismantled; otherwise a plain
 * thread_wakeup_thread() is used. Returns the kern_return_t of the wakeup.
 */
static kern_return_t
psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe,
    struct turnstile **tstore)
{
	struct thread *th;
	struct turnstile *ts;
	kern_return_t kr;

	/* kwe lives inside a uthread; recover the owning machine thread. */
	th = get_machthread(__container_of(kwe, struct uthread, uu_save.uus_kwe));

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);
		turnstile_update_inheritor(ts, th,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));

		kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq, th,
		    THREAD_AWAKENED);

		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
	} else {
		kr = thread_wakeup_thread((event_t)kwq, th);
	}

	return kr;
}
315
316 /* kernel (core) to kext shims */
317
318 void
pthread_init(void)319 pthread_init(void)
320 {
321 if (!pthread_functions) {
322 panic("pthread kernel extension not loaded (function table is NULL).");
323 }
324 pthread_functions->pthread_init();
325 }
326
/* Forward per-process pthread hash initialization to the kext. */
void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}
332
/* Forward per-process pthread hash teardown to the kext. */
void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}
338
339 /* syscall shims */
/* Syscall shim: unpack uap and forward bsdthread_create to the kext. */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}
345
346 int
bsdthread_register(struct proc * p,struct bsdthread_register_args * uap,__unused int32_t * retval)347 bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
348 {
349 kern_return_t kr;
350 static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) ==
351 offsetof(struct bsdthread_register_args, wqthread));
352 kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2);
353 assert(kr == KERN_SUCCESS);
354
355 if (pthread_functions->version >= 1) {
356 return pthread_functions->bsdthread_register2(p, uap->threadstart,
357 uap->wqthread, uap->flags, uap->stack_addr_hint,
358 uap->targetconc_ptr, uap->dispatchqueue_offset,
359 uap->tsd_offset, retval);
360 } else {
361 return pthread_functions->bsdthread_register(p, uap->threadstart,
362 uap->wqthread, uap->flags, uap->stack_addr_hint,
363 uap->targetconc_ptr, uap->dispatchqueue_offset,
364 retval);
365 }
366 }
367
368 int
bsdthread_terminate(struct proc * p,struct bsdthread_terminate_args * uap,int32_t * retval)369 bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
370 {
371 thread_t th = current_thread();
372 if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
373 workq_thread_terminate(p, get_bsdthread_info(th));
374 }
375 return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, uap->port, uap->sem, retval);
376 }
377
/* Syscall shim: return the caller's thread ID via the kext. */
int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}
383
384 /* pthread synchroniser syscalls */
385
/* Syscall shim: forward psynch_mutexwait to the pthread kext. */
int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}
391
/* Syscall shim: forward psynch_mutexdrop to the pthread kext. */
int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}
397
/* Syscall shim: forward psynch_cvbroad (condvar broadcast) to the pthread kext. */
int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}
403
/* Syscall shim: forward psynch_cvsignal (condvar signal) to the pthread kext. */
int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}
409
/* Syscall shim: forward psynch_cvwait (condvar wait) to the pthread kext. */
int
psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}
415
/* Syscall shim: forward psynch_cvclrprepost (clear condvar prepost) to the pthread kext. */
int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}
421
/* Syscall shim: forward psynch_rw_longrdlock to the pthread kext. */
int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
427
/* Syscall shim: forward psynch_rw_rdlock to the pthread kext. */
int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
433
/* Syscall shim: forward psynch_rw_unlock to the pthread kext. */
int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
439
/* Retired syscall: always fails with ENOTSUP. */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}
445
/* Syscall shim: forward psynch_rw_wrlock to the pthread kext. */
int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
451
/* Syscall shim: forward psynch_rw_yieldwrlock to the pthread kext. */
int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
457
/* Retired syscall: no-op, always returns 0. */
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
{
	return 0;
}
463
/* Retired syscall: no-op, always returns 0. */
int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
{
	return 0;
}
469
/*
 * Stackshot/kdp helper: ask the kext to fill in lock-owner wait info for
 * 'thread'. Optional entry point — silently skipped if the kext does not
 * provide it.
 */
void
kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
{
	if (pthread_functions->pthread_find_owner) {
		pthread_functions->pthread_find_owner(thread, waitinfo);
	}
}
477
478 void *
kdp_pthread_get_thread_kwq(thread_t thread)479 kdp_pthread_get_thread_kwq(thread_t thread)
480 {
481 if (pthread_functions->pthread_get_thread_kwq) {
482 return pthread_functions->pthread_get_thread_kwq(thread);
483 }
484
485 return NULL;
486 }
487
/* Intentionally empty: kept only so the symbol exists for the callback table. */
void
thread_will_park_or_terminate(__unused thread_t thread)
{
}
492
493 static bool
old_proc_get_pthread_jit_allowlist(struct proc * t)494 old_proc_get_pthread_jit_allowlist(struct proc *t)
495 {
496 bool unused_late = false;
497 return proc_get_pthread_jit_allowlist(t, &unused_late);
498 }
499
/*
 * The callbacks structure (defined in pthread_shims.h) contains a collection
 * of kernel functions that were not deemed sensible to expose as a KPI to all
 * kernel extensions. So the kext is given them in the form of a structure of
 * function pointers.
 */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	/* struct proc accessors (generated by PTHREAD_STRUCT_ACCESSOR above) */
	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_set_workqueue_quantum_offset = proc_set_workqueue_quantum_offset,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,
	.proc_get_pthread_jit_allowlist = old_proc_get_pthread_jit_allowlist,
	.proc_get_pthread_jit_allowlist2 = proc_get_pthread_jit_allowlist,

	/* kernel IPI interfaces */
	.ipc_port_copyout_send = ipc_port_copyout_send,
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.ipc_port_copyout_send_pinned = ipc_port_copyout_send_pinned,
	.thread_set_wq_state32 = thread_set_wq_state32,
#if !defined(__arm__)
	.thread_set_wq_state64 = thread_set_wq_state64,
#endif

	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	/* return-to-userspace paths */
	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.get_bsdthread_info = get_bsdthread_info,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,
	.thread_create = thread_create,
	.thread_create_immovable = thread_create_immovable,
	.thread_terminate_pinned = thread_terminate_pinned,
	.thread_resume = thread_resume,

	.kevent_workq_internal = kevent_workq_internal,

	.convert_thread_to_port = convert_thread_to_port,
	.convert_thread_to_port_pinned = convert_thread_to_port_pinned,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	/* QoS / userspace-synchronization support */
	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,
	.thread_set_voucher_name = thread_set_voucher_name,

	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.proc_get_user_stack = proc_get_user_stack,
	.task_findtid = task_findtid,
	.thread_deallocate_safe = thread_deallocate_safe,

	/* psynch turnstile wait/wake helpers (defined above) */
	.psynch_wait_prepare = psynch_wait_prepare,
	.psynch_wait_update_complete = psynch_wait_update_complete,
	.psynch_wait_complete = psynch_wait_complete,
	.psynch_wait_cleanup = psynch_wait_cleanup,
	.psynch_wait_wakeup = psynch_wait_wakeup,
	.psynch_wait_update_owner = psynch_wait_update_owner,
};
597
/* Callback table handed to pthread.kext; fixed for the life of the system. */
pthread_callbacks_t pthread_kern = &pthread_callbacks;
/* Function table registered by pthread.kext; NULL until the kext loads. */
pthread_functions_t pthread_functions = NULL;
600
601 /*
602 * pthread_kext_register is called by pthread.kext upon load, it has to provide
603 * us with a function pointer table of pthread internal calls. In return, this
604 * file provides it with a table of function pointers it needs.
605 */
606
607 void
pthread_kext_register(pthread_functions_t fns,pthread_callbacks_t * callbacks)608 pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
609 {
610 if (pthread_functions != NULL) {
611 panic("Re-initialisation of pthread kext callbacks.");
612 }
613
614 if (callbacks != NULL) {
615 *callbacks = &pthread_callbacks;
616 } else {
617 panic("pthread_kext_register called without callbacks pointer.");
618 }
619
620 if (fns) {
621 pthread_functions = fns;
622 }
623 }
624