1 /*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/task.h>
30 #include <kern/task_ref.h>
31 #include <libkern/OSKextLibPrivate.h>
32
33 #include <os/refcnt.h>
34
35 /*
36 * Task references.
37 *
38 * Each task reference/deallocate pair has an associated reference group:
39 * TASK_GRP_INTERNAL This group is used exclusively to track long-term
40 * references which are almost always present.
41 * Specifically, the importance task reference, the owning
42 * task reference and the thread references.
43 * TASK_GRP_EXTERNAL For kext references
 * TASK_GRP_KERNEL	For at-large kernel references other than those tracked
45 * by task_internal.
46 * TASK_GRP_MIG For references from the MIG layer
47 *
48 * Depending on configuration (see task_refgrp_config) os_refgrps are used to
49 * keep track of the context of the reference/deallocation.
50 *
51 * TASK_REF_CONFIG_OFF
52 * No refgrps are used other than the single 'task' reference group.
53 *
54 * TASK_REF_CONFIG_DEFAULT
55 * Global refgrps are used for 'kernel' and 'external' references. The
56 * primary 'task' reference group is set as their parent. Each kext also gets
57 * its own refgrp parented to the 'external' group.
58 * Each task gets two reference groups - one for 'kernel' references parented to
 * the global 'kernel' group and a second which is dynamically assigned. All
60 * references tagged with TASK_GRP_INTERNAL, TASK_GRP_KERNEL and TASK_GRP_MIG
61 * use the task 'kernel' group. The dynamic group is initialized for the first
62 * 'external' reference to a kext specific group parented to the matching global
63 * kext group. For 'external' references not matching that group, the global
64 * 'external' group is used.
65 * This is the default configuration.
66 *
67 * TASK_REF_CONFIG_FULL
68 * Global refgrps are used for 'kernel', 'external', 'internal' and 'mig'
69 * references. The primary 'task' reference group is set as their parent. Each
 * kext also gets its own refgrp parented to the 'external' group.
71 * Each task gets eight reference groups - one each mirroring the four global
72 * reference groups and four dynamic groups which are assigned to kexts. For
73 * 'external' references not matching any of the four dynamic groups, the global
74 * 'external' group is used.
75 *
76 * Kext callers have the calls which take or release task references mapped
77 * to '_external' equivalents via the .exports file.
78 *
79 * At-large kernel callers see calls redefined to call the '_kernel' variants
80 * (see task_ref.h).
81 *
82 * The mig layer generates code which uses the '_mig' variants.
83 *
84 * Other groups are selected explicitly.
85 *
86 * Reference groups support recording of back traces via the rlog boot arg.
87 * For example: rlog=task_external would keep a backtrace log of all external
88 * references.
89 */
90
91 #define TASK_REF_COUNT_INITIAL (2u)
92
93 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
94
95 #if DEVELOPMENT || DEBUG
96
97 #include <stdbool.h>
98
99 #define DYNAMIC_COUNT 4
100
101 /*
102 * Controlled by the boot arg 'task_refgrp=X'.
103 *
104 * Unspecified/default
105 * There are two task reference groups. One kext specific reference group, the
106 * other used for kernel/internal and mig references.
107 *
108 * "off"
109 * No task specific reference groups are used.
110 *
111 * "full"
112 * Each task gets its own set of kernel/internal/mig and external groups.
113 * Additionally four dynamic reference groups are made available to identify kext
114 * references.
115 */
116 __attribute__((used))
117 static enum {
118 TASK_REF_CONFIG_DEFAULT,
119 TASK_REF_CONFIG_FULL,
120 TASK_REF_CONFIG_OFF,
121 } task_refgrp_config = TASK_REF_CONFIG_DEFAULT;
122
123 /* Global reference groups. */
124 os_refgrp_decl(static, task_primary_refgrp, "task", NULL);
125 os_refgrp_decl(static, task_kernel_refgrp, "task_kernel", &task_primary_refgrp);
126 os_refgrp_decl(static, task_internal_refgrp, "task_internal", &task_primary_refgrp);
127 os_refgrp_decl(static, task_mig_refgrp, "task_mig", &task_primary_refgrp);
128 os_refgrp_decl(, task_external_refgrp, "task_external", &task_primary_refgrp);
129
130
131 /* 'task_refgrp' is used by lldb macros. */
132 __attribute__((used))
133 static struct os_refgrp * const task_refgrp[TASK_GRP_COUNT] = {
134 [TASK_GRP_KERNEL] = &task_kernel_refgrp,
135 [TASK_GRP_INTERNAL] = &task_internal_refgrp,
136 [TASK_GRP_MIG] = &task_mig_refgrp,
137 [TASK_GRP_EXTERNAL] = &task_external_refgrp,
138 };
139
140 /* Names used by local reference groups. */
141 static const char * const local_name[TASK_GRP_COUNT] = {
142 [TASK_GRP_KERNEL] = "task_local_kernel",
143 [TASK_GRP_INTERNAL] = "task_local_internal",
144 [TASK_GRP_MIG] = "task_local_mig",
145 [TASK_GRP_EXTERNAL] = "task_local_external",
146 };
147
148 /* Walk back the callstack calling cb for each address. */
149 static inline void
150 walk_kext_callstack(int (^cb)(uintptr_t))
151 {
152 uintptr_t* frameptr;
153 uintptr_t* frameptr_next;
154 uintptr_t retaddr;
155 uintptr_t kstackb, kstackt;
156 thread_t cthread;
157
158 cthread = current_thread();
159 assert3p(cthread, !=, NULL);
160
161 kstackb = thread_get_kernel_stack(cthread);
162 kstackt = kstackb + kernel_stack_size;
163
164 /* Load stack frame pointer (EBP on x86) into frameptr */
165 frameptr = __builtin_frame_address(0);
166
167 while (frameptr != NULL) {
168 /* Verify thread stack bounds */
169 if (((uintptr_t)(frameptr + 2) > kstackt) ||
170 ((uintptr_t)frameptr < kstackb)) {
171 break;
172 }
173
174 /* Next frame pointer is pointed to by the previous one */
175 frameptr_next = (uintptr_t*) *frameptr;
176
177 /* Pull return address from one spot above the frame pointer */
178 retaddr = *(frameptr + 1);
179
180 #if defined(HAS_APPLE_PAC)
181 retaddr = (uintptr_t) ptrauth_strip((void *)retaddr,
182 ptrauth_key_return_address);
183 #endif
184
185 if (((retaddr < vm_kernel_builtinkmod_text_end) &&
186 (retaddr >= vm_kernel_builtinkmod_text)) ||
187 (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
188 if (cb(retaddr) != 0) {
189 return;
190 }
191 }
192 frameptr = frameptr_next;
193 }
194
195 return;
196 }
197
198 /* Return the reference group associated with the 'closest' kext. */
199 static struct os_refgrp *
lookup_kext_refgrp(void)200 lookup_kext_refgrp(void)
201 {
202 __block struct os_refgrp *refgrp = NULL;
203
204 /* Get the kext specific group based on the current stack. */
205 walk_kext_callstack(^(uintptr_t retaddr) {
206 OSKextGetRefGrpForCaller(retaddr, ^(struct os_refgrp *kext_grp) {
207 assert(kext_grp != NULL);
208 refgrp = kext_grp;
209 });
210 return 1;
211 });
212 return refgrp;
213 }
214
215
216 /*
217 * Given an array of reference groups, find one that matches the specified kext
 * group. If there is no match and there is an empty slot, initialize a new
219 * refgrp with the kext group as the parent (only when `can_allocate` is true).
220 */
221 static struct os_refgrp *
lookup_dynamic_refgrp(struct os_refgrp * kext,struct os_refgrp * dynamic,int dynamic_count,bool can_allocate)222 lookup_dynamic_refgrp(struct os_refgrp *kext,
223 struct os_refgrp *dynamic, int dynamic_count, bool can_allocate)
224 {
225 /* First see if it exists. */
226 for (int i = 0; i < dynamic_count; i++) {
227 if (dynamic[i].grp_parent == kext) {
228 return &dynamic[i];
229 }
230 }
231
232 if (!can_allocate) {
233 return NULL;
234 }
235
236 /* Grab an empty one, if available. */
237 for (int i = 0; i < dynamic_count; i++) {
238 if (dynamic[i].grp_name == NULL) {
239 dynamic[i] = (struct os_refgrp)
240 os_refgrp_initializer(kext->grp_name, kext);
241 return &dynamic[i];
242 }
243 }
244
245 return NULL;
246 }
247
248 /*
249 * Find the best external reference group.
250 * - Task specific kext ref group
251 * else
252 * - Kext ref group
253 * else
254 * - Global external ref group
255 */
256 static struct os_refgrp *
find_external_refgrp(struct os_refgrp * dynamic,int dynamic_count,bool can_allocate)257 find_external_refgrp(struct os_refgrp *dynamic, int dynamic_count,
258 bool can_allocate)
259 {
260 struct os_refgrp *kext_refgrp = lookup_kext_refgrp();
261 if (kext_refgrp == NULL) {
262 return task_refgrp[TASK_GRP_EXTERNAL];
263 }
264
265 struct os_refgrp *refgrp = lookup_dynamic_refgrp(kext_refgrp, dynamic,
266 dynamic_count, can_allocate);
267 if (refgrp == NULL) {
268 return kext_refgrp;
269 }
270
271 return refgrp;
272 }
273
274 void
task_reference_grp(task_t task,task_grp_t grp)275 task_reference_grp(task_t task, task_grp_t grp)
276 {
277 assert3u(grp, <, TASK_GRP_COUNT);
278 assert(
279 task_refgrp_config == TASK_REF_CONFIG_OFF ||
280 task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
281 task_refgrp_config == TASK_REF_CONFIG_FULL);
282
283 struct os_refgrp *refgrp = NULL;
284
285 if (task == TASK_NULL) {
286 return;
287 }
288
289 task_require(task);
290
291 /*
292 * External ref groups need to search and potentially allocate from the
293 * dynamic task ref groups. This must be protected by a lock.
294 */
295 if (task_refgrp_config != TASK_REF_CONFIG_OFF &&
296 grp == TASK_GRP_EXTERNAL) {
297 lck_spin_lock(&task->ref_group_lock);
298 }
299
300 switch (task_refgrp_config) {
301 case TASK_REF_CONFIG_OFF:
302 refgrp = NULL;
303 break;
304
305 case TASK_REF_CONFIG_DEFAULT:
306
307 refgrp = (grp == TASK_GRP_EXTERNAL) ?
308 find_external_refgrp(&task->ref_group[1], 1, true) :
309 &task->ref_group[TASK_GRP_KERNEL];
310 break;
311
312 case TASK_REF_CONFIG_FULL:
313
314 refgrp = (grp == TASK_GRP_EXTERNAL) ?
315 find_external_refgrp(&task->ref_group[TASK_GRP_COUNT], DYNAMIC_COUNT, true) :
316 &task->ref_group[grp];
317 break;
318 }
319
320 os_ref_retain_raw(&task->ref_count.ref_count, refgrp);
321
322 if (task_refgrp_config != TASK_REF_CONFIG_OFF &&
323 grp == TASK_GRP_EXTERNAL) {
324 lck_spin_unlock(&task->ref_group_lock);
325 }
326 }
327
328 void
task_deallocate_grp(task_t task,task_grp_t grp)329 task_deallocate_grp(task_t task, task_grp_t grp)
330 {
331 assert3u(grp, <, TASK_GRP_COUNT);
332 assert(
333 task_refgrp_config == TASK_REF_CONFIG_OFF ||
334 task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
335 task_refgrp_config == TASK_REF_CONFIG_FULL);
336
337 os_ref_count_t refs = -1;
338 struct os_refgrp *refgrp = NULL;
339
340 if (task == TASK_NULL) {
341 return;
342 }
343
344 /*
345 * There is no need to take the ref_group_lock when de-allocating. The
346 * lock is only required when allocating a group.
347 */
348 switch (task_refgrp_config) {
349 case TASK_REF_CONFIG_OFF:
350 refgrp = NULL;
351 break;
352
353 case TASK_REF_CONFIG_DEFAULT:
354 refgrp = (grp == TASK_GRP_EXTERNAL) ?
355 find_external_refgrp(&task->ref_group[1], 1, false) :
356 &task->ref_group[TASK_GRP_KERNEL];
357 break;
358
359 case TASK_REF_CONFIG_FULL:
360 refgrp = (grp == TASK_GRP_EXTERNAL) ?
361 find_external_refgrp(&task->ref_group[TASK_GRP_COUNT], DYNAMIC_COUNT, false) :
362 &task->ref_group[grp];
363 break;
364 }
365
366
367 refs = os_ref_release_raw(&task->ref_count.ref_count, refgrp);
368 /* Beware - the task may have been freed after this point. */
369
370 task_deallocate_internal(task, refs);
371 }
372
373 void
task_reference_external(task_t task)374 task_reference_external(task_t task)
375 {
376 task_reference_grp(task, TASK_GRP_EXTERNAL);
377 }
378
379 void
task_deallocate_external(task_t task)380 task_deallocate_external(task_t task)
381 {
382 task_deallocate_grp(task, TASK_GRP_EXTERNAL);
383 }
384
385 static void
allocate_refgrp_default(task_t task)386 allocate_refgrp_default(task_t task)
387 {
388 /* Just one static group and one dynamic group. */
389 task->ref_group = kalloc_type(struct os_refgrp, 2,
390 Z_WAITOK | Z_ZERO | Z_NOFAIL);
391
392 task->ref_group[TASK_GRP_KERNEL] = (struct os_refgrp)
393 os_refgrp_initializer(local_name[TASK_GRP_KERNEL],
394 task_refgrp[TASK_GRP_KERNEL]);
395 os_ref_log_init(&task->ref_group[TASK_GRP_KERNEL]);
396 }
397
398 static void
free_refgrp_default(task_t task)399 free_refgrp_default(task_t task)
400 {
401 os_ref_log_fini(&task->ref_group[TASK_GRP_KERNEL]);
402 /* Just one static group and one dynamic group. */
403 kfree_type(struct os_refgrp, 2, task->ref_group);
404 }
405
406 static void
allocate_refgrp_full(task_t task)407 allocate_refgrp_full(task_t task)
408 {
409 task->ref_group = kalloc_type(struct os_refgrp,
410 TASK_GRP_COUNT + DYNAMIC_COUNT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
411
412 for (int i = 0; i < TASK_GRP_COUNT; i++) {
413 task->ref_group[i] = (struct os_refgrp)
414 os_refgrp_initializer(local_name[i], task_refgrp[i]);
415 os_ref_log_init(&task->ref_group[i]);
416 }
417 }
418
419 static void
free_refgrp_full(task_t task)420 free_refgrp_full(task_t task)
421 {
422 for (int i = 0; i < TASK_GRP_COUNT; i++) {
423 os_ref_log_fini(&task->ref_group[i]);
424 }
425 kfree_type(struct os_refgrp, TASK_GRP_COUNT + DYNAMIC_COUNT, task->ref_group);
426 }
427
428 kern_return_t
task_ref_count_init(task_t task)429 task_ref_count_init(task_t task)
430 {
431 assert(
432 task_refgrp_config == TASK_REF_CONFIG_OFF ||
433 task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
434 task_refgrp_config == TASK_REF_CONFIG_FULL);
435
436 switch (task_refgrp_config) {
437 case TASK_REF_CONFIG_OFF:
438 os_ref_init_count(&task->ref_count, &task_primary_refgrp,
439 TASK_REF_COUNT_INITIAL);
440 return KERN_SUCCESS;
441
442
443 case TASK_REF_CONFIG_DEFAULT:
444 allocate_refgrp_default(task);
445 lck_spin_init(&task->ref_group_lock, &task_lck_grp, LCK_ATTR_NULL);
446 os_ref_init_count(&task->ref_count, &task->ref_group[TASK_GRP_KERNEL],
447 TASK_REF_COUNT_INITIAL);
448 return KERN_SUCCESS;
449
450 case TASK_REF_CONFIG_FULL:
451 allocate_refgrp_full(task);
452 lck_spin_init(&task->ref_group_lock, &task_lck_grp, LCK_ATTR_NULL);
453
454 os_ref_init_count_internal(&task->ref_count.ref_count,
455 &task->ref_group[TASK_GRP_KERNEL], 1);
456
457 task_reference_grp(task, TASK_GRP_INTERNAL);
458
459 return KERN_SUCCESS;
460 }
461 }
462
463 void
task_ref_count_fini(task_t task)464 task_ref_count_fini(task_t task)
465 {
466 assert(
467 task_refgrp_config == TASK_REF_CONFIG_OFF ||
468 task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
469 task_refgrp_config == TASK_REF_CONFIG_FULL);
470
471 switch (task_refgrp_config) {
472 case TASK_REF_CONFIG_OFF:
473 return;
474
475 case TASK_REF_CONFIG_DEFAULT:
476 lck_spin_destroy(&task->ref_group_lock, &task_lck_grp);
477 free_refgrp_default(task);
478 return;
479
480 case TASK_REF_CONFIG_FULL:
481 lck_spin_destroy(&task->ref_group_lock, &task_lck_grp);
482 free_refgrp_full(task);
483 return;
484 }
485 }
486
487 void
task_ref_init(void)488 task_ref_init(void)
489 {
490 char config[16] = {0};
491
492 /* Allow task reference group logging to be configured. */
493 (void) PE_parse_boot_arg_str("task_refgrp", config,
494 sizeof(config));
495
496 if (strncmp(config, "full", sizeof(config)) == 0) {
497 task_refgrp_config = TASK_REF_CONFIG_FULL;
498 }
499 if (strncmp(config, "off", sizeof(config)) == 0) {
500 task_refgrp_config = TASK_REF_CONFIG_OFF;
501 }
502
503 if (task_refgrp_config == TASK_REF_CONFIG_OFF) {
504 return;
505 }
506
507 for (int i = 0; i < TASK_GRP_COUNT; i++) {
508 os_ref_log_init(task_refgrp[i]);
509 }
510 }
511
512 #else /* DEVELOPMENT || DEBUG */
513
514 kern_return_t
task_ref_count_init(task_t task)515 task_ref_count_init(task_t task)
516 {
517 /* One ref for our caller, one for being alive. */
518 os_ref_init_count(&task->ref_count, &task_primary_refgrp,
519 TASK_REF_COUNT_INITIAL);
520 return KERN_SUCCESS;
521 }
522
523 void
task_reference_grp(task_t task,task_grp_t grp)524 task_reference_grp(task_t task, __attribute__((__unused__)) task_grp_t grp)
525 {
526 if (task == TASK_NULL) {
527 return;
528 }
529
530 task_require(task);
531 os_ref_retain(&task->ref_count);
532 }
533
534 void
task_deallocate_grp(task_t task,task_grp_t grp)535 task_deallocate_grp(task_t task, __attribute__((__unused__)) task_grp_t grp)
536 {
537 if (task == TASK_NULL) {
538 return;
539 }
540
541 os_ref_count_t refs = os_ref_release(&task->ref_count);
542 task_deallocate_internal(task, refs);
543 }
544
545 void
task_reference_external(task_t task)546 task_reference_external(task_t task)
547 {
548 task_reference_grp(task, 0);
549 }
550
551 void
task_deallocate_external(task_t task)552 task_deallocate_external(task_t task)
553 {
554 task_deallocate_grp(task, 0);
555 }
556
557 void
task_ref_count_fini(task_t task)558 task_ref_count_fini(__attribute__((__unused__)) task_t task)
559 {
560 }
561
562 void
task_ref_init(void)563 task_ref_init(void)
564 {
565 }
566
567 #endif /* DEVELOPMENT || DEBUG */
568