1 /*
2 * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/notify.h>
31 #include <ipc/ipc_types.h>
32 #include <ipc/ipc_importance.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_voucher.h>
35 #include <kern/ipc_kobject.h>
36 #include <kern/ipc_tt.h>
37 #include <kern/mach_param.h>
38 #include <kern/misc_protos.h>
39 #include <kern/zalloc.h>
40 #include <kern/queue.h>
41 #include <kern/task.h>
42 #include <kern/policy_internal.h>
43
44 #include <sys/kdebug.h>
45
46 #include <mach/machine/sdt.h>
47
48 extern int proc_pid(void *);
49 extern int proc_selfpid(void);
50 extern uint64_t proc_uniqueid(void *p);
51 extern char *proc_name_address(void *p);
52
/*
 * Globals for delayed boost drop processing.
 *
 * Tasks whose last boost was dropped are parked on the delayed drop
 * queue for a hysteresis interval before the de-nap policy change is
 * actually applied, so a task that is quickly re-boosted never churns
 * its policy state.
 */
/* queue of task importances awaiting a delayed de-nap drop */
static queue_head_t ipc_importance_delayed_drop_queue;
/* thread call used to service the delayed drop queue */
static thread_call_t ipc_importance_delayed_drop_call;
/* max update timestamp the next scan will process up to */
static uint64_t ipc_importance_delayed_drop_timestamp;
/* TRUE once the thread call has been armed and not yet fired */
static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;

#define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
#define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
#define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */

#define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
#define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
67
/*
 * Importance Voucher Attribute Manager
 *
 * A single global spin lock protects all importance element state
 * (reference counts aside, which are atomic), including the kmsg and
 * inherit propagation chains and the update queues.
 */
static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr);

/* acquire the global importance lock (spins) */
#define ipc_importance_lock() \
	lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
/* try to acquire without spinning; returns TRUE on success */
#define ipc_importance_lock_try() \
	lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_unlock() \
	lck_spin_unlock(&ipc_importance_lock_data)
/* debug assertion that the current thread holds the lock */
#define ipc_importance_assert_held() \
	lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
81
#if IIE_REF_DEBUG
/* debug builds keep per-source reference counters alongside the real count */
#define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))

/*
 * Atomically add a reference to an importance element, recording the
 * event in the debug counter. Returns the new reference count portion
 * of iie_bits.
 */
static inline
uint32_t
ipc_importance_reference_internal(ipc_importance_elem_t elem)
{
	incr_ref_counter(elem->iie_refs_added);
	return os_atomic_inc(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
}

/*
 * Atomically drop a reference from an importance element, recording the
 * event in the debug counter. Returns the new reference count portion
 * of iie_bits (0 means the caller held the last reference).
 */
static inline
uint32_t
ipc_importance_release_internal(ipc_importance_elem_t elem)
{
	incr_ref_counter(elem->iie_refs_dropped);
	return os_atomic_dec(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
}

/*
 * Task-importance flavored reference: bumps the shared element count and
 * additionally tallies the task-specific debug counter.
 */
static inline
uint32_t
ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
{
	uint32_t out;
	out = ipc_importance_reference_internal(&task_imp->iit_elem);
	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
	return out;
}

/*
 * Task-importance flavored release: must never drop the last reference
 * (callers that may drop the last ref go through the locked path).
 */
static inline
uint32_t
ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
{
	uint32_t out;

	assert(1 < IIT_REFS(task_imp));
	incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
	out = ipc_importance_release_internal(&task_imp->iit_elem);
	return out;
}

/* zero all per-element debug counters at element creation time */
static inline
void
ipc_importance_counter_init(ipc_importance_elem_t elem)
{
	elem->iie_refs_added = 0;
	elem->iie_refs_dropped = 0;
	elem->iie_kmsg_refs_added = 0;
	elem->iie_kmsg_refs_inherited = 0;
	elem->iie_kmsg_refs_coalesced = 0;
	elem->iie_kmsg_refs_dropped = 0;
	elem->iie_task_refs_added = 0;
	elem->iie_task_refs_added_inherit_from = 0;
	elem->iie_task_refs_added_transition = 0;
	elem->iie_task_refs_self_added = 0;
	elem->iie_task_refs_inherited = 0;
	elem->iie_task_refs_coalesced = 0;
	elem->iie_task_refs_dropped = 0;
}
#else
#define incr_ref_counter(x)
#endif
144
145 #if DEVELOPMENT || DEBUG
146 static queue_head_t global_iit_alloc_queue =
147 QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue);
148 #endif
149
150 static ZONE_DEFINE_TYPE(ipc_importance_task_zone, "ipc task importance",
151 struct ipc_importance_task, ZC_ZFREE_CLEARMEM);
152 static ZONE_DEFINE_TYPE(ipc_importance_inherit_zone, "ipc importance inherit",
153 struct ipc_importance_inherit, ZC_ZFREE_CLEARMEM);
154 static zone_t ipc_importance_inherit_zone;
155
156 static ipc_voucher_attr_control_t ipc_importance_control;
157
/* adjust a task's internal assert count; TRUE if a policy edge was crossed */
static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
    iit_update_type_t type, uint32_t delta);

/* push an assertion change down the inheritance chains from a task */
static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
    iit_update_type_t type, boolean_t update_task_imp);

/* create/find an inherit that carries from_task's boosts to to_task */
static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
165
166 /*
167 * Routine: ipc_importance_kmsg_link
168 * Purpose:
169 * Link the kmsg onto the appropriate propagation chain.
170 * If the element is a task importance, we link directly
171 * on its propagation chain. Otherwise, we link onto the
172 * destination task of the inherit.
173 * Conditions:
174 * Importance lock held.
175 * Caller is donating an importance elem reference to the kmsg.
176 */
177 static void
ipc_importance_kmsg_link(ipc_kmsg_t kmsg,ipc_importance_elem_t elem)178 ipc_importance_kmsg_link(
179 ipc_kmsg_t kmsg,
180 ipc_importance_elem_t elem)
181 {
182 ipc_importance_elem_t link_elem;
183
184 assert(IIE_NULL == kmsg->ikm_importance);
185
186 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
187 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
188 elem;
189
190 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
191 kmsg->ikm_importance = elem;
192 }
193
194 /*
195 * Routine: ipc_importance_kmsg_unlink
196 * Purpose:
197 * Unlink the kmsg from its current propagation chain.
198 * If the element is a task importance, we unlink directly
199 * from its propagation chain. Otherwise, we unlink from the
200 * destination task of the inherit.
201 * Returns:
202 * The reference to the importance element it was linked on.
203 * Conditions:
204 * Importance lock held.
205 * Caller is responsible for dropping reference on returned elem.
206 */
207 static ipc_importance_elem_t
ipc_importance_kmsg_unlink(ipc_kmsg_t kmsg)208 ipc_importance_kmsg_unlink(
209 ipc_kmsg_t kmsg)
210 {
211 ipc_importance_elem_t elem = kmsg->ikm_importance;
212
213 if (IIE_NULL != elem) {
214 ipc_importance_elem_t unlink_elem;
215
216 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
217 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
218 elem;
219
220 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
221 kmsg->ikm_importance = IIE_NULL;
222 }
223 return elem;
224 }
225
226 /*
227 * Routine: ipc_importance_inherit_link
228 * Purpose:
229 * Link the inherit onto the appropriate propagation chain.
230 * If the element is a task importance, we link directly
231 * on its propagation chain. Otherwise, we link onto the
232 * destination task of the inherit.
233 * Conditions:
234 * Importance lock held.
235 * Caller is donating an elem importance reference to the inherit.
236 */
237 static void
ipc_importance_inherit_link(ipc_importance_inherit_t inherit,ipc_importance_elem_t elem)238 ipc_importance_inherit_link(
239 ipc_importance_inherit_t inherit,
240 ipc_importance_elem_t elem)
241 {
242 ipc_importance_task_t link_task;
243
244 assert(IIE_NULL == inherit->iii_from_elem);
245 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
246 ((ipc_importance_inherit_t)elem)->iii_to_task :
247 (ipc_importance_task_t)elem;
248
249 queue_enter(&link_task->iit_inherits, inherit,
250 ipc_importance_inherit_t, iii_inheritance);
251 inherit->iii_from_elem = elem;
252 }
253
254 /*
255 * Routine: ipc_importance_inherit_find
256 * Purpose:
257 * Find an existing inherit that links the from element to the
258 * to_task at a given nesting depth. As inherits from other
259 * inherits are actually linked off the original inherit's donation
260 * receiving task, we have to conduct our search from there if
261 * the from element is an inherit.
262 * Returns:
263 * A pointer (not a reference) to the matching inherit.
264 * Conditions:
265 * Importance lock held.
266 */
267 static ipc_importance_inherit_t
ipc_importance_inherit_find(ipc_importance_elem_t from,ipc_importance_task_t to_task,unsigned int depth)268 ipc_importance_inherit_find(
269 ipc_importance_elem_t from,
270 ipc_importance_task_t to_task,
271 unsigned int depth)
272 {
273 ipc_importance_task_t link_task;
274 ipc_importance_inherit_t inherit;
275
276 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
277 ((ipc_importance_inherit_t)from)->iii_to_task :
278 (ipc_importance_task_t)from;
279
280 queue_iterate(&link_task->iit_inherits, inherit,
281 ipc_importance_inherit_t, iii_inheritance) {
282 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
283 return inherit;
284 }
285 }
286 return III_NULL;
287 }
288
289 /*
290 * Routine: ipc_importance_inherit_unlink
291 * Purpose:
292 * Unlink the inherit from its current propagation chain.
293 * If the element is a task importance, we unlink directly
294 * from its propagation chain. Otherwise, we unlink from the
295 * destination task of the inherit.
296 * Returns:
297 * The reference to the importance element it was linked on.
298 * Conditions:
299 * Importance lock held.
300 * Caller is responsible for dropping reference on returned elem.
301 */
302 static ipc_importance_elem_t
ipc_importance_inherit_unlink(ipc_importance_inherit_t inherit)303 ipc_importance_inherit_unlink(
304 ipc_importance_inherit_t inherit)
305 {
306 ipc_importance_elem_t elem = inherit->iii_from_elem;
307
308 if (IIE_NULL != elem) {
309 ipc_importance_task_t unlink_task;
310
311 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
312 ((ipc_importance_inherit_t)elem)->iii_to_task :
313 (ipc_importance_task_t)elem;
314
315 queue_remove(&unlink_task->iit_inherits, inherit,
316 ipc_importance_inherit_t, iii_inheritance);
317 inherit->iii_from_elem = IIE_NULL;
318 }
319 return elem;
320 }
321
/*
 * Routine:	ipc_importance_reference
 * Purpose:
 *	Add a reference to the importance element.
 * Conditions:
 *	Caller must hold a reference on the element already
 *	(a zero refcount here would indicate a use-after-free).
 */
void
ipc_importance_reference(ipc_importance_elem_t elem)
{
	assert(0 < IIE_REFS(elem));
	ipc_importance_reference_internal(elem);
}
335
/*
 * Routine:	ipc_importance_release_locked
 * Purpose:
 *	Release a reference on an importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 *	For an inherit, this also backs out any external boost
 *	contribution it was donating to its destination task, and
 *	recursively releases the reference it held on its source
 *	element (which re-enters this routine).
 * Conditions:
 *	Entered with importance lock held, leaves with it unlocked.
 */
static void
ipc_importance_release_locked(ipc_importance_elem_t elem)
{
	assert(0 < IIE_REFS(elem));

#if IMPORTANCE_DEBUG
	/*
	 * Sanity check: the refcount must cover at least the references
	 * implied by outstanding "made" rights, linked kmsgs, and
	 * downstream inherits, plus the one the caller is dropping.
	 */
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_task_t link_task;
	ipc_kmsg_t temp_kmsg;
	uint32_t expected = 0;

	if (0 < elem->iie_made) {
		expected++;
	}

	link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
	    ((ipc_importance_inherit_t)elem)->iii_to_task :
	    (ipc_importance_task_t)elem;

	queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
	if (temp_kmsg->ikm_importance == elem) {
		expected++;
	}
	queue_iterate(&link_task->iit_inherits, temp_inherit,
	    ipc_importance_inherit_t, iii_inheritance)
	if (temp_inherit->iii_from_elem == elem) {
		expected++;
	}
	if (IIE_REFS(elem) < expected + 1) {
		panic("ipc_importance_release_locked (%p)", elem);
	}
#endif /* IMPORTANCE_DEBUG */

	/* not the last reference: just unlock and return */
	if (0 < ipc_importance_release_internal(elem)) {
		ipc_importance_unlock();
		return;
	}

	/* last ref */

	switch (IIE_TYPE(elem)) {
	/* just a "from" task reference to drop */
	case IIE_TYPE_TASK:
	{
		ipc_importance_task_t task_elem;

		task_elem = (ipc_importance_task_t)elem;

		/* the task can't still hold a reference on the task importance */
		assert(TASK_NULL == task_elem->iit_task);

#if DEVELOPMENT || DEBUG
		queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
#endif

		ipc_importance_unlock();

		zfree(ipc_importance_task_zone, task_elem);
		break;
	}

	/* dropping an inherit element */
	case IIE_TYPE_INHERIT:
	{
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
		ipc_importance_task_t to_task = inherit->iii_to_task;
		ipc_importance_elem_t from_elem;

		assert(IIT_NULL != to_task);
		assert(ipc_importance_task_is_any_receiver_type(to_task));

		/* unlink the inherit from its source element */
		from_elem = ipc_importance_inherit_unlink(inherit);
		assert(IIE_NULL != from_elem);

		/*
		 * The attribute might have pending external boosts if the attribute
		 * was given out during exec, drop them from the appropriate destination
		 * task.
		 *
		 * The attribute will not have any pending external boosts if the
		 * attribute was given out to voucher system since it would have been
		 * dropped by ipc_importance_release_value, but there is not way to
		 * detect that, thus if the attribute has a pending external boost,
		 * drop them from the appropriate destination task.
		 *
		 * The inherit attribute from exec and voucher system would not
		 * get deduped to each other, thus dropping the external boost
		 * from destination task at two different places will not have
		 * any unintended side effects.
		 */
		assert(inherit->iii_externcnt >= inherit->iii_externdrop);
		if (inherit->iii_donating) {
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(to_task));
			assert(to_task->iit_externcnt >= inherit->iii_externcnt);
			assert(to_task->iit_externdrop >= inherit->iii_externdrop);
			/* back out this inherit's contribution to the task's extern counts */
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate as needed */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}

		/* release the reference on the source element (recursive call) */
		ipc_importance_release_locked(from_elem);
		/* unlocked on return */

		/* release the reference on the destination task */
		ipc_importance_task_release(to_task);

		/* free the inherit */
		zfree(ipc_importance_inherit_zone, inherit);
		break;
	}
	}
}
470
471 /*
472 * Routine: ipc_importance_release
473 * Purpose:
474 * Release a reference on an importance attribute value,
475 * unlinking and deallocating the attribute if the last reference.
476 * Conditions:
477 * nothing locked on entrance, nothing locked on exit.
478 * May block.
479 */
480 void
ipc_importance_release(ipc_importance_elem_t elem)481 ipc_importance_release(ipc_importance_elem_t elem)
482 {
483 if (IIE_NULL == elem) {
484 return;
485 }
486
487 ipc_importance_lock();
488 ipc_importance_release_locked(elem);
489 /* unlocked */
490 }
491
492 /*
493 * Routine: ipc_importance_task_reference
494 *
495 *
496 * Purpose:
497 * Retain a reference on a task importance attribute value.
498 * Conditions:
499 * nothing locked on entrance, nothing locked on exit.
500 * caller holds a reference already.
501 */
502 void
ipc_importance_task_reference(ipc_importance_task_t task_elem)503 ipc_importance_task_reference(ipc_importance_task_t task_elem)
504 {
505 if (IIT_NULL == task_elem) {
506 return;
507 }
508 #if IIE_REF_DEBUG
509 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
510 #endif
511 ipc_importance_reference(&task_elem->iit_elem);
512 }
513
514 /*
515 * Routine: ipc_importance_task_release
516 * Purpose:
517 * Release a reference on a task importance attribute value,
518 * unlinking and deallocating the attribute if the last reference.
519 * Conditions:
520 * nothing locked on entrance, nothing locked on exit.
521 * May block.
522 */
523 void
ipc_importance_task_release(ipc_importance_task_t task_elem)524 ipc_importance_task_release(ipc_importance_task_t task_elem)
525 {
526 if (IIT_NULL == task_elem) {
527 return;
528 }
529
530 ipc_importance_lock();
531 #if IIE_REF_DEBUG
532 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
533 #endif
534 ipc_importance_release_locked(&task_elem->iit_elem);
535 /* unlocked */
536 }
537
538 /*
539 * Routine: ipc_importance_task_release_locked
540 * Purpose:
541 * Release a reference on a task importance attribute value,
542 * unlinking and deallocating the attribute if the last reference.
543 * Conditions:
544 * importance lock held on entry, nothing locked on exit.
545 * May block.
546 */
547 static void
ipc_importance_task_release_locked(ipc_importance_task_t task_elem)548 ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
549 {
550 if (IIT_NULL == task_elem) {
551 ipc_importance_unlock();
552 return;
553 }
554 #if IIE_REF_DEBUG
555 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
556 #endif
557 ipc_importance_release_locked(&task_elem->iit_elem);
558 /* unlocked */
559 }
560
561 /*
562 * Routines for importance donation/inheritance/boosting
563 */
564
565
566 /*
567 * External importance assertions are managed by the process in userspace
568 * Internal importance assertions are the responsibility of the kernel
569 * Assertions are changed from internal to external via task_importance_externalize_assertion
570 */
571
/*
 * Routine:	ipc_importance_task_check_transition
 * Purpose:
 *	Increase or decrement the internal task importance counter of the
 *	specified task and determine if propagation and a task policy
 *	update is required.
 *
 *	If it is already enqueued for a policy update, steal it from that queue
 *	(as we are reversing that update before it happens).
 *
 * Conditions:
 *	Called with the importance lock held.
 *	It is the caller's responsibility to perform the propagation of the
 *	transition and/or policy changes by checking the return value.
 */
static boolean_t
ipc_importance_task_check_transition(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	uint32_t delta)
{
#if IMPORTANCE_TRACE
	task_t target_task = task_imp->iit_task;
#endif
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	boolean_t before_boosted, after_boosted;

	ipc_importance_assert_held();

	/* tasks that don't receive importance have no transitions to track */
	if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
		return FALSE;
	}

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* snapshot the effective boosting status before making any changes */
	before_boosted = (task_imp->iit_assertcnt > 0);

	/* Adjust the assertcnt appropriately */
	if (boost) {
		task_imp->iit_assertcnt += delta;
#if IMPORTANCE_TRACE
		DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
		    task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
#endif
	} else {
		// assert(delta <= task_imp->iit_assertcnt);
		if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
			/* TODO: Turn this back into a panic <rdar://problem/12592649> */
			/* clamp: internal count may never fall below the external count */
			task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
		} else {
			task_imp->iit_assertcnt -= delta;
		}
#if IMPORTANCE_TRACE
		// This convers both legacy and voucher-based importance.
		DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
#endif
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* did the change result in an effective donor status change? */
	after_boosted = (task_imp->iit_assertcnt > 0);

	if (after_boosted != before_boosted) {
		/*
		 * If the task importance is already on an update queue, we just reversed the need for a
		 * pending policy update.  If the queue is any other than the delayed-drop-queue, pull it
		 * off that queue and release the reference it got going onto the update queue.  If it is
		 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
		 * before its time delay is up.
		 *
		 * We still need to propagate the change downstream to reverse the assertcnt effects,
		 * but we no longer need to update this task's boost policy state.
		 *
		 * Otherwise, mark it as needing a policy update.
		 */
		assert(0 == task_imp->iit_updatepolicy);
		if (NULL != task_imp->iit_updateq) {
			if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
				queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
				task_imp->iit_updateq = NULL;
				ipc_importance_task_release_internal(task_imp); /* can't be last ref */
			}
		} else {
			task_imp->iit_updatepolicy = 1;
		}
		return TRUE;
	}

	return FALSE;
}
672
673
/*
 * Routine:	ipc_importance_task_propagate_helper
 * Purpose:
 *	Increase or decrement the internal task importance counter of all
 *	importance tasks inheriting from the specified one. If this causes
 *	that importance task to change state, add it to the list of tasks
 *	to do a policy update against.
 * Conditions:
 *	Called with the importance lock held.
 *	It is the caller's responsibility to iterate down the generated list
 *	and propagate any subsequent assertion changes from there.
 */
static void
ipc_importance_task_propagate_helper(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	queue_t propagation)
{
	ipc_importance_task_t temp_task_imp;

	/*
	 * iterate the downstream kmsgs, adjust their boosts,
	 * and capture the next task to adjust for each message
	 */

	ipc_kmsg_t temp_kmsg;

	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		mach_msg_header_t *hdr = ikm_header(temp_kmsg);
		mach_port_delta_t delta;
		ipc_port_t port;

		/* toggle the kmsg importance bit as a barrier to parallel adjusts */
		if (IIT_UPDATE_HOLD == type) {
			/* already raised: someone else accounted for this message */
			if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* mark the message as now carrying importance */
			hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
			delta = 1;
		} else {
			/* not raised: nothing to drop for this message */
			if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* clear the message as now carrying importance */
			hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
			delta = -1;
		}

		/* determine the task importance to adjust as result (if any) */
		port = hdr->msgh_remote_port;
		assert(IP_VALID(port));
		ip_mq_lock(port);
		temp_task_imp = IIT_NULL;
		/* on TRUE return the callee consumed the port lock */
		if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
			ip_mq_unlock(port);
		}

		/* no task importance to adjust associated with the port? */
		if (IIT_NULL == temp_task_imp) {
			continue;
		}

		/* hold a reference on temp_task_imp */

		/* Adjust the task assertions and determine if an edge was crossed */
		if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
			incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
			/* reference donated */
		} else {
			ipc_importance_task_release_internal(temp_task_imp);
		}
	}

	/*
	 * iterate the downstream importance inherits
	 * and capture the next task importance to boost for each
	 */
	ipc_importance_inherit_t temp_inherit;

	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		uint32_t assertcnt = III_EXTERN(temp_inherit);

		temp_task_imp = temp_inherit->iii_to_task;
		assert(IIT_NULL != temp_task_imp);

		if (IIT_UPDATE_HOLD == type) {
			/* if no undropped externcnts in the inherit, nothing to do */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is already donating (forced donation) */
			if (temp_inherit->iii_donating) {
				continue;
			}

			/* mark it donating and contribute to the task externcnts */
			temp_inherit->iii_donating = TRUE;
			temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
		} else {
			/* if no contributing assertions, move on */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is not donating */
			if (!temp_inherit->iii_donating) {
				continue;
			}

			/* mark it no longer donating */
			temp_inherit->iii_donating = FALSE;

			/* remove the contribution the inherit made to the to-task */
			assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
			assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
			assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
			temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
		}

		/* Adjust the task assertions and determine if an edge was crossed */
		assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
		if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
			ipc_importance_task_reference(temp_task_imp);
			incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
		}
	}
}
811
/*
 * Routine:	ipc_importance_task_process_updates
 * Purpose:
 *	Process the queue of task importances and apply the policy
 *	update called for. Only process tasks in the queue with an
 *	update timestamp less than the supplied max.
 * Conditions:
 *	Called and returns with importance locked.
 *	May drop importance lock and block temporarily.
 */
static void
ipc_importance_task_process_updates(
	queue_t supplied_queue,
	boolean_t boost,
	uint64_t max_timestamp)
{
	ipc_importance_task_t task_imp;
	queue_head_t second_chance;
	queue_t queue = supplied_queue;

	/*
	 * This queue will hold the task's we couldn't trylock on first pass.
	 * By using a second (private) queue, we guarantee all tasks that get
	 * entered on this queue have a timestamp under the maximum.
	 */
	queue_init(&second_chance);

	/* process any resulting policy updates */
retry:
	while (!queue_empty(queue)) {
		task_t target_task;
		struct task_pend_token pend_token = {};

		task_imp = (ipc_importance_task_t)queue_first(queue);
		assert(0 == task_imp->iit_updatepolicy);
		assert(queue == task_imp->iit_updateq);

		/* if timestamp is too big, we're done */
		if (task_imp->iit_updatetime > max_timestamp) {
			break;
		}

		/* we were given a reference on each task in the queue */

		/* remove it from the supplied queue */
		queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
		task_imp->iit_updateq = NULL;

		target_task = task_imp->iit_task;

		/* Is it well on the way to exiting? */
		if (TASK_NULL == target_task) {
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* Has the update been reversed on the hysteresis queue? */
		if (0 < task_imp->iit_assertcnt &&
		    queue == &ipc_importance_delayed_drop_queue) {
			/* re-boosted while parked; drop is no longer needed */
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/*
		 * Can we get the task lock out-of-order?
		 * If not, stick this back on the second-chance queue.
		 */
		if (!task_lock_try(target_task)) {
			boolean_t should_wait_lock = (queue == &second_chance);
			task_imp->iit_updateq = &second_chance;

			/*
			 * If we're already processing second-chances on
			 * tasks, keep this task on the front of the queue.
			 * We will wait for the task lock before coming
			 * back and trying again, and we have a better
			 * chance of re-acquiring the lock if we come back
			 * to it right away.
			 */
			if (should_wait_lock) {
				task_reference(target_task);
				queue_enter_first(&second_chance, task_imp,
				    ipc_importance_task_t, iit_updates);
			} else {
				queue_enter(&second_chance, task_imp,
				    ipc_importance_task_t, iit_updates);
			}
			ipc_importance_unlock();

			if (should_wait_lock) {
				/* block until the holder releases, then retry */
				task_lock(target_task);
				task_unlock(target_task);
				task_deallocate(target_task);
			}

			ipc_importance_lock();
			continue;
		}

		/* is it going away? */
		if (!target_task->active) {
			task_unlock(target_task);
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* take a task reference for while we don't have the importance lock */
		task_reference(target_task);

		/* count the transition */
		if (boost) {
			task_imp->iit_transitions++;
		}

		ipc_importance_unlock();

		/* apply the policy adjust to the target task (while it is still locked) */
		task_update_boost_locked(target_task, boost, &pend_token);

		/* complete the policy update with the task unlocked */
		ipc_importance_task_release(task_imp);
		task_unlock(target_task);
		task_policy_update_complete_unlocked(target_task, &pend_token);
		task_deallocate(target_task);

		ipc_importance_lock();
	}

	/* If there are tasks we couldn't update the first time, try again */
	if (!queue_empty(&second_chance)) {
		queue = &second_chance;
		goto retry;
	}
}
952
953
954 /*
955 * Routine: ipc_importance_task_delayed_drop_scan
956 * Purpose:
957 * The thread call routine to scan the delayed drop queue,
958 * requesting all updates with a deadline up to the last target
959 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
 *		thread's optimum delay).
962 * Conditions:
963 * Nothing locked
964 */
static void
ipc_importance_task_delayed_drop_scan(
	__unused void *arg1,
	__unused void *arg2)
{
	ipc_importance_lock();

	/* process all queued task drops with timestamps up to TARGET(first)+SKEW */
	ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
	    FALSE,
	    ipc_importance_delayed_drop_timestamp);

	/* importance lock may have been temporarily dropped */

	/* If there are any entries left in the queue, re-arm the call here */
	if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
		ipc_importance_task_t task_imp;
		uint64_t deadline;
		uint64_t leeway;

		/* head entry is the earliest remaining drop request */
		task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);

		/* next deadline = head entry's enqueue timestamp + DENAP_DROP_DELAY */
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	} else {
		/* queue fully drained: no outstanding thread-call request */
		ipc_importance_delayed_drop_call_requested = FALSE;
	}
	ipc_importance_unlock();
}
1004
1005 /*
1006 * Routine: ipc_importance_task_delayed_drop
1007 * Purpose:
1008 * Queue the specified task importance for delayed policy
1009 * update to drop its boost.
1010 * Conditions:
1011 * Called with the importance lock held.
1012 */
static void
ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
{
	uint64_t timestamp = mach_absolute_time();      /* no mach_approximate_time() in kernel */

	assert(ipc_importance_delayed_drop_call != NULL);

	/*
	 * If still on an update queue from a previous change,
	 * remove it first (and use that reference). Otherwise, take
	 * a new reference for the delay drop update queue.
	 */
	if (NULL != task_imp->iit_updateq) {
		queue_remove(task_imp->iit_updateq, task_imp,
		    ipc_importance_task_t, iit_updates);
	} else {
		ipc_importance_task_reference_internal(task_imp);
	}

	task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
	task_imp->iit_updatetime = timestamp;

	/* append at the tail so the queue stays in enqueue-timestamp order */
	queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
	    ipc_importance_task_t, iit_updates);

	/* request the delayed thread-call if not already requested */
	if (!ipc_importance_delayed_drop_call_requested) {
		uint64_t deadline;
		uint64_t leeway;

		/* deadline = this entry's timestamp + DENAP_DROP_DELAY */
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		ipc_importance_delayed_drop_call_requested = TRUE;
		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	}
}
1058
1059
1060 /*
1061 * Routine: ipc_importance_task_propagate_assertion_locked
1062 * Purpose:
1063 * Propagate the importance transition type to every item
1064 * If this causes a boost to be applied, determine if that
1065 * boost should propagate downstream.
1066 * Conditions:
1067 * Called with the importance lock held.
1068 */
static void
ipc_importance_task_propagate_assertion_locked(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	boolean_t update_task_imp)
{
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	ipc_importance_task_t temp_task_imp;
	queue_head_t propagate;         /* importance elements still to walk downstream */
	queue_head_t updates;           /* tasks needing a policy update once walking is done */

	queue_init(&updates);
	queue_init(&propagate);

	ipc_importance_assert_held();

	/*
	 * If we're going to update the policy for the provided task,
	 * enqueue it on the propagate queue itself. Otherwise, only
	 * enqueue downstream things.
	 */
	if (update_task_imp) {
		/* reference is held (and accounted) while on the propagate queue */
		ipc_importance_task_reference(task_imp);
		incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
		queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
	} else {
		ipc_importance_task_propagate_helper(task_imp, type, &propagate);
	}

	/*
	 * for each item on the propagation list, propagate any change downstream,
	 * adding new tasks to propagate further if they transitioned as well.
	 */
	while (!queue_empty(&propagate)) {
		boolean_t need_update;

		queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
		/* hold a reference on temp_task_imp */

		assert(IIT_NULL != temp_task_imp);

		/* only propagate for receivers not already marked as a donor */
		if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
		    ipc_importance_task_is_marked_receiver(temp_task_imp)) {
			ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
		}

		/* if we have a policy update to apply, enqueue a reference for later processing */
		need_update = (0 != temp_task_imp->iit_updatepolicy);
		temp_task_imp->iit_updatepolicy = 0;
		if (need_update && TASK_NULL != temp_task_imp->iit_task) {
			if (NULL == temp_task_imp->iit_updateq) {
				/*
				 * If a downstream task that needs an update is subject to AppNap,
				 * drop boosts according to the delay hysteresis. Otherwise,
				 * update it immediately.
				 */
				if (!boost && temp_task_imp != task_imp &&
				    ipc_importance_delayed_drop_call != NULL &&
				    ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
					ipc_importance_task_delayed_drop(temp_task_imp);
				} else {
					temp_task_imp->iit_updatetime = 0;
					temp_task_imp->iit_updateq = &updates;
					ipc_importance_task_reference_internal(temp_task_imp);
					/* boosts are appended in order; drops go to the front */
					if (boost) {
						queue_enter(&updates, temp_task_imp,
						    ipc_importance_task_t, iit_updates);
					} else {
						queue_enter_first(&updates, temp_task_imp,
						    ipc_importance_task_t, iit_updates);
					}
				}
			} else {
				/* Must already be on the AppNap hysteresis queue */
				assert(ipc_importance_delayed_drop_call != NULL);
				assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
			}
		}

		/* drop the reference taken when the element was put on the propagate queue */
		ipc_importance_task_release_internal(temp_task_imp);
	}

	/* apply updates to task (may drop importance lock) */
	if (!queue_empty(&updates)) {
		ipc_importance_task_process_updates(&updates, boost, 0);
	}
}
1157
1158 /*
1159 * Routine: ipc_importance_task_hold_internal_assertion_locked
1160 * Purpose:
1161 * Increment the assertion count on the task importance.
1162 * If this results in a boost state change in that task,
 *		prepare to update task policy for this task AND, if
 *		not just waking out of App Nap, all down-stream
1165 * tasks that have a similar transition through inheriting
1166 * this update.
1167 * Conditions:
1168 * importance locked on entry and exit.
1169 * May temporarily drop importance lock and block.
1170 */
1171 static kern_return_t
ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp,uint32_t count)1172 ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1173 {
1174 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1175 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1176 }
1177 return KERN_SUCCESS;
1178 }
1179
1180 /*
1181 * Routine: ipc_importance_task_drop_internal_assertion_locked
1182 * Purpose:
1183 * Decrement the assertion count on the task importance.
1184 * If this results in a boost state change in that task,
 *		prepare to update task policy for this task AND, if
 *		not just waking out of App Nap, all down-stream
1187 * tasks that have a similar transition through inheriting
1188 * this update.
1189 * Conditions:
1190 * importance locked on entry and exit.
1191 * May temporarily drop importance lock and block.
1192 */
1193 static kern_return_t
ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp,uint32_t count)1194 ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1195 {
1196 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1197 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1198 }
1199 return KERN_SUCCESS;
1200 }
1201
1202 /*
1203 * Routine: ipc_importance_task_hold_internal_assertion
1204 * Purpose:
1205 * Increment the assertion count on the task importance.
1206 * If this results in a 0->1 change in that count,
1207 * prepare to update task policy for this task AND
1208 * (potentially) all down-stream tasks that have a
1209 * similar transition through inheriting this update.
1210 * Conditions:
1211 * Nothing locked
1212 * May block after dropping importance lock.
1213 */
1214 int
ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp,uint32_t count)1215 ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1216 {
1217 int ret = KERN_SUCCESS;
1218
1219 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1220 ipc_importance_lock();
1221 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1222 ipc_importance_unlock();
1223 }
1224 return ret;
1225 }
1226
1227 /*
1228 * Routine: ipc_importance_task_drop_internal_assertion
1229 * Purpose:
1230 * Decrement the assertion count on the task importance.
1231 * If this results in a X->0 change in that count,
1232 * prepare to update task policy for this task AND
1233 * all down-stream tasks that have a similar transition
1234 * through inheriting this drop update.
1235 * Conditions:
1236 * Nothing locked on entry.
1237 * May block after dropping importance lock.
1238 */
1239 kern_return_t
ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp,uint32_t count)1240 ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1241 {
1242 kern_return_t ret = KERN_SUCCESS;
1243
1244 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1245 ipc_importance_lock();
1246 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1247 ipc_importance_unlock();
1248 }
1249 return ret;
1250 }
1251
1252 /*
1253 * Routine: ipc_importance_task_hold_file_lock_assertion
1254 * Purpose:
1255 * Increment the file lock assertion count on the task importance.
1256 * If this results in a 0->1 change in that count,
1257 * prepare to update task policy for this task AND
1258 * (potentially) all down-stream tasks that have a
1259 * similar transition through inheriting this update.
1260 * Conditions:
1261 * Nothing locked
1262 * May block after dropping importance lock.
1263 */
1264 kern_return_t
ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp,uint32_t count)1265 ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1266 {
1267 kern_return_t ret = KERN_SUCCESS;
1268
1269 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1270 ipc_importance_lock();
1271 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1272 if (KERN_SUCCESS == ret) {
1273 task_imp->iit_filelocks += count;
1274 }
1275 ipc_importance_unlock();
1276 }
1277 return ret;
1278 }
1279
1280 /*
1281 * Routine: ipc_importance_task_drop_file_lock_assertion
1282 * Purpose:
1283 * Decrement the assertion count on the task importance.
1284 * If this results in a X->0 change in that count,
1285 * prepare to update task policy for this task AND
1286 * all down-stream tasks that have a similar transition
1287 * through inheriting this drop update.
1288 * Conditions:
1289 * Nothing locked on entry.
1290 * May block after dropping importance lock.
1291 */
1292 kern_return_t
ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp,uint32_t count)1293 ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1294 {
1295 kern_return_t ret = KERN_SUCCESS;
1296
1297 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1298 ipc_importance_lock();
1299 if (count <= task_imp->iit_filelocks) {
1300 task_imp->iit_filelocks -= count;
1301 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1302 } else {
1303 ret = KERN_INVALID_ARGUMENT;
1304 }
1305 ipc_importance_unlock();
1306 }
1307 return ret;
1308 }
1309
1310 /*
1311 * Routine: ipc_importance_task_hold_legacy_external_assertion
1312 * Purpose:
1313 * Increment the external assertion count on the task importance.
 *		This cannot result in a 0->1 transition, as the caller must
1315 * already hold an external boost.
1316 * Conditions:
1317 * Nothing locked on entry.
1318 * May block after dropping importance lock.
1319 * A queue of task importance structures is returned
1320 * by ipc_importance_task_hold_assertion_locked(). Each
1321 * needs to be updated (outside the importance lock hold).
1322 */
kern_return_t
ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	task_t target_task;
	/* counts snapshotted under the lock for the diagnostic printf below */
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	kern_return_t ret;

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (IIT_LEGACY_EXTERN(task_imp) == 0) {
		/* Only allowed to take a new boost assertion when holding an external boost */
		/* save data for diagnostic printf below */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
		count = 0;      /* no boost was taken; reflected in the trace below */
	} else {
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(0 < task_imp->iit_assertcnt);
		assert(0 < IIT_EXTERN(task_imp));
		/* bump internal, external, and legacy-external counts in lock-step */
		task_imp->iit_assertcnt += count;
		task_imp->iit_externcnt += count;
		task_imp->iit_legacy_externcnt += count;
		ret = KERN_SUCCESS;
	}
	ipc_importance_unlock();

#if IMPORTANCE_TRACE
	/* NOTE(review): reads task_imp counts after the unlock — racy trace-only data */
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This covers the legacy case where a task takes an extra boost.
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
#endif

	/* delayed printf (outside the lock) for misbehaving callers */
	if (KERN_FAILURE == ret && target_task != TASK_NULL) {
		printf("BUG in process %s[%d]: "
		    "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
		    "(%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
	}

	return ret;
}
1379
1380 /*
1381 * Routine: ipc_importance_task_drop_legacy_external_assertion
1382 * Purpose:
1383 * Drop the legacy external assertion count on the task and
1384 * reflect that change to total external assertion count and
1385 * then onto the internal importance count.
1386 *
1387 * If this results in a X->0 change in the internal,
1388 * count, prepare to update task policy for this task AND
1389 * all down-stream tasks that have a similar transition
1390 * through inheriting this update.
1391 * Conditions:
1392 * Nothing locked on entry.
1393 */
kern_return_t
ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	int ret = KERN_SUCCESS;
	task_t target_task;
	/* counts snapshotted under the lock for the diagnostic printf below */
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	/* only single-count drops are supported by legacy callers */
	if (count > 1) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (count > IIT_LEGACY_EXTERN(task_imp)) {
		/* Process over-released its boost count - save data for diagnostic printf */
		/* TODO: If count > 1, we should clear out as many external assertions as there are left. */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
	} else {
		/*
		 * decrement legacy external count from the top level and reflect
		 * into internal for this and all subsequent updates.
		 */
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(IIT_EXTERN(task_imp) >= count);

		task_imp->iit_legacy_externdrop += count;
		task_imp->iit_externdrop += count;

		/* reset extern counters (if appropriate) */
		if (IIT_LEGACY_EXTERN(task_imp) == 0) {
			if (IIT_EXTERN(task_imp) != 0) {
				/* fold the retired legacy counts out of the extern counts */
				task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
				task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
			} else {
				task_imp->iit_externcnt = 0;
				task_imp->iit_externdrop = 0;
			}
			task_imp->iit_legacy_externcnt = 0;
			task_imp->iit_legacy_externdrop = 0;
		}

		/* reflect the drop to the internal assertion count (and effect any importance change) */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
		}
		ret = KERN_SUCCESS;
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	ipc_importance_unlock();

	/* delayed printf for user-supplied data failures */
	if (KERN_FAILURE == ret && TASK_NULL != target_task) {
		printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
	}

	return ret;
}
1471
1472
1473 #if LEGACY_IMPORTANCE_DELIVERY
1474 /* Transfer an assertion to legacy userspace responsibility */
static kern_return_t
ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
{
	task_t target_task;

	assert(IIT_NULL != task_imp);
	target_task = task_imp->iit_task;

	/* nothing to externalize if the task is gone or cannot receive boosts */
	if (TASK_NULL == target_task ||
	    !ipc_importance_task_is_any_receiver_type(task_imp)) {
		return KERN_FAILURE;
	}

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	ipc_importance_lock();
	/* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
	assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
	/* move `count` assertions into legacy-external (userspace-managed) accounting */
	task_imp->iit_legacy_externcnt += count;
	task_imp->iit_externcnt += count;
	ipc_importance_unlock();

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This is the legacy boosting path
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
#endif /* IMPORTANCE_TRACE */

	return KERN_SUCCESS;
}
1511 #endif /* LEGACY_IMPORTANCE_DELIVERY */
1512
1513 /*
1514 * Routine: ipc_importance_task_update_live_donor
1515 * Purpose:
1516 * Read the live donor status and update the live_donor bit/propagate the change in importance.
1517 * Conditions:
1518 * Nothing locked on entrance, nothing locked on exit.
1519 *
1520 * TODO: Need tracepoints around this function...
1521 */
void
ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
{
	uint32_t task_live_donor;
	boolean_t before_donor;
	boolean_t after_donor;
	task_t target_task;

	assert(task_imp != NULL);

	/*
	 * Nothing to do if the task is not marked as expecting
	 * live donor updates.
	 */
	if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
		return;
	}

	ipc_importance_lock();

	/* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
	target_task = task_imp->iit_task;
	if (TASK_NULL == target_task) {
		ipc_importance_unlock();
		return;
	}
	before_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* snapshot task live donor status - may change, but another call will accompany the change */
	task_live_donor = target_task->effective_policy.tep_live_donor;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
	    target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
#endif

	/* update the task importance live donor status based on the task's value */
	task_imp->iit_donor = task_live_donor;

	after_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* Has the effectiveness of being a donor changed as a result of this update? */
	if (before_donor != after_donor) {
		iit_update_type_t type;

		/* propagate assertions without updating the current task policy (already handled) */
		if (0 == before_donor) {
			/* became a donor: count the 0->1 transition */
			task_imp->iit_transitions++;
			type = IIT_UPDATE_HOLD;
		} else {
			type = IIT_UPDATE_DROP;
		}
		ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
	    target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
#endif

	ipc_importance_unlock();
}
1588
1589
1590 /*
1591 * Routine: ipc_importance_task_mark_donor
1592 * Purpose:
1593 * Set the task importance donor flag.
1594 * Conditions:
1595 * Nothing locked on entrance, nothing locked on exit.
1596 *
1597 * This is only called while the task is being constructed,
1598 * so no need to update task policy or propagate downstream.
1599 */
1600 void
ipc_importance_task_mark_donor(ipc_importance_task_t task_imp,boolean_t donating)1601 ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1602 {
1603 assert(task_imp != NULL);
1604
1605 ipc_importance_lock();
1606
1607 int old_donor = task_imp->iit_donor;
1608
1609 task_imp->iit_donor = (donating ? 1 : 0);
1610
1611 if (task_imp->iit_donor > 0 && old_donor == 0) {
1612 task_imp->iit_transitions++;
1613 }
1614
1615 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1616 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1617 task_pid(task_imp->iit_task), donating,
1618 old_donor, task_imp->iit_donor, 0);
1619
1620 ipc_importance_unlock();
1621 }
1622
1623 /*
1624 * Routine: ipc_importance_task_marked_donor
1625 * Purpose:
1626 * Query the donor flag for the given task importance.
1627 * Conditions:
1628 * May be called without taking the importance lock.
1629 * In that case, donor status can change so you must
1630 * check only once for each donation event.
1631 */
1632 boolean_t
ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)1633 ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1634 {
1635 if (IIT_NULL == task_imp) {
1636 return FALSE;
1637 }
1638 return 0 != task_imp->iit_donor;
1639 }
1640
1641 /*
1642 * Routine: ipc_importance_task_mark_live_donor
1643 * Purpose:
1644 * Indicate that the task is eligible for live donor updates.
1645 * Conditions:
1646 * Nothing locked on entrance, nothing locked on exit.
1647 *
1648 * This is only called while the task is being constructed.
1649 */
1650 void
ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp,boolean_t live_donating)1651 ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1652 {
1653 assert(task_imp != NULL);
1654
1655 ipc_importance_lock();
1656 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1657 ipc_importance_unlock();
1658 }
1659
1660 /*
1661 * Routine: ipc_importance_task_is_marked_live_donor
1662 * Purpose:
1663 * Query the live donor and donor flags for the given task importance.
1664 * Conditions:
1665 * May be called without taking the importance lock.
1666 * In that case, donor status can change so you must
1667 * check only once for each donation event.
1668 */
1669 boolean_t
ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)1670 ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1671 {
1672 if (IIT_NULL == task_imp) {
1673 return FALSE;
1674 }
1675 return 0 != task_imp->iit_live_donor;
1676 }
1677
1678 /*
1679 * Routine: ipc_importance_task_is_donor
1680 * Purpose:
1681 * Query the full donor status for the given task importance.
1682 * Conditions:
1683 * May be called without taking the importance lock.
1684 * In that case, donor status can change so you must
1685 * check only once for each donation event.
1686 */
1687 boolean_t
ipc_importance_task_is_donor(ipc_importance_task_t task_imp)1688 ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1689 {
1690 if (IIT_NULL == task_imp) {
1691 return FALSE;
1692 }
1693 return ipc_importance_task_is_marked_donor(task_imp) ||
1694 (ipc_importance_task_is_marked_receiver(task_imp) &&
1695 task_imp->iit_assertcnt > 0);
1696 }
1697
1698 /*
1699 * Routine: ipc_importance_task_is_never_donor
1700 * Purpose:
1701 * Query if a given task can ever donate importance.
1702 * Conditions:
1703 * May be called without taking the importance lock.
 *		Condition is permanent for a given task.
1705 */
1706 boolean_t
ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)1707 ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1708 {
1709 if (IIT_NULL == task_imp) {
1710 return TRUE;
1711 }
1712 return !ipc_importance_task_is_marked_donor(task_imp) &&
1713 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1714 !ipc_importance_task_is_marked_receiver(task_imp);
1715 }
1716
1717 /*
1718 * Routine: ipc_importance_task_mark_receiver
1719 * Purpose:
1720 * Update the task importance receiver flag.
1721 * Conditions:
1722 * Nothing locked on entrance, nothing locked on exit.
1723 * This can only be invoked before the task is discoverable,
1724 * so no worries about atomicity(?)
1725 */
void
ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
{
	assert(task_imp != NULL);

	ipc_importance_lock();
	if (receiving) {
		/* must not already hold boosts or be marked as a de-nap receiver */
		assert(task_imp->iit_assertcnt == 0);
		assert(task_imp->iit_externcnt == 0);
		assert(task_imp->iit_externdrop == 0);
		assert(task_imp->iit_denap == 0);
		task_imp->iit_receiver = 1;     /* task can receive importance boost */
	} else if (task_imp->iit_receiver) {
		assert(task_imp->iit_denap == 0);
		/* cannot clear the flag while boosts are outstanding */
		if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
			panic("disabling imp_receiver on task with pending importance boosts!");
		}
		task_imp->iit_receiver = 0;
	}
	ipc_importance_unlock();
}
1747
1748
1749 /*
1750 * Routine: ipc_importance_task_marked_receiver
1751 * Purpose:
1752 * Query the receiver flag for the given task importance.
1753 * Conditions:
1754 * May be called without taking the importance lock as
1755 * the importance flag can never change after task init.
1756 */
1757 boolean_t
ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)1758 ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1759 {
1760 return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
1761 }
1762
1763
1764 /*
1765 * Routine: ipc_importance_task_mark_denap_receiver
1766 * Purpose:
1767 * Update the task importance de-nap receiver flag.
1768 * Conditions:
1769 * Nothing locked on entrance, nothing locked on exit.
1770 * This can only be invoked before the task is discoverable,
1771 * so no worries about atomicity(?)
1772 */
void
ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
{
	assert(task_imp != NULL);

	ipc_importance_lock();
	if (denap) {
		/* must not already hold boosts or be a plain importance receiver */
		assert(task_imp->iit_assertcnt == 0);
		assert(task_imp->iit_externcnt == 0);
		assert(task_imp->iit_receiver == 0);
		task_imp->iit_denap = 1;        /* task can receive de-nap boost */
	} else if (task_imp->iit_denap) {
		assert(task_imp->iit_receiver == 0);
		/* cannot clear the flag while boosts are outstanding */
		if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
			panic("disabling de-nap on task with pending de-nap boosts!");
		}
		task_imp->iit_denap = 0;
	}
	ipc_importance_unlock();
}
1793
1794
1795 /*
1796 * Routine: ipc_importance_task_marked_denap_receiver
1797 * Purpose:
1798 * Query the de-nap receiver flag for the given task importance.
1799 * Conditions:
1800 * May be called without taking the importance lock as
1801 * the de-nap flag can never change after task init.
1802 */
1803 boolean_t
ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)1804 ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1805 {
1806 return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
1807 }
1808
1809 /*
1810 * Routine: ipc_importance_task_is_denap_receiver
1811 * Purpose:
1812 * Query the full de-nap receiver status for the given task importance.
1813 * For now, that is simply whether the receiver flag is set.
1814 * Conditions:
1815 * May be called without taking the importance lock as
1816 * the de-nap receiver flag can never change after task init.
1817 */
1818 boolean_t
ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)1819 ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1820 {
1821 return ipc_importance_task_is_marked_denap_receiver(task_imp);
1822 }
1823
1824 /*
1825 * Routine: ipc_importance_task_is_any_receiver_type
1826 * Purpose:
1827 * Query if the task is marked to receive boosts - either
1828 * importance or denap.
1829 * Conditions:
1830 * May be called without taking the importance lock as both
1831 * the importance and de-nap receiver flags can never change
1832 * after task init.
1833 */
1834 boolean_t
ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)1835 ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1836 {
1837 return ipc_importance_task_is_marked_receiver(task_imp) ||
1838 ipc_importance_task_is_marked_denap_receiver(task_imp);
1839 }
1840
#if 0 /* currently unused */

/*
 * Routine:	ipc_importance_inherit_reference
 * Purpose:
 *	Add a reference to the inherit importance element.
 * Conditions:
 *	Caller must hold a reference on the inherit element.
 */
static inline void
ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
{
	/* inherits share the common importance-element refcount machinery */
	ipc_importance_reference(&inherit->iii_elem);
}
#endif /* currently unused */
1856
/*
 * Routine:	ipc_importance_inherit_release_locked
 * Purpose:
 *	Release a reference on an inherit importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	Entered with importance lock held, leaves with it unlocked.
 */
static inline void
ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
{
	/* delegates to the generic element release; it drops the lock */
	ipc_importance_release_locked(&inherit->iii_elem);
}
1870
#if 0 /* currently unused */
/*
 * Routine:	ipc_importance_inherit_release
 * Purpose:
 *	Release a reference on an inherit importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	nothing locked on entrance, nothing locked on exit.
 *	May block.
 */
void
ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
{
	/* tolerate a NULL inherit so callers need not check */
	if (III_NULL != inherit) {
		ipc_importance_release(&inherit->iii_elem);
	}
}
#endif /* 0 currently unused */
1889
/*
 * Routine:	ipc_importance_for_task
 * Purpose:
 *	Create a reference for the specified task's base importance
 *	element. If the base importance element doesn't exist, make it and
 *	bind it to the active task. If the task is inactive, there isn't
 *	any need to return a new reference.
 * Conditions:
 *	If made is true, a "made" reference is returned (for donating to
 *	the voucher system). Otherwise an internal reference is returned.
 *
 *	Nothing locked on entry. May block.
 */
ipc_importance_task_t
ipc_importance_for_task(task_t task, boolean_t made)
{
	ipc_importance_task_t task_elem;
	boolean_t first_pass = TRUE;

	assert(TASK_NULL != task);

retry:
	/* No use returning anything for inactive task */
	if (!task->active) {
		return IIT_NULL;
	}

	ipc_importance_lock();
	task_elem = task->task_imp_base;
	if (IIT_NULL != task_elem) {
		/* Add a made reference (borrowing active task ref to do it) */
		if (made) {
			/* only the 0 -> 1 transition of iit_made takes a real ref */
			if (0 == task_elem->iit_made++) {
				assert(IIT_REFS_MAX > IIT_REFS(task_elem));
				ipc_importance_task_reference_internal(task_elem);
			}
		} else {
			assert(IIT_REFS_MAX > IIT_REFS(task_elem));
			ipc_importance_task_reference_internal(task_elem);
		}
		ipc_importance_unlock();
		return task_elem;
	}
	ipc_importance_unlock();

	/* only attempt the allocation path once */
	if (!first_pass) {
		return IIT_NULL;
	}
	first_pass = FALSE;

	/* Need to make one - may race with others (be prepared to drop) */
	task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO);
	if (IIT_NULL == task_elem) {
		goto retry;
	}

	task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
	task_elem->iit_made = (made) ? 1 : 0;
	task_elem->iit_task = task; /* take actual ref when we're sure */
#if IIE_REF_DEBUG
	ipc_importance_counter_init(&task_elem->iit_elem);
#endif
	queue_init(&task_elem->iit_kmsgs);
	queue_init(&task_elem->iit_inherits);

	ipc_importance_lock();
	/* recheck under the lock: task may have terminated while we allocated */
	if (!task->active) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		return IIT_NULL;
	}

	/* did we lose the race? */
	if (IIT_NULL != task->task_imp_base) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		goto retry;
	}

	/* we won the race */
	task->task_imp_base = task_elem;
	task_reference_grp(task, TASK_GRP_INTERNAL);
#if DEVELOPMENT || DEBUG
	queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
	task_importance_update_owner_info(task);
#endif
	ipc_importance_unlock();

	return task_elem;
}
1980
#if DEVELOPMENT || DEBUG
/*
 * Cache the owning task's pid and process name in the importance element
 * (debug kernels only) so they survive after the task is disconnected.
 */
void
task_importance_update_owner_info(task_t task)
{
	if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
		ipc_importance_task_t task_elem = task->task_imp_base;

		task_elem->iit_bsd_pid = task_pid(task);
		if (get_bsdtask_info(task)) {
			/* strncpy may not terminate; index 16 is forced to NUL below */
			strncpy(&task_elem->iit_procname[0], proc_name_address(get_bsdtask_info(task)), 16);
			task_elem->iit_procname[16] = '\0';
		} else {
			strncpy(&task_elem->iit_procname[0], "unknown", 16);
		}
	}
}
#endif
1998
/*
 * Return the pid associated with an importance element: the cached
 * value on DEVELOPMENT/DEBUG kernels, otherwise derived from the
 * live task pointer.
 */
static int
task_importance_task_get_pid(ipc_importance_task_t iit)
{
#if DEVELOPMENT || DEBUG
	return (int)iit->iit_bsd_pid;
#else
	return task_pid(iit->iit_task);
#endif
}
2008
/*
 * Routine:	ipc_importance_reset_locked
 * Purpose:
 *	Reset a task's IPC importance (the task is going away or exec'ing)
 *
 *	Remove the donor bit and legacy externalized assertions from the
 *	current task importance and see if that wipes out downstream donations.
 * Conditions:
 *	importance lock held.
 */

static void
ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
{
	boolean_t before_donor, after_donor;

	/* remove the donor bit, live-donor bit and externalized boosts */
	before_donor = ipc_importance_task_is_donor(task_imp);
	if (donor) {
		task_imp->iit_donor = 0;
	}
	/* legacy counts are a subset of the external counts */
	assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
	assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
	assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
	task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
	task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;

	/* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
	/* strip the legacy share of the assertions, clamping to the
	 * remaining external total when the count is already low */
	if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
		task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
	} else {
		task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
	}
	task_imp->iit_legacy_externcnt = 0;
	task_imp->iit_legacy_externdrop = 0;
	after_donor = ipc_importance_task_is_donor(task_imp);

	/* propagate a downstream drop if there was a change in donor status */
	if (after_donor != before_donor) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
	}
}
2051
2052 /*
2053 * Routine: ipc_importance_reset
2054 * Purpose:
2055 * Reset a task's IPC importance
2056 *
2057 * The task is being reset, although staying around. Arrange to have the
2058 * external state of the task reset from the importance.
2059 * Conditions:
2060 * importance lock not held.
2061 */
2062
2063 void
ipc_importance_reset(ipc_importance_task_t task_imp,boolean_t donor)2064 ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2065 {
2066 if (IIT_NULL == task_imp) {
2067 return;
2068 }
2069 ipc_importance_lock();
2070 ipc_importance_reset_locked(task_imp, donor);
2071 ipc_importance_unlock();
2072 }
2073
/*
 * Routine:	ipc_importance_disconnect_task
 * Purpose:
 *	Disconnect a task from its importance.
 *
 *	Clear the task pointer from the importance and drop the
 *	reference the task held on the importance object.  Before
 *	doing that, reset the effects the current task holds on
 *	the importance and see if that wipes out downstream donations.
 *
 *	We allow the upstream boosts to continue to affect downstream
 *	even though the local task is being effectively pulled from
 *	the chain.
 * Conditions:
 *	Nothing locked.
 */
void
ipc_importance_disconnect_task(task_t task)
{
	ipc_importance_task_t task_imp;

	/* task lock taken before importance lock (lock ordering as used here) */
	task_lock(task);
	ipc_importance_lock();
	task_imp = task->task_imp_base;

	/* did somebody beat us to it? */
	if (IIT_NULL == task_imp) {
		ipc_importance_unlock();
		task_unlock(task);
		return;
	}

	/* disconnect the task from this importance */
	assert(task_imp->iit_task == task);
	task_imp->iit_task = TASK_NULL;
	task->task_imp_base = IIT_NULL;
	task_unlock(task);

	/* reset the effects the current task holds on the importance */
	ipc_importance_reset_locked(task_imp, TRUE);

	ipc_importance_task_release_locked(task_imp);
	/* importance unlocked */

	/* deallocate the task now that the importance is unlocked */
	task_deallocate_grp(task, TASK_GRP_INTERNAL);
}
2121
/*
 * Routine:	ipc_importance_exec_switch_task
 * Purpose:
 *	Switch importance task base from old task to new task in exec.
 *
 *	Create an ipc importance linkage from old task to new task,
 *	once the linkage is created, switch the importance task base
 *	from old task to new task. After the switch, the linkage will
 *	represent importance linkage from new task to old task with
 *	watch port importance inheritance linked to new task.
 * Conditions:
 *	Nothing locked.
 *	Returns a reference on importance inherit.
 */
ipc_importance_inherit_t
ipc_importance_exec_switch_task(
	task_t old_task,
	task_t new_task)
{
	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_task_t old_task_imp = IIT_NULL;
	ipc_importance_task_t new_task_imp = IIT_NULL;

	task_importance_reset(old_task);

	/* Create an importance linkage from old_task to new_task */
	inherit = ipc_importance_inherit_from_task(old_task, new_task);

	/* Switch task importance base from old task to new task */
	ipc_importance_lock();

	old_task_imp = old_task->task_imp_base;
	new_task_imp = new_task->task_imp_base;

	/* cross-swap both directions of the task <-> importance binding */
	old_task_imp->iit_task = new_task;
	new_task_imp->iit_task = old_task;

	old_task->task_imp_base = new_task_imp;
	new_task->task_imp_base = old_task_imp;

#if DEVELOPMENT || DEBUG
	/*
	 * Update the pid and proc name for importance base if any
	 */
	task_importance_update_owner_info(new_task);
#endif
	ipc_importance_unlock();

	return inherit;
}
2172
/*
 * Routine:	ipc_importance_check_circularity
 * Purpose:
 *	Check if queueing "port" in a message for "dest"
 *	would create a circular group of ports and messages.
 *
 *	If no circularity (FALSE returned), then "port"
 *	is changed from "in limbo" to "in transit".
 *
 *	That is, we want to set port->ip_destination == dest,
 *	but guaranteeing that this doesn't create a circle
 *	port->ip_destination->ip_destination->... == port
 *
 *	Additionally, if port was successfully changed to "in transit",
 *	propagate boost assertions from the "in limbo" port to all
 *	the ports in the chain, and, if the destination task accepts
 *	boosts, to the destination task.
 *
 * Conditions:
 *	No ports locked. References held for "port" and "dest".
 */

boolean_t
ipc_importance_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	ipc_importance_task_t release_imp_task = IIT_NULL;
	boolean_t imp_lock_held = FALSE;
	int assertcnt = 0;
	ipc_port_t base;
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* a port can always be queued on itself-as-dest (trivially circular) */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/* port is in limbo, so donation status is safe to latch */
	if (port->ip_impdonation != 0) {
		imp_lock_held = TRUE;
		ipc_importance_lock();
	}

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);

	/*
	 * Even if port is just carrying assertions for others,
	 * we need the importance lock.
	 */
	if (port->ip_impcount > 0 && !imp_lock_held) {
		/* try-lock first to avoid dropping the port lock if possible */
		if (!ipc_importance_lock_try()) {
			ip_mq_unlock(port);
			ipc_importance_lock();
			ip_mq_lock(port);
		}
		imp_lock_held = TRUE;
	}

	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	/*
	 * We're about to pay the cost to serialize,
	 * just go ahead and grab importance lock.
	 */
	if (!imp_lock_held) {
		ipc_importance_lock();
		imp_lock_held = TRUE;
	}

	ipc_port_multiple_lock(); /* massive serialization */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		require_ip_active(port);
		assert(ip_in_limbo(port));
		assert(!took_base_ref);

		/* walk the chain again just to unlock each port */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* base is in transit or in limbo */

			require_ip_active(base);
			assert(base->ip_receiver_name == MACH_PORT_NULL);
			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		if (imp_lock_held) {
			ipc_importance_unlock();
		}

		/* undo the turnstile_prepare done on entry */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee:  lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* port is in limbo */
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Port is being enqueued in a kmsg, remove the watchport boost in order to push on destination port */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* must have been in limbo or still bound to a task */
	assert(port->ip_tempowner != 0);

	/*
	 * We delayed dropping assertions from a specific task.
	 * Cache that info now (we'll drop assertions and the
	 * task reference below).
	 */
	release_imp_task = ip_get_imp_task(port);
	if (IIT_NULL != release_imp_task) {
		port->ip_imp_task = IIT_NULL;
	}
	assertcnt = port->ip_impcount;

	/* take the port out of limbo w.r.t. assertions */
	port->ip_tempowner = 0;

	/*
	 * Setup linkage for source port if it has a send turnstile i.e. it has
	 * a thread waiting in send or has a port enqueued in it or has sync ipc
	 * push from a special reply port.
	 */
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	for (;;) {
		ipc_port_t next;
		/* every port along chain track assertions behind it */
		ipc_port_impcount_delta(dest, assertcnt, base);

		if (dest == base) {
			break;
		}

		/* port is in transit */

		require_ip_active(dest);
		assert(ip_in_transit(dest));
		assert(dest->ip_tempowner == 0);

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not in transit */
	assert(!ip_in_transit(base));

	/*
	 * Find the task to boost (if any).
	 * We will boost "through" ports that don't know
	 * about inheritance to deliver receive rights that
	 * do.
	 */
	if (ip_active(base) && (assertcnt > 0)) {
		assert(imp_lock_held);
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				/* specified tempowner task */
				imp_task = ip_get_imp_task(base);
				assert(ipc_importance_task_is_any_receiver_type(imp_task));
			}
			/* otherwise don't boost current task */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);
			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				imp_task = space->is_task->task_imp_base;
			}
		}

		/* take reference before unlocking base */
		if (imp_task != IIT_NULL) {
			ipc_importance_task_reference(imp_task);
		}
	}

	ip_mq_unlock(base);

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the port lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	/*
	 * Transfer assertions now that the ports are unlocked.
	 * Avoid extra overhead if transferring to/from the same task.
	 *
	 * NOTE: If a transfer is occurring, the new assertions will
	 * be added to imp_task BEFORE the importance lock is unlocked.
	 * This is critical - to avoid decrements coming from the kmsgs
	 * beating the increment to the task.
	 */
	boolean_t transfer_assertions = (imp_task != release_imp_task);

	if (imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
		}
	}

	if (release_imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
		}
	}

	if (imp_lock_held) {
		ipc_importance_unlock();
	}

	/* drop the ref taken by ipc_port_destination_chain_lock, if any */
	if (took_base_ref) {
		ip_release(base);
	}

	if (imp_task != IIT_NULL) {
		ipc_importance_task_release(imp_task);
	}

	if (release_imp_task != IIT_NULL) {
		ipc_importance_task_release(release_imp_task);
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
}
2480
/*
 * Routine:	ipc_importance_send
 * Purpose:
 *	Post the importance voucher attribute [if sent] or a static
 *	importance boost depending upon options and conditions.
 * Conditions:
 *	Destination port locked on entry and exit, may be dropped during the call.
 * Returns:
 *	A boolean identifying if the port lock was temporarily dropped.
 */
boolean_t
ipc_importance_send(
	ipc_kmsg_t kmsg,
	mach_msg_option_t option)
{
	mach_msg_header_t *hdr = ikm_header(kmsg);
	ipc_port_t port = hdr->msgh_remote_port;
	ipc_port_t voucher_port;
	boolean_t port_lock_dropped = FALSE;
	ipc_importance_elem_t elem;
	task_t task;
	ipc_importance_task_t task_imp;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* If no donation to be made, return quickly */
	if ((port->ip_impdonation == 0) ||
	    (option & MACH_SEND_NOIMPORTANCE) != 0) {
		return port_lock_dropped;
	}

	task = current_task();

	/* If forced sending a static boost, go update the port */
	if ((option & MACH_SEND_IMPORTANCE) != 0) {
		/* acquire the importance lock while trying to hang on to port lock */
		if (!ipc_importance_lock_try()) {
			port_lock_dropped = TRUE;
			ip_mq_unlock(port);
			ipc_importance_lock();
		}
		goto portupdate;
	}

	task_imp = task->task_imp_base;

	/* If the sender can never donate importance, nothing to do */
	if (ipc_importance_task_is_never_donor(task_imp)) {
		return port_lock_dropped;
	}

	elem = IIE_NULL;

	/* If importance receiver and passing a voucher, look for importance in there */
	voucher_port = ipc_kmsg_get_voucher_port(kmsg);
	if (IP_VALID(voucher_port) &&
	    ipc_importance_task_is_marked_receiver(task_imp)) {
		mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
		mach_voucher_attr_value_handle_array_size_t val_count;
		ipc_voucher_t voucher;

		assert(ip_kotype(voucher_port) == IKOT_VOUCHER);
		voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
		    IKOT_VOUCHER);

		/* check to see if the voucher has an importance attribute */
		val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
		kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
		    vals, &val_count);
		assert(KERN_SUCCESS == kr);

		/*
		 * Only use importance associated with our task (either directly
		 * or through an inherit that donates to our task).
		 */
		if (0 < val_count) {
			ipc_importance_elem_t check_elem;

			check_elem = (ipc_importance_elem_t)vals[0];
			assert(IIE_NULL != check_elem);
			if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
				ipc_importance_inherit_t inherit;
				inherit = (ipc_importance_inherit_t) check_elem;
				if (inherit->iii_to_task == task_imp) {
					elem = check_elem;
				}
			} else if (check_elem == (ipc_importance_elem_t)task_imp) {
				elem = check_elem;
			}
		}
	}

	/* If we haven't found an importance attribute to send yet, use the task's */
	if (IIE_NULL == elem) {
		elem = (ipc_importance_elem_t)task_imp;
	}

	/* take a reference for the message to hold */
	ipc_importance_reference_internal(elem);

	/* acquire the importance lock while trying to hang on to port lock */
	if (!ipc_importance_lock_try()) {
		port_lock_dropped = TRUE;
		ip_mq_unlock(port);
		ipc_importance_lock();
	}

	/* link kmsg onto the donor element propagation chain */
	ipc_importance_kmsg_link(kmsg, elem);
	/* elem reference transfered to kmsg */

	incr_ref_counter(elem->iie_kmsg_refs_added);

	/* If the sender isn't currently a donor, no need to apply boost */
	if (!ipc_importance_task_is_donor(task_imp)) {
		ipc_importance_unlock();

		/* re-acquire port lock, if needed */
		if (TRUE == port_lock_dropped) {
			ip_mq_lock(port);
		}

		return port_lock_dropped;
	}

portupdate:
	/* Mark the fact that we are (currently) donating through this message */
	hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;

	/*
	 * If we need to relock the port, do it with the importance still locked.
	 * This assures we get to add the importance boost through the port to
	 * the task BEFORE anyone else can attempt to undo that operation if
	 * the sender lost donor status.
	 */
	if (TRUE == port_lock_dropped) {
		ip_mq_lock(port);
	}

	ipc_importance_assert_held();

#if IMPORTANCE_TRACE
	if (kdebug_enable) {
		mach_msg_max_trailer_t *dbgtrailer = ipc_kmsg_get_trailer(kmsg, false);
		unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
		mach_msg_id_t imp_msgh_id = hdr->msgh_id;
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
		    task_pid(task), sender_pid, imp_msgh_id, 0, 0);
	}
#endif /* IMPORTANCE_TRACE */

	mach_port_delta_t delta = 1;
	boolean_t need_port_lock;
	task_imp = IIT_NULL;

	/* adjust port boost count (with importance and port locked) */
	need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
	/* hold a reference on task_imp */

	/* if we need to adjust a task importance as a result, apply that here */
	if (IIT_NULL != task_imp && delta != 0) {
		assert(delta == 1);

		/* if this results in a change of state, propagate the transistion */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
			/* can't hold the port lock during task transition(s) */
			if (!need_port_lock) {
				need_port_lock = TRUE;
				ip_mq_unlock(port);
			}
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
		}
	}

	if (task_imp) {
		ipc_importance_task_release_locked(task_imp);
		/* importance unlocked */
	} else {
		ipc_importance_unlock();
	}

	/* re-take the port lock if it was dropped along the way */
	if (need_port_lock) {
		port_lock_dropped = TRUE;
		ip_mq_lock(port);
	}

	return port_lock_dropped;
}
2670
2671 /*
2672 * Routine: ipc_importance_inherit_from_kmsg
2673 * Purpose:
2674 * Create a "made" reference for an importance attribute representing
2675 * an inheritance between the sender of a message (if linked) and the
2676 * current task importance. If the message is not linked, a static
2677 * boost may be created, based on the boost state of the message.
2678 *
2679 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2680 *
2681 * If the task is inactive, there isn't any need to return a new reference.
2682 * Conditions:
2683 * Nothing locked on entry. May block.
2684 */
2685 static ipc_importance_inherit_t
ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)2686 ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
2687 {
2688 ipc_importance_task_t task_imp = IIT_NULL;
2689 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2690 ipc_importance_elem_t elem;
2691 task_t task_self = current_task();
2692
2693 mach_msg_header_t *hdr = ikm_header(kmsg);
2694 ipc_port_t port = hdr->msgh_remote_port;
2695 ipc_importance_inherit_t inherit = III_NULL;
2696 ipc_importance_inherit_t alloc = III_NULL;
2697 boolean_t cleared_self_donation = FALSE;
2698 boolean_t donating;
2699 uint32_t depth = 1;
2700
2701 /* The kmsg must have an importance donor or static boost to proceed */
2702 if (IIE_NULL == kmsg->ikm_importance &&
2703 !MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
2704 return III_NULL;
2705 }
2706
2707 /*
2708 * No need to set up an inherit linkage if the dest isn't a receiver
2709 * of one type or the other.
2710 */
2711 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2712 ipc_importance_lock();
2713 goto out_locked;
2714 }
2715
2716 /* Grab a reference on the importance of the destination */
2717 task_imp = ipc_importance_for_task(task_self, FALSE);
2718
2719 ipc_importance_lock();
2720
2721 if (IIT_NULL == task_imp) {
2722 goto out_locked;
2723 }
2724
2725 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2726
2727 /* If message is already associated with an inherit... */
2728 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2729 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2730
2731 /* already targeting our task? - just use it */
2732 if (from_inherit->iii_to_task == task_imp) {
2733 /* clear self-donation if not also present in inherit */
2734 if (!from_inherit->iii_donating &&
2735 MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
2736 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2737 cleared_self_donation = TRUE;
2738 }
2739 inherit = from_inherit;
2740 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2741 ipc_importance_task_t to_task;
2742 ipc_importance_elem_t unlinked_from;
2743
2744 /*
2745 * Chain too long. Switch to looking
2746 * directly at the from_inherit's to-task
2747 * as our source of importance.
2748 */
2749 to_task = from_inherit->iii_to_task;
2750 ipc_importance_task_reference(to_task);
2751 from_elem = (ipc_importance_elem_t)to_task;
2752 depth = III_DEPTH_RESET | 1;
2753
2754 /* Fixup the kmsg linkage to reflect change */
2755 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2756 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2757 ipc_importance_kmsg_link(kmsg, from_elem);
2758 ipc_importance_inherit_release_locked(from_inherit);
2759 /* importance unlocked */
2760 ipc_importance_lock();
2761 } else {
2762 /* inheriting from an inherit */
2763 depth = from_inherit->iii_depth + 1;
2764 }
2765 }
2766
2767 /*
2768 * Don't allow a task to inherit from itself (would keep it permanently
2769 * boosted even if all other donors to the task went away).
2770 */
2771
2772 if (from_elem == (ipc_importance_elem_t)task_imp) {
2773 goto out_locked;
2774 }
2775
2776 /*
2777 * But if the message isn't associated with any linked source, it is
2778 * intended to be permanently boosting (static boost from kernel).
2779 * In that case DO let the process permanently boost itself.
2780 */
2781 if (IIE_NULL == from_elem) {
2782 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits));
2783 ipc_importance_task_reference_internal(task_imp);
2784 from_elem = (ipc_importance_elem_t)task_imp;
2785 }
2786
2787 /*
2788 * Now that we have the from_elem figured out,
2789 * check to see if we already have an inherit for this pairing
2790 */
2791 while (III_NULL == inherit) {
2792 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2793
2794 /* Do we have to allocate a new inherit */
2795 if (III_NULL == inherit) {
2796 if (III_NULL != alloc) {
2797 break;
2798 }
2799
2800 /* allocate space */
2801 ipc_importance_unlock();
2802 alloc = (ipc_importance_inherit_t)
2803 zalloc(ipc_importance_inherit_zone);
2804 ipc_importance_lock();
2805 }
2806 }
2807
2808 /* snapshot the donating status while we have importance locked */
2809 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);
2810
2811 if (III_NULL != inherit) {
2812 /* We found one, piggyback on that */
2813 assert(0 < III_REFS(inherit));
2814 assert(0 < IIE_REFS(inherit->iii_from_elem));
2815 assert(inherit->iii_externcnt >= inherit->iii_made);
2816
2817 /* add in a made reference */
2818 if (0 == inherit->iii_made++) {
2819 assert(III_REFS_MAX > III_REFS(inherit));
2820 ipc_importance_inherit_reference_internal(inherit);
2821 }
2822
2823 /* Reflect the inherit's change of status into the task boosts */
2824 if (0 == III_EXTERN(inherit)) {
2825 assert(!inherit->iii_donating);
2826 inherit->iii_donating = donating;
2827 if (donating) {
2828 task_imp->iit_externcnt += inherit->iii_externcnt;
2829 task_imp->iit_externdrop += inherit->iii_externdrop;
2830 }
2831 } else {
2832 assert(donating == inherit->iii_donating);
2833 }
2834
2835 /* add in a external reference for this use of the inherit */
2836 inherit->iii_externcnt++;
2837 } else {
2838 /* initialize the previously allocated space */
2839 inherit = alloc;
2840 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
2841 inherit->iii_made = 1;
2842 inherit->iii_externcnt = 1;
2843 inherit->iii_externdrop = 0;
2844 inherit->iii_depth = depth;
2845 inherit->iii_to_task = task_imp;
2846 inherit->iii_from_elem = IIE_NULL;
2847 queue_init(&inherit->iii_kmsgs);
2848
2849 if (donating) {
2850 inherit->iii_donating = TRUE;
2851 } else {
2852 inherit->iii_donating = FALSE;
2853 }
2854
2855 /*
2856 * Chain our new inherit on the element it inherits from.
2857 * The new inherit takes our reference on from_elem.
2858 */
2859 ipc_importance_inherit_link(inherit, from_elem);
2860
2861 #if IIE_REF_DEBUG
2862 ipc_importance_counter_init(&inherit->iii_elem);
2863 from_elem->iie_kmsg_refs_inherited++;
2864 task_imp->iit_elem.iie_task_refs_inherited++;
2865 #endif
2866 }
2867
2868 out_locked:
2869 /*
2870 * for those paths that came straight here: snapshot the donating status
2871 * (this should match previous snapshot for other paths).
2872 */
2873 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);
2874
2875 /* unlink the kmsg inheritance (if any) */
2876 elem = ipc_importance_kmsg_unlink(kmsg);
2877 assert(elem == from_elem);
2878
2879 /* If found inherit and donating, reflect that in the task externcnt */
2880 if (III_NULL != inherit && donating) {
2881 task_imp->iit_externcnt++;
2882 /* The owner of receive right might have changed, take the internal assertion */
2883 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2884 /* may have dropped and retaken importance lock */
2885 }
2886
2887 /* If we didn't create a new inherit, we have some resources to release */
2888 if (III_NULL == inherit || inherit != alloc) {
2889 if (IIE_NULL != from_elem) {
2890 if (III_NULL != inherit) {
2891 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2892 } else {
2893 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2894 }
2895 ipc_importance_release_locked(from_elem);
2896 /* importance unlocked */
2897 } else {
2898 ipc_importance_unlock();
2899 }
2900
2901 if (IIT_NULL != task_imp) {
2902 if (III_NULL != inherit) {
2903 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2904 }
2905 ipc_importance_task_release(task_imp);
2906 }
2907
2908 if (III_NULL != alloc) {
2909 zfree(ipc_importance_inherit_zone, alloc);
2910 }
2911 } else {
2912 /* from_elem and task_imp references transferred to new inherit */
2913 ipc_importance_unlock();
2914 }
2915
2916 /*
2917 * decrement port boost count
2918 * This is OK to do without the importance lock as we atomically
2919 * unlinked the kmsg and snapshot the donating state while holding
2920 * the importance lock
2921 */
2922 if (donating || cleared_self_donation) {
2923 ip_mq_lock(port);
2924 /* drop importance from port and destination task */
2925 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2926 ip_mq_unlock(port);
2927 }
2928 }
2929
2930 if (III_NULL != inherit) {
2931 /* have an associated importance attr, even if currently not donating */
2932 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2933 } else {
2934 /* we won't have an importance attribute associated with our message */
2935 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2936 }
2937
2938 return inherit;
2939 }
2940
2941 /*
2942 * Routine: ipc_importance_inherit_from_task
2943 * Purpose:
2944 * Create a reference for an importance attribute representing
2945 * an inheritance between the to_task and from_task. The iii
2946 * created will be marked as III_FLAGS_FOR_OTHERS.
2947 *
2948 * It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2949 *
2950 * If the task is inactive, there isn't any need to return a new reference.
2951 * Conditions:
2952 * Nothing locked on entry. May block.
2953 * It should not be called from voucher subsystem.
2954 */
2955 static ipc_importance_inherit_t
ipc_importance_inherit_from_task(task_t from_task,task_t to_task)2956 ipc_importance_inherit_from_task(
2957 task_t from_task,
2958 task_t to_task)
2959 {
2960 ipc_importance_task_t to_task_imp = IIT_NULL;
2961 ipc_importance_task_t from_task_imp = IIT_NULL;
2962 ipc_importance_elem_t from_elem = IIE_NULL;
2963
2964 ipc_importance_inherit_t inherit = III_NULL;
2965 ipc_importance_inherit_t alloc = III_NULL;
2966 boolean_t donating;
2967 uint32_t depth = 1;
2968
2969 to_task_imp = ipc_importance_for_task(to_task, FALSE);
2970 from_task_imp = ipc_importance_for_task(from_task, FALSE);
2971 from_elem = (ipc_importance_elem_t)from_task_imp;
2972
2973 ipc_importance_lock();
2974
2975 if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
2976 goto out_locked;
2977 }
2978
2979 /*
2980 * No need to set up an inherit linkage if the to_task or from_task
2981 * isn't a receiver of one type or the other.
2982 */
2983 if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
2984 !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
2985 goto out_locked;
2986 }
2987
2988 /* Do not allow to create a linkage to self */
2989 if (to_task_imp == from_task_imp) {
2990 goto out_locked;
2991 }
2992
2993 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
2994 incr_ref_counter(from_elem->iie_kmsg_refs_added);
2995
2996 /*
2997 * Now that we have the from_elem figured out,
2998 * check to see if we already have an inherit for this pairing
2999 */
3000 while (III_NULL == inherit) {
3001 inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3002
3003 /* Do we have to allocate a new inherit */
3004 if (III_NULL == inherit) {
3005 if (III_NULL != alloc) {
3006 break;
3007 }
3008
3009 /* allocate space */
3010 ipc_importance_unlock();
3011 alloc = (ipc_importance_inherit_t)
3012 zalloc(ipc_importance_inherit_zone);
3013 ipc_importance_lock();
3014 }
3015 }
3016
3017 /* snapshot the donating status while we have importance locked */
3018 donating = ipc_importance_task_is_donor(from_task_imp);
3019
3020 if (III_NULL != inherit) {
3021 /* We found one, piggyback on that */
3022 assert(0 < III_REFS(inherit));
3023 assert(0 < IIE_REFS(inherit->iii_from_elem));
3024
3025 /* Take a reference for inherit */
3026 assert(III_REFS_MAX > III_REFS(inherit));
3027 ipc_importance_inherit_reference_internal(inherit);
3028
3029 /* Reflect the inherit's change of status into the task boosts */
3030 if (0 == III_EXTERN(inherit)) {
3031 assert(!inherit->iii_donating);
3032 inherit->iii_donating = donating;
3033 if (donating) {
3034 to_task_imp->iit_externcnt += inherit->iii_externcnt;
3035 to_task_imp->iit_externdrop += inherit->iii_externdrop;
3036 }
3037 } else {
3038 assert(donating == inherit->iii_donating);
3039 }
3040
3041 /* add in a external reference for this use of the inherit */
3042 inherit->iii_externcnt++;
3043 } else {
3044 /* initialize the previously allocated space */
3045 inherit = alloc;
3046 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
3047 inherit->iii_made = 0;
3048 inherit->iii_externcnt = 1;
3049 inherit->iii_externdrop = 0;
3050 inherit->iii_depth = depth;
3051 inherit->iii_to_task = to_task_imp;
3052 inherit->iii_from_elem = IIE_NULL;
3053 queue_init(&inherit->iii_kmsgs);
3054
3055 if (donating) {
3056 inherit->iii_donating = TRUE;
3057 } else {
3058 inherit->iii_donating = FALSE;
3059 }
3060
3061 /*
3062 * Chain our new inherit on the element it inherits from.
3063 * The new inherit takes our reference on from_elem.
3064 */
3065 ipc_importance_inherit_link(inherit, from_elem);
3066
3067 #if IIE_REF_DEBUG
3068 ipc_importance_counter_init(&inherit->iii_elem);
3069 from_elem->iie_kmsg_refs_inherited++;
3070 task_imp->iit_elem.iie_task_refs_inherited++;
3071 #endif
3072 }
3073
3074 out_locked:
3075
3076 /* If found inherit and donating, reflect that in the task externcnt */
3077 if (III_NULL != inherit && donating) {
3078 to_task_imp->iit_externcnt++;
3079 /* take the internal assertion */
3080 ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3081 /* may have dropped and retaken importance lock */
3082 }
3083
3084 /* If we didn't create a new inherit, we have some resources to release */
3085 if (III_NULL == inherit || inherit != alloc) {
3086 if (IIE_NULL != from_elem) {
3087 if (III_NULL != inherit) {
3088 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3089 } else {
3090 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3091 }
3092 ipc_importance_release_locked(from_elem);
3093 /* importance unlocked */
3094 } else {
3095 ipc_importance_unlock();
3096 }
3097
3098 if (IIT_NULL != to_task_imp) {
3099 if (III_NULL != inherit) {
3100 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3101 }
3102 ipc_importance_task_release(to_task_imp);
3103 }
3104
3105 if (III_NULL != alloc) {
3106 zfree(ipc_importance_inherit_zone, alloc);
3107 }
3108 } else {
3109 /* from_elem and to_task_imp references transferred to new inherit */
3110 ipc_importance_unlock();
3111 }
3112
3113 return inherit;
3114 }
3115
3116 /*
3117 * Routine: ipc_importance_receive
3118 * Purpose:
3119 * Process importance attributes in a received message.
3120 *
3121 * If an importance voucher attribute was sent, transform
3122 * that into an attribute value reflecting the inheritance
3123 * from the sender to the receiver.
3124 *
3125 * If a static boost is received (or the voucher isn't on
3126 * a voucher-based boost), export a static boost.
3127 * Conditions:
3128 * Nothing locked.
3129 */
void
ipc_importance_receive(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option)
{
	/* -1 = no trace event; 0 = boost dropped; 1 = legacy boost; 2 = voucher inherit */
	int impresult = -1;

#if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
	task_t task_self = current_task();
	/* NOTE(review): sender pid read from audit token slot 5 -- confirm token layout */
	unsigned int sender_pid = ipc_kmsg_get_trailer(kmsg, false)->msgh_audit.val[5];
#endif
	mach_msg_header_t *hdr = ikm_header(kmsg);

	/* convert to a voucher with an inherit importance attribute? */
	if ((option & MACH_RCV_VOUCHER) != 0) {
		/* room for two recipes plus one inline attribute value handle */
		uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
		    sizeof(mach_voucher_attr_value_handle_t)];
		ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
		ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
		ipc_port_t voucher_port = ipc_kmsg_get_voucher_port(kmsg);
		ipc_voucher_t recv_voucher;
		mach_voucher_attr_value_handle_t handle;
		ipc_importance_inherit_t inherit;
		kern_return_t kr;

		/* set up recipe to copy the old voucher */
		if (IP_VALID(voucher_port)) {
			ipc_voucher_t sent_voucher;

			sent_voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
			    IKOT_VOUCHER);

			recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
			recipe->command = MACH_VOUCHER_ATTR_COPY;
			recipe->previous_voucher = sent_voucher;
			recipe->content_size = 0;
			recipe_size += sizeof(*recipe);
		}

		/*
		 * create an inheritance attribute from the kmsg (may be NULL)
		 * transferring any boosts from the kmsg linkage through the
		 * port directly to the new inheritance object.
		 */
		inherit = ipc_importance_inherit_from_kmsg(kmsg);
		handle = (mach_voucher_attr_value_handle_t)inherit;

		/* inherit_from_kmsg must have unlinked the kmsg from importance */
		assert(IIE_NULL == kmsg->ikm_importance);

		/*
		 * Only create a new voucher if we have an inherit object
		 * (from the ikm_importance field of the incoming message), OR
		 * we have a valid incoming voucher. If we have neither of
		 * these things then there is no need to create a new voucher.
		 */
		if (IP_VALID(voucher_port) || inherit != III_NULL) {
			/* replace the importance attribute with the handle we created */
			/* our made reference on the inherit is donated to the voucher */
			recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
			recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
			recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
			recipe->previous_voucher = IPC_VOUCHER_NULL;
			recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
			*(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
			recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);

			kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
			    recipes,
			    recipe_size,
			    &recv_voucher);
			assert(KERN_SUCCESS == kr);

			/* swap the voucher port (and set voucher bits in case it didn't already exist) */
			hdr->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
			ipc_port_release_send(voucher_port);
			voucher_port = convert_voucher_to_port(recv_voucher);
			ipc_kmsg_set_voucher_port(kmsg, voucher_port, MACH_MSG_TYPE_MOVE_SEND);
			if (III_NULL != inherit) {
				impresult = 2;
			}
		}
	} else { /* Don't want a voucher */
		/* got linked importance? have to drop */
		if (IIE_NULL != kmsg->ikm_importance) {
			ipc_importance_elem_t elem;

			ipc_importance_lock();
			elem = ipc_importance_kmsg_unlink(kmsg);
#if IIE_REF_DEBUG
			elem->iie_kmsg_refs_dropped++;
#endif
			ipc_importance_release_locked(elem);
			/* importance unlocked */
		}

		/* With kmsg unlinked, can safely examine message importance attribute. */
		if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
			ipc_port_t port = hdr->msgh_remote_port;
#if LEGACY_IMPORTANCE_DELIVERY
			ipc_importance_task_t task_imp = task_self->task_imp_base;

			/* The owner of receive right might have changed, take the internal assertion */
			if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
				ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
				impresult = 1;
			} else
#endif
			{
				/* The importance boost never applied to task (clear the bit) */
				hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
				impresult = 0;
			}

			/* Drop the boost on the port and the owner of the receive right */
			ip_mq_lock(port);
			/* ipc_port_importance_delta() unlocks the port when it returns TRUE */
			if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
				ip_mq_unlock(port);
			}
		}
	}

#if IMPORTANCE_TRACE
	if (-1 < impresult) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
		    sender_pid, task_pid(task_self),
		    hdr->msgh_id, impresult, 0);
	}
	if (impresult == 2) {
		/*
		 * This probe only covers new voucher-based path. Legacy importance
		 * will trigger the probe in ipc_importance_task_externalize_assertion()
		 * above and have impresult==1 here.
		 */
		DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self),
		    int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
	}
#endif /* IMPORTANCE_TRACE */
}
3268
3269 /*
3270 * Routine: ipc_importance_unreceive
3271 * Purpose:
3272 * Undo receive of importance attributes in a message.
3273 *
3274 * Conditions:
3275 * Nothing locked.
3276 */
3277 void
ipc_importance_unreceive(ipc_kmsg_t kmsg,mach_msg_option_t __unused option)3278 ipc_importance_unreceive(
3279 ipc_kmsg_t kmsg,
3280 mach_msg_option_t __unused option)
3281 {
3282 /* importance should already be in the voucher and out of the kmsg */
3283 assert(IIE_NULL == kmsg->ikm_importance);
3284 mach_msg_header_t *hdr = ikm_header(kmsg);
3285
3286 /* See if there is a legacy boost to be dropped from receiver */
3287 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3288 ipc_importance_task_t task_imp;
3289
3290 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3291 task_imp = current_task()->task_imp_base;
3292
3293 if (!IP_VALID(ipc_kmsg_get_voucher_port(kmsg)) && IIT_NULL != task_imp) {
3294 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3295 }
3296 /*
3297 * ipc_kmsg_copyout_dest_to_user() will consume the voucher
3298 * and any contained importance.
3299 */
3300 }
3301 }
3302
3303 /*
3304 * Routine: ipc_importance_clean
3305 * Purpose:
3306 * Clean up importance state in a kmsg that is being cleaned.
3307 * Unlink the importance chain if one was set up, and drop
 * the reference this kmsg held on the donor. Then check to
 * see if importance was carried to the port, and remove that
 * if needed.
3311 * Conditions:
3312 * Nothing locked.
3313 */
3314 void
ipc_importance_clean(ipc_kmsg_t kmsg)3315 ipc_importance_clean(
3316 ipc_kmsg_t kmsg)
3317 {
3318 ipc_port_t port;
3319 mach_msg_header_t *hdr = ikm_header(kmsg);
3320
3321 /* Is the kmsg still linked? If so, remove that first */
3322 if (IIE_NULL != kmsg->ikm_importance) {
3323 ipc_importance_elem_t elem;
3324
3325 ipc_importance_lock();
3326 elem = ipc_importance_kmsg_unlink(kmsg);
3327 assert(IIE_NULL != elem);
3328 ipc_importance_release_locked(elem);
3329 /* importance unlocked */
3330 }
3331
3332 /* See if there is a legacy importance boost to be dropped from port */
3333 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3334 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3335 port = hdr->msgh_remote_port;
3336 if (IP_VALID(port)) {
3337 ip_mq_lock(port);
3338 /* inactive ports already had their importance boosts dropped */
3339 if (!ip_active(port) ||
3340 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3341 ip_mq_unlock(port);
3342 }
3343 }
3344 }
3345 }
3346
3347 void
ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)3348 ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3349 {
3350 assert(IIE_NULL == kmsg->ikm_importance);
3351 assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(ikm_header(kmsg)->msgh_bits));
3352 }
3353
3354 /*
3355 * IPC Importance Attribute Manager definition
3356 */
3357
/* voucher callback: drop the voucher system's "made" references on a value */
static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t              manager,
	mach_voucher_attr_key_t                 key,
	mach_voucher_attr_value_handle_t        value,
	mach_voucher_attr_value_reference_t     sync);

/* voucher callback: convert a recipe command/content into a value handle */
static kern_return_t
ipc_importance_get_value(
	ipc_voucher_attr_manager_t              manager,
	mach_voucher_attr_key_t                 key,
	mach_voucher_attr_recipe_command_t      command,
	mach_voucher_attr_value_handle_array_t  prev_values,
	mach_voucher_attr_value_handle_array_size_t prev_value_count,
	mach_voucher_attr_content_t             content,
	mach_voucher_attr_content_size_t        content_size,
	mach_voucher_attr_value_handle_t        *out_value,
	mach_voucher_attr_value_flags_t         *out_flags,
	ipc_voucher_t                           *out_value_voucher);

/* voucher callback: render debug content describing a value handle */
static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t              manager,
	mach_voucher_attr_key_t                 key,
	mach_voucher_attr_value_handle_array_t  values,
	mach_voucher_attr_value_handle_array_size_t value_count,
	mach_voucher_attr_recipe_command_t      *out_command,
	mach_voucher_attr_content_t             out_content,
	mach_voucher_attr_content_size_t        *in_out_content_size);

/* voucher callback: run a manager-specific command against a value */
static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t              manager,
	mach_voucher_attr_key_t                 key,
	mach_voucher_attr_value_handle_array_t  values,
	mach_msg_type_number_t                  value_count,
	mach_voucher_attr_command_t             command,
	mach_voucher_attr_content_t             in_content,
	mach_voucher_attr_content_size_t        in_content_size,
	mach_voucher_attr_content_t             out_content,
	mach_voucher_attr_content_size_t        *out_content_size);

/* callback table registered with the voucher subsystem for the importance key */
const struct ipc_voucher_attr_manager ipc_importance_manager = {
	.ivam_release_value = ipc_importance_release_value,
	.ivam_get_value = ipc_importance_get_value,
	.ivam_extract_content = ipc_importance_extract_content,
	.ivam_command = ipc_importance_command,
	.ivam_flags = IVAM_FLAGS_NONE,
};

/* sanity checks: this manager serves only the IMPORTANCE attribute key */
#define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
#define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3410
3411 /*
3412 * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface]
3413 * Purpose:
3414 * Release what the voucher system believes is the last "made" reference
3415 * on an importance attribute value handle. The sync parameter is used to
3416 * avoid races with new made references concurrently being returned to the
3417 * voucher system in other threads.
3418 * Conditions:
3419 * Nothing locked on entry. May block.
3420 */
static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_t        value,
	mach_voucher_attr_value_reference_t     sync)
{
	ipc_importance_elem_t elem;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);
	assert(0 < sync);

	/* the handle stored in the voucher is the importance element pointer */
	elem = (ipc_importance_elem_t)value;

	ipc_importance_lock();

	/* Any outstanding made refs? */
	if (sync != elem->iie_made) {
		/* new made refs raced in since the voucher layer counted; retry later */
		assert(sync < elem->iie_made);
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* clear made */
	elem->iie_made = 0;

	/*
	 * If there are pending external boosts represented by this attribute,
	 * drop them from the appropriate task
	 */
	if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;

		assert(inherit->iii_externcnt >= inherit->iii_externdrop);

		if (inherit->iii_donating) {
			ipc_importance_task_t imp_task = inherit->iii_to_task;
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(imp_task));
			assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
			assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
			/* rebase the to-task's counters now that this inherit stops donating */
			imp_task->iit_externcnt -= inherit->iii_externcnt;
			imp_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate if needed */
			if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			/* not donating: just zero the external counts on the inherit */
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}
	}

	/* drop the made reference on elem */
	ipc_importance_release_locked(elem);
	/* returns unlocked */

	return KERN_SUCCESS;
}
3486
3487
3488 /*
3489 * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface]
3490 * Purpose:
3491 * Convert command and content data into a reference on a [potentially new]
3492 * attribute value. The importance attribute manager will only allow the
3493 * caller to get a value for the current task's importance, or to redeem
3494 * an importance attribute from an existing voucher.
3495 * Conditions:
3496 * Nothing locked on entry. May block.
3497 */
3498 static kern_return_t
ipc_importance_get_value(ipc_voucher_attr_manager_t __assert_only manager,mach_voucher_attr_key_t __assert_only key,mach_voucher_attr_recipe_command_t command,mach_voucher_attr_value_handle_array_t prev_values,mach_voucher_attr_value_handle_array_size_t prev_value_count,mach_voucher_attr_content_t __unused content,mach_voucher_attr_content_size_t content_size,mach_voucher_attr_value_handle_t * out_value,mach_voucher_attr_value_flags_t * out_flags,ipc_voucher_t * out_value_voucher)3499 ipc_importance_get_value(
3500 ipc_voucher_attr_manager_t __assert_only manager,
3501 mach_voucher_attr_key_t __assert_only key,
3502 mach_voucher_attr_recipe_command_t command,
3503 mach_voucher_attr_value_handle_array_t prev_values,
3504 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3505 mach_voucher_attr_content_t __unused content,
3506 mach_voucher_attr_content_size_t content_size,
3507 mach_voucher_attr_value_handle_t *out_value,
3508 mach_voucher_attr_value_flags_t *out_flags,
3509 ipc_voucher_t *out_value_voucher)
3510 {
3511 ipc_importance_elem_t elem;
3512 task_t self;
3513
3514 IMPORTANCE_ASSERT_MANAGER(manager);
3515 IMPORTANCE_ASSERT_KEY(key);
3516
3517 if (0 != content_size) {
3518 return KERN_INVALID_ARGUMENT;
3519 }
3520
3521 *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3522 /* never an out voucher */
3523
3524 switch (command) {
3525 case MACH_VOUCHER_ATTR_REDEEM:
3526
3527 /* redeem of previous values is the value */
3528 if (0 < prev_value_count) {
3529 elem = (ipc_importance_elem_t)prev_values[0];
3530 assert(IIE_NULL != elem);
3531
3532 ipc_importance_lock();
3533 assert(0 < elem->iie_made);
3534 elem->iie_made++;
3535 ipc_importance_unlock();
3536
3537 *out_value = prev_values[0];
3538 return KERN_SUCCESS;
3539 }
3540
3541 /* redeem of default is default */
3542 *out_value = 0;
3543 *out_value_voucher = IPC_VOUCHER_NULL;
3544 return KERN_SUCCESS;
3545
3546 case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3547 self = current_task();
3548
3549 elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3550 /* made reference added (or IIE_NULL which isn't referenced) */
3551
3552 *out_value = (mach_voucher_attr_value_handle_t)elem;
3553 *out_value_voucher = IPC_VOUCHER_NULL;
3554 return KERN_SUCCESS;
3555
3556 default:
3557 /*
3558 * every other command is unknown
3559 *
3560 * Specifically, there is no mechanism provided to construct an
3561 * importance attribute for a task/process from just a pid or
3562 * task port. It has to be copied (or redeemed) from a previous
3563 * voucher that has it.
3564 */
3565 return KERN_INVALID_ARGUMENT;
3566 }
3567 }
3568
3569 /*
3570 * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface]
3571 * Purpose:
3572 * Extract meaning from the attribute value present in a voucher. While
3573 * the real goal is to provide commands and data that can reproduce the
3574 * voucher's value "out of thin air", this isn't possible with importance
3575 * attribute values. Instead, return debug info to help track down dependencies.
3576 * Conditions:
3577 * Nothing locked on entry. May block.
3578 */
static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_array_t  values,
	mach_voucher_attr_value_handle_array_size_t value_count,
	mach_voucher_attr_recipe_command_t      *out_command,
	mach_voucher_attr_content_t             out_content,
	mach_voucher_attr_content_size_t        *in_out_content_size)
{
	ipc_importance_elem_t elem;
	unsigned int i;

	char *buf = (char *)out_content;
	/* caller-provided buffer capacity; scnprintf bounds every write below */
	mach_voucher_attr_content_size_t size = *in_out_content_size;
	mach_voucher_attr_content_size_t pos = 0;
	__unused int pid;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	/* the first non-default value provides the data */
	for (i = 0; i < value_count; i++) {
		elem = (ipc_importance_elem_t)values[i];
		if (IIE_NULL == elem) {
			continue;
		}

		pos += scnprintf(buf + pos, size - pos, "Importance for ");

		/* walk the inheritance chain back toward the originating task */
		for (;;) {
			ipc_importance_inherit_t inherit = III_NULL;
			ipc_importance_task_t task_imp;

			if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
				task_imp = (ipc_importance_task_t)elem;
			} else {
				inherit = (ipc_importance_inherit_t)elem;
				task_imp = inherit->iii_to_task;
			}
#if DEVELOPMENT || DEBUG
			pos += scnprintf(buf + pos, size - pos, "%s[%d]",
			    task_imp->iit_procname, task_imp->iit_bsd_pid);
#else
			ipc_importance_lock();
			pid = task_importance_task_get_pid(task_imp);
			ipc_importance_unlock();
			pos += scnprintf(buf + pos, size - pos, "pid %d", pid);
#endif /* DEVELOPMENT || DEBUG */

			/* a task element ends the chain */
			if (III_NULL == inherit) {
				break;
			}
			pos += scnprintf(buf + pos, size - pos,
			    " (%d of %d boosts) %s from ",
			    III_EXTERN(inherit), inherit->iii_externcnt,
			    (inherit->iii_donating) ? "donated" : "linked");
			elem = inherit->iii_from_elem;
		}

		pos++; /* account for terminating \0 */
		break;
	}
	*out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
	*in_out_content_size = pos;
	return KERN_SUCCESS;
}
3646
/*
 * Routine:	ipc_importance_command [Voucher Attribute Manager Interface]
 * Purpose:
 *	Run commands against the importance attribute value found in a voucher.
 *	The only supported command is MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL:
 *	drop a caller-specified number of externalized boost references from the
 *	first inherit-type value found in the voucher (and, if that inherit is
 *	donating, from its destination task as well).
 * Conditions:
 *	Nothing locked on entry. May block.
 *	in_content must be exactly one uint32_t (the refs count to drop);
 *	out_content, if sized non-zero, receives one uint32_t (the remaining
 *	external count).
 */
static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t __assert_only manager,
	mach_voucher_attr_key_t __assert_only key,
	mach_voucher_attr_value_handle_array_t values,
	mach_msg_type_number_t value_count,
	mach_voucher_attr_command_t command,
	mach_voucher_attr_content_t in_content,
	mach_voucher_attr_content_size_t in_content_size,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *out_content_size)
{
	ipc_importance_inherit_t inherit;
	ipc_importance_task_t to_task;
	uint32_t refs, *outrefsp;
	mach_msg_type_number_t i;
	uint32_t externcnt;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	/* validate content buffer sizes before interpreting either buffer */
	if (in_content_size != sizeof(refs) ||
	    (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
		return KERN_INVALID_ARGUMENT;
	}
	refs = *(uint32_t *)(void *)in_content;
	outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;

	if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
		return KERN_NOT_SUPPORTED;
	}

	/* the first non-default value of the apropos type provides the data */
	inherit = III_NULL;
	for (i = 0; i < value_count; i++) {
		ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];

		if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
			inherit = (ipc_importance_inherit_t)elem;
			break;
		}
	}
	if (III_NULL == inherit) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();

	/* refs == 0 is a pure query: report the current external count */
	if (0 == refs) {
		if (NULL != outrefsp) {
			*outrefsp = III_EXTERN(inherit);
		}
		ipc_importance_unlock();
		return KERN_SUCCESS;
	}

	to_task = inherit->iii_to_task;
	assert(ipc_importance_task_is_any_receiver_type(to_task));

	/* if not donating to a denap receiver, it was called incorrectly */
	if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
		ipc_importance_unlock();
		return KERN_INVALID_TASK; /* keeps dispatch happy */
	}

	/* Enough external references left to drop? */
	if (III_EXTERN(inherit) < refs) {
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* re-base external and internal counters at the inherit and the to-task (if apropos) */
	if (inherit->iii_donating) {
		/* while donating, the inherit's counts are folded into the to-task's */
		assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
		assert(to_task->iit_externcnt >= inherit->iii_externcnt);
		assert(to_task->iit_externdrop >= inherit->iii_externdrop);
		inherit->iii_externdrop += refs;
		to_task->iit_externdrop += refs;
		externcnt = III_EXTERN(inherit);
		if (0 == externcnt) {
			/* no external refs remain: stop donating and back the
			 * inherit's contribution out of the to-task's counters */
			inherit->iii_donating = FALSE;
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;


			/* Start AppNap delay hysteresis - even if not the last boost for the task. */
			if (ipc_importance_delayed_drop_call != NULL &&
			    ipc_importance_task_is_marked_denap_receiver(to_task)) {
				ipc_importance_task_delayed_drop(to_task);
			}

			/* drop task assertions associated with the dropped boosts */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
				/* may have dropped and retaken importance lock */
			}
		} else {
			/* assert(to_task->iit_assertcnt >= refs + externcnt); */
			/* defensive deduction in case of assertcnt underflow */
			if (to_task->iit_assertcnt > refs + externcnt) {
				to_task->iit_assertcnt -= refs;
			} else {
				to_task->iit_assertcnt = externcnt;
			}
		}
	} else {
		/* not donating: only the inherit's own drop count moves */
		inherit->iii_externdrop += refs;
		externcnt = III_EXTERN(inherit);
	}

	/* capture result (if requested) */
	if (NULL != outrefsp) {
		*outrefsp = externcnt;
	}

	ipc_importance_unlock();
	return KERN_SUCCESS;
}
3773
3774 /*
3775 * Routine: ipc_importance_init
3776 * Purpose:
3777 * Initialize the IPC importance manager.
3778 * Conditions:
3779 * Zones and Vouchers are already initialized.
3780 */
3781 __startup_func
3782 static void
ipc_importance_init(void)3783 ipc_importance_init(void)
3784 {
3785 ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
3786 (mach_voucher_attr_value_handle_t)0,
3787 MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
3788 &ipc_importance_control);
3789 }
3790 STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_importance_init);
3791
3792 /*
3793 * Routine: ipc_importance_thread_call_init
3794 * Purpose:
3795 * Initialize the IPC importance code dependent upon
3796 * thread-call support being available.
3797 * Conditions:
3798 * Thread-call mechanism is already initialized.
3799 */
3800 __startup_func
3801 static void
ipc_importance_thread_call_init(void)3802 ipc_importance_thread_call_init(void)
3803 {
3804 /* initialize delayed drop queue and thread-call */
3805 queue_init(&ipc_importance_delayed_drop_queue);
3806 ipc_importance_delayed_drop_call =
3807 thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3808 if (NULL == ipc_importance_delayed_drop_call) {
3809 panic("ipc_importance_init");
3810 }
3811 }
3812 STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, ipc_importance_thread_call_init);
3813
/*
 * Routine:	task_importance_list_pids
 * Purpose:
 *	List the pids of tasks to which this task is donating importance,
 *	writing up to max_count 4-byte pids into pid_list and returning
 *	the number written.
 * Conditions:
 *	To be called only from kdp stackshot code.
 *	Will panic the system otherwise.
 */
extern int
task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
{
	/*
	 * Refuse to walk the queues if the importance lock is held (they may
	 * be mid-update), or if any argument is unusable. Only the
	 * TASK_IMP_LIST_DONATING_PIDS flavor is supported.
	 */
	if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
	    max_count < 1 ||
	    task->task_imp_base == IIT_NULL ||
	    pid_list == NULL ||
	    flags != TASK_IMP_LIST_DONATING_PIDS) {
		return 0;
	}
	unsigned int pidcount = 0;
	ipc_importance_task_t task_imp = task->task_imp_base;
	ipc_kmsg_t temp_kmsg;
	mach_msg_header_t *temp_hdr;
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_elem_t elem;
	int target_pid = 0, previous_pid;

	/* Pass 1: inherits hanging off this task's importance base. */
	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		/* check space in buffer */
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;

		/* only inherits actively donating name a destination pid */
		if (temp_inherit->iii_donating) {
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		/* suppress only adjacent duplicate pids; full dedup is not attempted */
		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	/* Pass 2: kmsgs on this task's importance queue whose header shows
	 * they raised importance. */
	target_pid = 0;
	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;
		elem = temp_kmsg->ikm_importance;

		if (elem == IIE_NULL) {
			continue;
		}

		temp_hdr = ikm_header(temp_kmsg);

		/* skip messages that did not actually raise importance */
		if (!(temp_hdr &&
		    MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_hdr->msgh_bits))) {
			continue;
		}

		/* boost destination is either a task element itself or the
		 * to-task of an inherit element */
		if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
			ipc_importance_task_t temp_iit = (ipc_importance_task_t)elem;
			target_pid = task_importance_task_get_pid(temp_iit);
		} else {
			temp_inherit = (ipc_importance_inherit_t)elem;
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	return pidcount;
}
3894