/* xref: /xnu-10002.1.13/osfmk/ipc/ipc_importance.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a) */
1 /*
2  * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/kern_return.h>
30 #include <mach/mach_types.h>
31 #include <mach/notify.h>
32 #include <ipc/ipc_types.h>
33 #include <ipc/ipc_importance.h>
34 #include <ipc/ipc_port.h>
35 #include <ipc/ipc_voucher.h>
36 #include <kern/ipc_kobject.h>
37 #include <kern/ipc_tt.h>
38 #include <kern/mach_param.h>
39 #include <kern/misc_protos.h>
40 #include <kern/zalloc.h>
41 #include <kern/queue.h>
42 #include <kern/task.h>
43 #include <kern/policy_internal.h>
44 
45 #include <sys/kdebug.h>
46 
47 #include <mach/machine/sdt.h>
48 
/* Accessors into the BSD proc layer, used here for tracing/diagnostics. */
extern int      proc_pid(void *);
extern int      proc_selfpid(void);
extern uint64_t proc_uniqueid(void *p);
extern char     *proc_name_address(void *p);
53 
/*
 * Globals for delayed boost drop processing.
 * Tasks whose boost drop is being hysteresis-delayed sit on the queue
 * below; the thread call fires to process them after the delay expires.
 */
static queue_head_t ipc_importance_delayed_drop_queue;
static thread_call_t ipc_importance_delayed_drop_call;
static uint64_t ipc_importance_delayed_drop_timestamp;
static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;

#define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
#define DENAP_DROP_SKEW    (100 * NSEC_PER_MSEC) /* request skew for wakeup */
#define DENAP_DROP_LEEWAY  (2 * DENAP_DROP_SKEW)  /* specified wakeup leeway */

#define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
#define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
68 
69 /*
70  * Importance Voucher Attribute Manager
71  */
/* Global spin lock guarding all importance element, queue, and count state. */
static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr);

#define ipc_importance_lock() \
	lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_lock_try() \
	lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_unlock() \
	lck_spin_unlock(&ipc_importance_lock_data)
#define ipc_importance_assert_held() \
	lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
82 
83 #if IIE_REF_DEBUG
84 #define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))
85 
86 static inline
87 uint32_t
ipc_importance_reference_internal(ipc_importance_elem_t elem)88 ipc_importance_reference_internal(ipc_importance_elem_t elem)
89 {
90 	incr_ref_counter(elem->iie_refs_added);
91 	return os_atomic_inc(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
92 }
93 
94 static inline
95 uint32_t
ipc_importance_release_internal(ipc_importance_elem_t elem)96 ipc_importance_release_internal(ipc_importance_elem_t elem)
97 {
98 	incr_ref_counter(elem->iie_refs_dropped);
99 	return os_atomic_dec(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
100 }
101 
102 static inline
103 uint32_t
ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)104 ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
105 {
106 	uint32_t out;
107 	out = ipc_importance_reference_internal(&task_imp->iit_elem);
108 	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
109 	return out;
110 }
111 
112 static inline
113 uint32_t
ipc_importance_task_release_internal(ipc_importance_task_t task_imp)114 ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
115 {
116 	uint32_t out;
117 
118 	assert(1 < IIT_REFS(task_imp));
119 	incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
120 	out = ipc_importance_release_internal(&task_imp->iit_elem);
121 	return out;
122 }
123 
/* Reset all IIE_REF_DEBUG accounting counters on a freshly created element. */
static inline
void
ipc_importance_counter_init(ipc_importance_elem_t elem)
{
	elem->iie_refs_added = 0;
	elem->iie_refs_dropped = 0;
	elem->iie_kmsg_refs_added = 0;
	elem->iie_kmsg_refs_inherited = 0;
	elem->iie_kmsg_refs_coalesced = 0;
	elem->iie_kmsg_refs_dropped = 0;
	elem->iie_task_refs_added = 0;
	elem->iie_task_refs_added_inherit_from = 0;
	elem->iie_task_refs_added_transition = 0;
	elem->iie_task_refs_self_added = 0;
	elem->iie_task_refs_inherited = 0;
	elem->iie_task_refs_coalesced = 0;
	elem->iie_task_refs_dropped = 0;
}
142 #else
143 #define incr_ref_counter(x)
144 #endif
145 
146 #if DEVELOPMENT || DEBUG
147 static queue_head_t global_iit_alloc_queue =
148     QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue);
149 #endif
150 
151 static ZONE_DEFINE_TYPE(ipc_importance_task_zone, "ipc task importance",
152     struct ipc_importance_task, ZC_ZFREE_CLEARMEM);
153 static ZONE_DEFINE_TYPE(ipc_importance_inherit_zone, "ipc importance inherit",
154     struct ipc_importance_inherit, ZC_ZFREE_CLEARMEM);
155 static zone_t ipc_importance_inherit_zone;
156 
157 static ipc_voucher_attr_control_t ipc_importance_control;
158 
159 static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
160     iit_update_type_t type, uint32_t delta);
161 
162 static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
163     iit_update_type_t type, boolean_t update_task_imp);
164 
165 static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
166 
167 /*
168  *	Routine:	ipc_importance_kmsg_link
169  *	Purpose:
170  *		Link the kmsg onto the appropriate propagation chain.
171  *		If the element is a task importance, we link directly
172  *		on its propagation chain. Otherwise, we link onto the
173  *		destination task of the inherit.
174  *	Conditions:
175  *		Importance lock held.
176  *		Caller is donating an importance elem reference to the kmsg.
177  */
178 static void
ipc_importance_kmsg_link(ipc_kmsg_t kmsg,ipc_importance_elem_t elem)179 ipc_importance_kmsg_link(
180 	ipc_kmsg_t              kmsg,
181 	ipc_importance_elem_t   elem)
182 {
183 	ipc_importance_elem_t link_elem;
184 
185 	assert(IIE_NULL == kmsg->ikm_importance);
186 
187 	link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
188 	    (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
189 	    elem;
190 
191 	queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
192 	kmsg->ikm_importance = elem;
193 }
194 
195 /*
196  *	Routine:	ipc_importance_kmsg_unlink
197  *	Purpose:
198  *		Unlink the kmsg from its current propagation chain.
199  *		If the element is a task importance, we unlink directly
200  *		from its propagation chain. Otherwise, we unlink from the
201  *		destination task of the inherit.
202  *	Returns:
203  *		The reference to the importance element it was linked on.
204  *	Conditions:
205  *		Importance lock held.
206  *		Caller is responsible for dropping reference on returned elem.
207  */
208 static ipc_importance_elem_t
ipc_importance_kmsg_unlink(ipc_kmsg_t kmsg)209 ipc_importance_kmsg_unlink(
210 	ipc_kmsg_t              kmsg)
211 {
212 	ipc_importance_elem_t elem = kmsg->ikm_importance;
213 
214 	if (IIE_NULL != elem) {
215 		ipc_importance_elem_t unlink_elem;
216 
217 		unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
218 		    (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
219 		    elem;
220 
221 		queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
222 		kmsg->ikm_importance = IIE_NULL;
223 	}
224 	return elem;
225 }
226 
227 /*
228  *	Routine:	ipc_importance_inherit_link
229  *	Purpose:
230  *		Link the inherit onto the appropriate propagation chain.
231  *		If the element is a task importance, we link directly
232  *		on its propagation chain. Otherwise, we link onto the
233  *		destination task of the inherit.
234  *	Conditions:
235  *		Importance lock held.
236  *		Caller is donating an elem importance reference to the inherit.
237  */
238 static void
ipc_importance_inherit_link(ipc_importance_inherit_t inherit,ipc_importance_elem_t elem)239 ipc_importance_inherit_link(
240 	ipc_importance_inherit_t inherit,
241 	ipc_importance_elem_t elem)
242 {
243 	ipc_importance_task_t link_task;
244 
245 	assert(IIE_NULL == inherit->iii_from_elem);
246 	link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
247 	    ((ipc_importance_inherit_t)elem)->iii_to_task :
248 	    (ipc_importance_task_t)elem;
249 
250 	queue_enter(&link_task->iit_inherits, inherit,
251 	    ipc_importance_inherit_t, iii_inheritance);
252 	inherit->iii_from_elem = elem;
253 }
254 
255 /*
256  *	Routine:	ipc_importance_inherit_find
257  *	Purpose:
258  *		Find an existing inherit that links the from element to the
259  *		to_task at a given nesting depth.  As inherits from other
260  *		inherits are actually linked off the original inherit's donation
261  *		receiving task, we have to conduct our search from there if
262  *		the from element is an inherit.
263  *	Returns:
264  *		A pointer (not a reference) to the matching inherit.
265  *	Conditions:
266  *		Importance lock held.
267  */
268 static ipc_importance_inherit_t
ipc_importance_inherit_find(ipc_importance_elem_t from,ipc_importance_task_t to_task,unsigned int depth)269 ipc_importance_inherit_find(
270 	ipc_importance_elem_t from,
271 	ipc_importance_task_t to_task,
272 	unsigned int depth)
273 {
274 	ipc_importance_task_t link_task;
275 	ipc_importance_inherit_t inherit;
276 
277 	link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
278 	    ((ipc_importance_inherit_t)from)->iii_to_task :
279 	    (ipc_importance_task_t)from;
280 
281 	queue_iterate(&link_task->iit_inherits, inherit,
282 	    ipc_importance_inherit_t, iii_inheritance) {
283 		if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
284 			return inherit;
285 		}
286 	}
287 	return III_NULL;
288 }
289 
290 /*
291  *	Routine:	ipc_importance_inherit_unlink
292  *	Purpose:
293  *		Unlink the inherit from its current propagation chain.
294  *		If the element is a task importance, we unlink directly
295  *		from its propagation chain. Otherwise, we unlink from the
296  *		destination task of the inherit.
297  *	Returns:
298  *		The reference to the importance element it was linked on.
299  *	Conditions:
300  *		Importance lock held.
301  *		Caller is responsible for dropping reference on returned elem.
302  */
303 static ipc_importance_elem_t
ipc_importance_inherit_unlink(ipc_importance_inherit_t inherit)304 ipc_importance_inherit_unlink(
305 	ipc_importance_inherit_t inherit)
306 {
307 	ipc_importance_elem_t elem = inherit->iii_from_elem;
308 
309 	if (IIE_NULL != elem) {
310 		ipc_importance_task_t unlink_task;
311 
312 		unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
313 		    ((ipc_importance_inherit_t)elem)->iii_to_task :
314 		    (ipc_importance_task_t)elem;
315 
316 		queue_remove(&unlink_task->iit_inherits, inherit,
317 		    ipc_importance_inherit_t, iii_inheritance);
318 		inherit->iii_from_elem = IIE_NULL;
319 	}
320 	return elem;
321 }
322 
323 /*
324  *	Routine:	ipc_importance_reference
325  *	Purpose:
326  *		Add a reference to the importance element.
327  *	Conditions:
328  *		Caller must hold a reference on the element.
329  */
330 void
ipc_importance_reference(ipc_importance_elem_t elem)331 ipc_importance_reference(ipc_importance_elem_t elem)
332 {
333 	assert(0 < IIE_REFS(elem));
334 	ipc_importance_reference_internal(elem);
335 }
336 
337 /*
338  *	Routine:	ipc_importance_release_locked
339  *	Purpose:
340  *		Release a reference on an importance attribute value,
341  *		unlinking and deallocating the attribute if the last reference.
342  *	Conditions:
343  *		Entered with importance lock held, leaves with it unlocked.
344  */
static void
ipc_importance_release_locked(ipc_importance_elem_t elem)
{
	assert(0 < IIE_REFS(elem));

#if IMPORTANCE_DEBUG
	/*
	 * Sanity check: count the references we can account for locally
	 * (one if the made count is non-zero, one per kmsg linked on this
	 * elem's chain, one per inherit sourced from this elem) and panic
	 * if the element holds fewer refs than that.
	 */
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_task_t link_task;
	ipc_kmsg_t temp_kmsg;
	uint32_t expected = 0;

	if (0 < elem->iie_made) {
		expected++;
	}

	/* inherits chain off their destination task, not themselves */
	link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
	    ((ipc_importance_inherit_t)elem)->iii_to_task :
	    (ipc_importance_task_t)elem;

	queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
	if (temp_kmsg->ikm_importance == elem) {
		expected++;
	}
	queue_iterate(&link_task->iit_inherits, temp_inherit,
	    ipc_importance_inherit_t, iii_inheritance)
	if (temp_inherit->iii_from_elem == elem) {
		expected++;
	}
	if (IIE_REFS(elem) < expected + 1) {
		panic("ipc_importance_release_locked (%p)", elem);
	}
#endif /* IMPORTANCE_DEBUG */

	/* other references remain: just drop ours and unlock */
	if (0 < ipc_importance_release_internal(elem)) {
		ipc_importance_unlock();
		return;
	}

	/* last ref */

	switch (IIE_TYPE(elem)) {
	/* just a "from" task reference to drop */
	case IIE_TYPE_TASK:
	{
		ipc_importance_task_t task_elem;

		task_elem = (ipc_importance_task_t)elem;

		/* the task can't still hold a reference on the task importance */
		assert(TASK_NULL == task_elem->iit_task);

#if DEVELOPMENT || DEBUG
		queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
#endif

		/* unlock before freeing; no other refs can exist at this point */
		ipc_importance_unlock();

		zfree(ipc_importance_task_zone, task_elem);
		break;
	}

	/* dropping an inherit element */
	case IIE_TYPE_INHERIT:
	{
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
		ipc_importance_task_t to_task = inherit->iii_to_task;
		ipc_importance_elem_t from_elem;

		assert(IIT_NULL != to_task);
		assert(ipc_importance_task_is_any_receiver_type(to_task));

		/* unlink the inherit from its source element */
		from_elem = ipc_importance_inherit_unlink(inherit);
		assert(IIE_NULL != from_elem);

		/*
		 * The attribute might have pending external boosts if the attribute
		 * was given out during exec, drop them from the appropriate destination
		 * task.
		 *
		 * The attribute will not have any pending external boosts if the
		 * attribute was given out to voucher system since it would have been
		 * dropped by ipc_importance_release_value, but there is not way to
		 * detect that, thus if the attribute has a pending external boost,
		 * drop them from the appropriate destination task.
		 *
		 * The inherit attribute from exec and voucher system would not
		 * get deduped to each other, thus dropping the external boost
		 * from destination task at two different places will not have
		 * any unintended side effects.
		 */
		assert(inherit->iii_externcnt >= inherit->iii_externdrop);
		if (inherit->iii_donating) {
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(to_task));
			assert(to_task->iit_externcnt >= inherit->iii_externcnt);
			assert(to_task->iit_externdrop >= inherit->iii_externdrop);
			/* back out this inherit's contribution to the to-task counts */
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate as needed */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}

		/* release the reference on the source element (recursive call) */
		ipc_importance_release_locked(from_elem);
		/* unlocked on return */

		/* release the reference on the destination task */
		ipc_importance_task_release(to_task);

		/* free the inherit */
		zfree(ipc_importance_inherit_zone, inherit);
		break;
	}
	}
}
471 
472 /*
473  *	Routine:	ipc_importance_release
474  *	Purpose:
475  *		Release a reference on an importance attribute value,
476  *		unlinking and deallocating the attribute if the last reference.
477  *	Conditions:
478  *		nothing locked on entrance, nothing locked on exit.
479  *		May block.
480  */
481 void
ipc_importance_release(ipc_importance_elem_t elem)482 ipc_importance_release(ipc_importance_elem_t elem)
483 {
484 	if (IIE_NULL == elem) {
485 		return;
486 	}
487 
488 	ipc_importance_lock();
489 	ipc_importance_release_locked(elem);
490 	/* unlocked */
491 }
492 
493 /*
494  *	Routine:	ipc_importance_task_reference
495  *
496  *
497  *	Purpose:
498  *		Retain a reference on a task importance attribute value.
499  *	Conditions:
500  *		nothing locked on entrance, nothing locked on exit.
501  *		caller holds a reference already.
502  */
503 void
ipc_importance_task_reference(ipc_importance_task_t task_elem)504 ipc_importance_task_reference(ipc_importance_task_t task_elem)
505 {
506 	if (IIT_NULL == task_elem) {
507 		return;
508 	}
509 #if IIE_REF_DEBUG
510 	incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
511 #endif
512 	ipc_importance_reference(&task_elem->iit_elem);
513 }
514 
515 /*
516  *	Routine:	ipc_importance_task_release
517  *	Purpose:
518  *		Release a reference on a task importance attribute value,
519  *		unlinking and deallocating the attribute if the last reference.
520  *	Conditions:
521  *		nothing locked on entrance, nothing locked on exit.
522  *		May block.
523  */
524 void
ipc_importance_task_release(ipc_importance_task_t task_elem)525 ipc_importance_task_release(ipc_importance_task_t task_elem)
526 {
527 	if (IIT_NULL == task_elem) {
528 		return;
529 	}
530 
531 	ipc_importance_lock();
532 #if IIE_REF_DEBUG
533 	incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
534 #endif
535 	ipc_importance_release_locked(&task_elem->iit_elem);
536 	/* unlocked */
537 }
538 
539 /*
540  *	Routine:	ipc_importance_task_release_locked
541  *	Purpose:
542  *		Release a reference on a task importance attribute value,
543  *		unlinking and deallocating the attribute if the last reference.
544  *	Conditions:
545  *		importance lock held on entry, nothing locked on exit.
546  *		May block.
547  */
548 static void
ipc_importance_task_release_locked(ipc_importance_task_t task_elem)549 ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
550 {
551 	if (IIT_NULL == task_elem) {
552 		ipc_importance_unlock();
553 		return;
554 	}
555 #if IIE_REF_DEBUG
556 	incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
557 #endif
558 	ipc_importance_release_locked(&task_elem->iit_elem);
559 	/* unlocked */
560 }
561 
562 /*
563  * Routines for importance donation/inheritance/boosting
564  */
565 
566 
567 /*
568  * External importance assertions are managed by the process in userspace
569  * Internal importance assertions are the responsibility of the kernel
570  * Assertions are changed from internal to external via task_importance_externalize_assertion
571  */
572 
573 /*
574  *	Routine:	ipc_importance_task_check_transition
575  *	Purpose:
576  *		Increase or decrement the internal task importance counter of the
577  *		specified task and determine if propagation and a task policy
578  *		update is required.
579  *
580  *		If it is already enqueued for a policy update, steal it from that queue
581  *		(as we are reversing that update before it happens).
582  *
583  *	Conditions:
584  *		Called with the importance lock held.
585  *		It is the caller's responsibility to perform the propagation of the
586  *		transition and/or policy changes by checking the return value.
587  */
static boolean_t
ipc_importance_task_check_transition(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	uint32_t delta)
{
#if IMPORTANCE_TRACE
	task_t target_task = task_imp->iit_task;
#endif
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	boolean_t before_boosted, after_boosted;

	ipc_importance_assert_held();

	/* tasks that don't receive importance never transition */
	if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
		return FALSE;
	}

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* snapshot the effective boosting status before making any changes */
	before_boosted = (task_imp->iit_assertcnt > 0);

	/* Adjust the assertcnt appropriately */
	if (boost) {
		task_imp->iit_assertcnt += delta;
#if IMPORTANCE_TRACE
		DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
		    task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
#endif
	} else {
		// assert(delta <= task_imp->iit_assertcnt);
		/* clamp rather than underflow: never drop below the external count */
		if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
			/* TODO: Turn this back into a panic <rdar://problem/12592649> */
			task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
		} else {
			task_imp->iit_assertcnt -= delta;
		}
#if IMPORTANCE_TRACE
		// This covers both legacy and voucher-based importance.
		DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
#endif
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* did the change result in an effective donor status change? */
	after_boosted = (task_imp->iit_assertcnt > 0);

	if (after_boosted != before_boosted) {
		/*
		 * If the task importance is already on an update queue, we just reversed the need for a
		 * pending policy update.  If the queue is any other than the delayed-drop-queue, pull it
		 * off that queue and release the reference it got going onto the update queue.  If it is
		 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
		 * before its time delay is up.
		 *
		 * We still need to propagate the change downstream to reverse the assertcnt effects,
		 * but we no longer need to update this task's boost policy state.
		 *
		 * Otherwise, mark it as needing a policy update.
		 */
		assert(0 == task_imp->iit_updatepolicy);
		if (NULL != task_imp->iit_updateq) {
			if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
				queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
				task_imp->iit_updateq = NULL;
				ipc_importance_task_release_internal(task_imp); /* can't be last ref */
			}
		} else {
			task_imp->iit_updatepolicy = 1;
		}
		return TRUE;
	}

	return FALSE;
}
673 
674 
675 /*
676  *	Routine:	ipc_importance_task_propagate_helper
677  *	Purpose:
678  *		Increase or decrement the internal task importance counter of all
679  *		importance tasks inheriting from the specified one.  If this causes
680  *		that importance task to change state, add it to the list of tasks
681  *		to do a policy update against.
682  *	Conditions:
683  *		Called with the importance lock held.
684  *		It is the caller's responsibility to iterate down the generated list
685  *		and propagate any subsequent assertion changes from there.
686  */
static void
ipc_importance_task_propagate_helper(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	queue_t propagation)
{
	ipc_importance_task_t temp_task_imp;

	/*
	 * iterate the downstream kmsgs, adjust their boosts,
	 * and capture the next task to adjust for each message
	 */

	ipc_kmsg_t temp_kmsg;

	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		mach_msg_header_t *hdr = ikm_header(temp_kmsg);
		mach_port_delta_t delta;
		ipc_port_t port;

		/* toggle the kmsg importance bit as a barrier to parallel adjusts */
		if (IIT_UPDATE_HOLD == type) {
			if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* mark the message as now carrying importance */
			hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
			delta = 1;
		} else {
			if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* clear the message as now carrying importance */
			hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
			delta = -1;
		}

		/* determine the task importance to adjust as result (if any) */
		port = hdr->msgh_remote_port;
		assert(IP_VALID(port));
		ip_mq_lock(port);
		temp_task_imp = IIT_NULL;
		/* NOTE: the port is only unlocked here when the call returns FALSE */
		if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
			ip_mq_unlock(port);
		}

		/* no task importance to adjust associated with the port? */
		if (IIT_NULL == temp_task_imp) {
			continue;
		}

		/* hold a reference on temp_task_imp */

		/* Adjust the task assertions and determine if an edge was crossed */
		if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
			incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
			/* reference donated */
		} else {
			ipc_importance_task_release_internal(temp_task_imp);
		}
	}

	/*
	 * iterate the downstream importance inherits
	 * and capture the next task importance to boost for each
	 */
	ipc_importance_inherit_t temp_inherit;

	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		uint32_t assertcnt = III_EXTERN(temp_inherit);

		temp_task_imp = temp_inherit->iii_to_task;
		assert(IIT_NULL != temp_task_imp);

		if (IIT_UPDATE_HOLD == type) {
			/* if no undropped externcnts in the inherit, nothing to do */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is already donating (forced donation) */
			if (temp_inherit->iii_donating) {
				continue;
			}

			/* mark it donating and contribute to the task externcnts */
			temp_inherit->iii_donating = TRUE;
			temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
		} else {
			/* if no contributing assertions, move on */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is not donating */
			if (!temp_inherit->iii_donating) {
				continue;
			}

			/* mark it no longer donating */
			temp_inherit->iii_donating = FALSE;

			/* remove the contribution the inherit made to the to-task */
			assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
			assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
			assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
			temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
		}

		/* Adjust the task assertions and determine if an edge was crossed */
		assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
		if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
			/* take a ref to donate to the propagation queue */
			ipc_importance_task_reference(temp_task_imp);
			incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
		}
	}
}
812 
813 /*
814  *	Routine:	ipc_importance_task_process_updates
815  *	Purpose:
816  *	        Process the queue of task importances and apply the policy
817  *		update called for.  Only process tasks in the queue with an
818  *		update timestamp less than the supplied max.
819  *	Conditions:
820  *		Called and returns with importance locked.
821  *		May drop importance lock and block temporarily.
822  */
823 static void
static void
ipc_importance_task_process_updates(
	queue_t   supplied_queue,
	boolean_t boost,
	uint64_t  max_timestamp)
{
	ipc_importance_task_t task_imp;
	queue_head_t second_chance;
	queue_t queue = supplied_queue;

	/*
	 * Called with the importance lock held; the lock is dropped and
	 * re-taken around each task policy update below.
	 *
	 * This queue will hold the tasks we couldn't trylock on first pass.
	 * By using a second (private) queue, we guarantee all tasks that get
	 * entered on this queue have a timestamp under the maximum.
	 */
	queue_init(&second_chance);

	/* process any resulting policy updates */
retry:
	while (!queue_empty(queue)) {
		task_t target_task;
		struct task_pend_token pend_token = {};

		task_imp = (ipc_importance_task_t)queue_first(queue);
		assert(0 == task_imp->iit_updatepolicy);
		assert(queue == task_imp->iit_updateq);

		/* if timestamp is too big, we're done */
		if (task_imp->iit_updatetime > max_timestamp) {
			break;
		}

		/* we were given a reference on each task in the queue */

		/* remove it from the supplied queue */
		queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
		task_imp->iit_updateq = NULL;

		target_task = task_imp->iit_task;

		/* Is it well on the way to exiting? */
		if (TASK_NULL == target_task) {
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* Has the update been reversed on the hysteresis queue? */
		if (0 < task_imp->iit_assertcnt &&
		    queue == &ipc_importance_delayed_drop_queue) {
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/*
		 * Can we get the task lock out-of-order?
		 * If not, stick this back on the second-chance queue.
		 */
		if (!task_lock_try(target_task)) {
			boolean_t should_wait_lock = (queue == &second_chance);
			task_imp->iit_updateq = &second_chance;

			/*
			 * If we're already processing second-chances on
			 * tasks, keep this task on the front of the queue.
			 * We will wait for the task lock before coming
			 * back and trying again, and we have a better
			 * chance of re-acquiring the lock if we come back
			 * to it right away.
			 */
			if (should_wait_lock) {
				/* extra task ref keeps target_task alive while we block below */
				task_reference(target_task);
				queue_enter_first(&second_chance, task_imp,
				    ipc_importance_task_t, iit_updates);
			} else {
				queue_enter(&second_chance, task_imp,
				    ipc_importance_task_t, iit_updates);
			}
			ipc_importance_unlock();

			if (should_wait_lock) {
				/* block until the current holder drops the task lock, then retry */
				task_lock(target_task);
				task_unlock(target_task);
				task_deallocate(target_task);
			}

			ipc_importance_lock();
			continue;
		}

		/* is it going away? */
		if (!target_task->active) {
			task_unlock(target_task);
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* take a task reference for while we don't have the importance lock */
		task_reference(target_task);

		/* count the transition */
		if (boost) {
			task_imp->iit_transitions++;
		}

		ipc_importance_unlock();

		/* reevaluate turnstile boost */
		pend_token.tpt_update_turnstile = 1;

		/* apply the policy adjust to the target task (while it is still locked) */
		task_update_boost_locked(target_task, boost, &pend_token);

		/* complete the policy update with the task unlocked */
		ipc_importance_task_release(task_imp);
		task_unlock(target_task);
		task_policy_update_complete_unlocked(target_task, &pend_token);
		task_deallocate(target_task);

		ipc_importance_lock();
	}

	/* If there are tasks we couldn't update the first time, try again */
	if (!queue_empty(&second_chance)) {
		queue = &second_chance;
		goto retry;
	}
}
956 
957 
/*
 *	Routine:	ipc_importance_task_delayed_drop_scan
 *	Purpose:
 *	        The thread call routine to scan the delayed drop queue,
 *		requesting a policy update (boost drop) for every entry
 *		with a timestamp up to the last target deadline for the
 *		thread-call (which is DENAP_DROP_SKEW beyond the first
 *		entry's optimum delay).
 *	Conditions:
 *		Nothing locked
 */
static void
ipc_importance_task_delayed_drop_scan(
	__unused void *arg1,
	__unused void *arg2)
{
	ipc_importance_lock();

	/* process all queued task drops with timestamps up to TARGET(first)+SKEW */
	ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
	    FALSE,
	    ipc_importance_delayed_drop_timestamp);

	/* importance lock may have been temporarily dropped */

	/* If there are any entries left in the queue, re-arm the call here */
	if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
		ipc_importance_task_t task_imp;
		uint64_t deadline;
		uint64_t leeway;

		task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);

		/* next deadline is DENAP_DROP_DELAY past the oldest remaining entry */
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	} else {
		/* queue fully drained: note that no thread-call is outstanding */
		ipc_importance_delayed_drop_call_requested = FALSE;
	}
	ipc_importance_unlock();
}
1008 
1009 /*
1010  *	Routine:	ipc_importance_task_delayed_drop
1011  *	Purpose:
1012  *		Queue the specified task importance for delayed policy
1013  *		update to drop its boost.
1014  *	Conditions:
1015  *		Called with the importance lock held.
1016  */
static void
ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
{
	uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */

	assert(ipc_importance_delayed_drop_call != NULL);

	/*
	 * If still on an update queue from a previous change,
	 * remove it first (and use that reference).  Otherwise, take
	 * a new reference for the delay drop update queue.
	 */
	if (NULL != task_imp->iit_updateq) {
		queue_remove(task_imp->iit_updateq, task_imp,
		    ipc_importance_task_t, iit_updates);
	} else {
		ipc_importance_task_reference_internal(task_imp);
	}

	/*
	 * Append at the tail: timestamps are monotonically increasing, so
	 * the delayed-drop queue stays ordered oldest-first for the scan.
	 */
	task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
	task_imp->iit_updatetime = timestamp;

	queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
	    ipc_importance_task_t, iit_updates);

	/* request the delayed thread-call if not already requested */
	if (!ipc_importance_delayed_drop_call_requested) {
		uint64_t deadline;
		uint64_t leeway;

		/* fire DENAP_DROP_DELAY after this (first) entry's timestamp */
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		ipc_importance_delayed_drop_call_requested = TRUE;
		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	}
}
1062 
1063 
1064 /*
1065  *	Routine:	ipc_importance_task_propagate_assertion_locked
1066  *	Purpose:
1067  *		Propagate the importance transition type to every item
1068  *		If this causes a boost to be applied, determine if that
1069  *		boost should propagate downstream.
1070  *	Conditions:
1071  *		Called with the importance lock held.
1072  */
static void
ipc_importance_task_propagate_assertion_locked(
	ipc_importance_task_t task_imp,
	iit_update_type_t type,
	boolean_t update_task_imp)
{
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	ipc_importance_task_t temp_task_imp;
	queue_head_t propagate;	/* worklist of tasks still to visit */
	queue_head_t updates;	/* tasks needing an immediate policy update */

	queue_init(&updates);
	queue_init(&propagate);

	ipc_importance_assert_held();

	/*
	 * If we're going to update the policy for the provided task,
	 * enqueue it on the propagate queue itself.  Otherwise, only
	 * enqueue downstream things.
	 */
	if (update_task_imp) {
		ipc_importance_task_reference(task_imp);
		incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
		queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
	} else {
		ipc_importance_task_propagate_helper(task_imp, type, &propagate);
	}

	/*
	 * for each item on the propagation list, propagate any change downstream,
	 * adding new tasks to propagate further if they transitioned as well.
	 */
	while (!queue_empty(&propagate)) {
		boolean_t need_update;

		queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
		/* hold a reference on temp_task_imp */

		assert(IIT_NULL != temp_task_imp);

		/* only propagate for receivers not already marked as a donor */
		if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
		    ipc_importance_task_is_marked_receiver(temp_task_imp)) {
			ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
		}

		/* if we have a policy update to apply, enqueue a reference for later processing */
		need_update = (0 != temp_task_imp->iit_updatepolicy);
		temp_task_imp->iit_updatepolicy = 0;
		if (need_update && TASK_NULL != temp_task_imp->iit_task) {
			if (NULL == temp_task_imp->iit_updateq) {
				/*
				 * If a downstream task that needs an update is subject to AppNap,
				 * drop boosts according to the delay hysteresis.  Otherwise,
				 * update it immediately.
				 */
				if (!boost && temp_task_imp != task_imp &&
				    ipc_importance_delayed_drop_call != NULL &&
				    ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
					ipc_importance_task_delayed_drop(temp_task_imp);
				} else {
					temp_task_imp->iit_updatetime = 0;
					temp_task_imp->iit_updateq = &updates;
					ipc_importance_task_reference_internal(temp_task_imp);
					/* boosts queue at the tail, drops at the head */
					if (boost) {
						queue_enter(&updates, temp_task_imp,
						    ipc_importance_task_t, iit_updates);
					} else {
						queue_enter_first(&updates, temp_task_imp,
						    ipc_importance_task_t, iit_updates);
					}
				}
			} else {
				/* Must already be on the AppNap hysteresis queue */
				assert(ipc_importance_delayed_drop_call != NULL);
				assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
			}
		}

		ipc_importance_task_release_internal(temp_task_imp);
	}

	/* apply updates to task (may drop importance lock) */
	if (!queue_empty(&updates)) {
		ipc_importance_task_process_updates(&updates, boost, 0);
	}
}
1161 
1162 /*
1163  *	Routine:	ipc_importance_task_hold_internal_assertion_locked
1164  *	Purpose:
1165  *		Increment the assertion count on the task importance.
1166  *		If this results in a boost state change in that task,
1167  *		prepare to update task policy for this task AND, if
1168  *		if not just waking out of App Nap, all down-stream
1169  *		tasks that have a similar transition through inheriting
1170  *		this update.
1171  *	Conditions:
1172  *		importance locked on entry and exit.
1173  *		May temporarily drop importance lock and block.
1174  */
1175 static kern_return_t
ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp,uint32_t count)1176 ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1177 {
1178 	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1179 		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1180 	}
1181 	return KERN_SUCCESS;
1182 }
1183 
1184 /*
1185  *	Routine:	ipc_importance_task_drop_internal_assertion_locked
1186  *	Purpose:
1187  *		Decrement the assertion count on the task importance.
1188  *		If this results in a boost state change in that task,
1189  *		prepare to update task policy for this task AND, if
1190  *		if not just waking out of App Nap, all down-stream
1191  *		tasks that have a similar transition through inheriting
1192  *		this update.
1193  *	Conditions:
1194  *		importance locked on entry and exit.
1195  *		May temporarily drop importance lock and block.
1196  */
1197 static kern_return_t
ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp,uint32_t count)1198 ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1199 {
1200 	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1201 		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1202 	}
1203 	return KERN_SUCCESS;
1204 }
1205 
1206 /*
1207  *      Routine:        ipc_importance_task_hold_internal_assertion
1208  *      Purpose:
1209  *              Increment the assertion count on the task importance.
1210  *              If this results in a 0->1 change in that count,
1211  *              prepare to update task policy for this task AND
1212  *              (potentially) all down-stream tasks that have a
1213  *		similar transition through inheriting this update.
1214  *      Conditions:
1215  *              Nothing locked
1216  *              May block after dropping importance lock.
1217  */
1218 int
ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp,uint32_t count)1219 ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1220 {
1221 	int ret = KERN_SUCCESS;
1222 
1223 	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1224 		ipc_importance_lock();
1225 		ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1226 		ipc_importance_unlock();
1227 	}
1228 	return ret;
1229 }
1230 
1231 /*
1232  *	Routine:	ipc_importance_task_drop_internal_assertion
1233  *	Purpose:
1234  *		Decrement the assertion count on the task importance.
1235  *		If this results in a X->0 change in that count,
1236  *		prepare to update task policy for this task AND
1237  *		all down-stream tasks that have a similar transition
1238  *		through inheriting this drop update.
1239  *	Conditions:
1240  *		Nothing locked on entry.
1241  *		May block after dropping importance lock.
1242  */
1243 kern_return_t
ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp,uint32_t count)1244 ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1245 {
1246 	kern_return_t ret = KERN_SUCCESS;
1247 
1248 	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1249 		ipc_importance_lock();
1250 		ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1251 		ipc_importance_unlock();
1252 	}
1253 	return ret;
1254 }
1255 
1256 /*
1257  *      Routine:        ipc_importance_task_hold_file_lock_assertion
1258  *      Purpose:
1259  *              Increment the file lock assertion count on the task importance.
1260  *              If this results in a 0->1 change in that count,
1261  *              prepare to update task policy for this task AND
1262  *              (potentially) all down-stream tasks that have a
1263  *		similar transition through inheriting this update.
1264  *      Conditions:
1265  *              Nothing locked
1266  *              May block after dropping importance lock.
1267  */
1268 kern_return_t
ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp,uint32_t count)1269 ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1270 {
1271 	kern_return_t ret = KERN_SUCCESS;
1272 
1273 	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1274 		ipc_importance_lock();
1275 		ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1276 		if (KERN_SUCCESS == ret) {
1277 			task_imp->iit_filelocks += count;
1278 		}
1279 		ipc_importance_unlock();
1280 	}
1281 	return ret;
1282 }
1283 
1284 /*
1285  *	Routine:	ipc_importance_task_drop_file_lock_assertion
1286  *	Purpose:
1287  *		Decrement the assertion count on the task importance.
1288  *		If this results in a X->0 change in that count,
1289  *		prepare to update task policy for this task AND
1290  *		all down-stream tasks that have a similar transition
1291  *		through inheriting this drop update.
1292  *	Conditions:
1293  *		Nothing locked on entry.
1294  *		May block after dropping importance lock.
1295  */
1296 kern_return_t
ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp,uint32_t count)1297 ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1298 {
1299 	kern_return_t ret = KERN_SUCCESS;
1300 
1301 	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1302 		ipc_importance_lock();
1303 		if (count <= task_imp->iit_filelocks) {
1304 			task_imp->iit_filelocks -= count;
1305 			ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1306 		} else {
1307 			ret = KERN_INVALID_ARGUMENT;
1308 		}
1309 		ipc_importance_unlock();
1310 	}
1311 	return ret;
1312 }
1313 
1314 /*
1315  *	Routine:	ipc_importance_task_hold_legacy_external_assertion
1316  *	Purpose:
1317  *		Increment the external assertion count on the task importance.
1318  *		This cannot result in an 0->1 transition, as the caller must
1319  *		already hold an external boost.
1320  *	Conditions:
1321  *		Nothing locked on entry.
1322  *		May block after dropping importance lock.
1323  *		A queue of task importance structures is returned
1324  *		by ipc_importance_task_hold_assertion_locked(). Each
1325  *		needs to be updated (outside the importance lock hold).
1326  */
kern_return_t
ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	task_t target_task;
	/* diagnostic snapshot; only assigned on the failure path below */
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	kern_return_t ret;

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (IIT_LEGACY_EXTERN(task_imp) == 0) {
		/* Only allowed to take a new boost assertion when holding an external boost */
		/* save data for diagnostic printf below */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
		count = 0;
	} else {
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(0 < task_imp->iit_assertcnt);
		assert(0 < IIT_EXTERN(task_imp));
		/* reflect the hold into internal, external and legacy-external counts */
		task_imp->iit_assertcnt += count;
		task_imp->iit_externcnt += count;
		task_imp->iit_legacy_externcnt += count;
		ret = KERN_SUCCESS;
	}
	ipc_importance_unlock();

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This covers the legacy case where a task takes an extra boost.
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
#endif

	/* delayed printf for user-supplied data failures (outside the lock) */
	if (KERN_FAILURE == ret && target_task != TASK_NULL) {
		printf("BUG in process %s[%d]: "
		    "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
		    "(%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
	}

	return ret;
}
1383 
1384 /*
1385  *	Routine:	ipc_importance_task_drop_legacy_external_assertion
1386  *	Purpose:
1387  *		Drop the legacy external assertion count on the task and
1388  *		reflect that change to total external assertion count and
1389  *		then onto the internal importance count.
1390  *
1391  *		If this results in a X->0 change in the internal,
1392  *		count, prepare to update task policy for this task AND
1393  *		all down-stream tasks that have a similar transition
1394  *		through inheriting this update.
1395  *	Conditions:
1396  *		Nothing locked on entry.
1397  */
kern_return_t
ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	int ret = KERN_SUCCESS;
	task_t target_task;
	/* diagnostic snapshot; only assigned on the over-release path below */
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	if (count > 1) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (count > IIT_LEGACY_EXTERN(task_imp)) {
		/* Process over-released its boost count - save data for diagnostic printf */
		/* TODO: If count > 1, we should clear out as many external assertions as there are left. */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
	} else {
		/*
		 * decrement legacy external count from the top level and reflect
		 * into internal for this and all subsequent updates.
		 */
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(IIT_EXTERN(task_imp) >= count);

		task_imp->iit_legacy_externdrop += count;
		task_imp->iit_externdrop += count;

		/* reset extern counters (if appropriate) */
		if (IIT_LEGACY_EXTERN(task_imp) == 0) {
			if (IIT_EXTERN(task_imp) != 0) {
				/* non-legacy externals remain: cancel out only the legacy portion */
				task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
				task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
			} else {
				task_imp->iit_externcnt = 0;
				task_imp->iit_externdrop = 0;
			}
			task_imp->iit_legacy_externcnt = 0;
			task_imp->iit_legacy_externdrop = 0;
		}

		/* reflect the drop to the internal assertion count (and effect any importance change) */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
		}
		ret = KERN_SUCCESS;
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	ipc_importance_unlock();

	/* delayed printf for user-supplied data failures */
	if (KERN_FAILURE == ret && TASK_NULL != target_task) {
		printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
	}

	return ret;
}
1475 
1476 
#if LEGACY_IMPORTANCE_DELIVERY
/* Transfer an assertion to legacy userspace responsibility */
static kern_return_t
ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
{
	task_t target_task;

	assert(IIT_NULL != task_imp);
	target_task = task_imp->iit_task;

	/* cannot externalize to a task that is gone or never receives importance */
	if (TASK_NULL == target_task ||
	    !ipc_importance_task_is_any_receiver_type(task_imp)) {
		return KERN_FAILURE;
	}

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	ipc_importance_lock();
	/* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
	assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
	/* count the boost as both external and legacy-external held */
	task_imp->iit_legacy_externcnt += count;
	task_imp->iit_externcnt += count;
	ipc_importance_unlock();

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
	    proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This is the legacy boosting path
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
#endif /* IMPORTANCE_TRACE */

	return KERN_SUCCESS;
}
#endif /* LEGACY_IMPORTANCE_DELIVERY */
1516 
1517 /*
1518  *	Routine:	ipc_importance_task_update_live_donor
1519  *	Purpose:
1520  *		Read the live donor status and update the live_donor bit/propagate the change in importance.
1521  *	Conditions:
1522  *		Nothing locked on entrance, nothing locked on exit.
1523  *
1524  *		TODO: Need tracepoints around this function...
1525  */
void
ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
{
	uint32_t task_live_donor;
	boolean_t before_donor;
	boolean_t after_donor;
	task_t target_task;

	assert(task_imp != NULL);

	/*
	 * Nothing to do if the task is not marked as expecting
	 * live donor updates.
	 */
	if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
		return;
	}

	ipc_importance_lock();

	/* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
	target_task = task_imp->iit_task;
	if (TASK_NULL == target_task) {
		ipc_importance_unlock();
		return;
	}
	before_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* snapshot task live donor status - may change, but another call will accompany the change */
	task_live_donor = target_task->effective_policy.tep_live_donor;

#if IMPORTANCE_TRACE
	int target_pid = task_pid(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
	    target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
#endif

	/* update the task importance live donor status based on the task's value */
	task_imp->iit_donor = task_live_donor;

	after_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* Has the effectiveness of being a donor changed as a result of this update? */
	if (before_donor != after_donor) {
		iit_update_type_t type;

		/* propagate assertions without updating the current task policy (already handled) */
		if (0 == before_donor) {
			/* became a donor: count the transition and boost downstream */
			task_imp->iit_transitions++;
			type = IIT_UPDATE_HOLD;
		} else {
			type = IIT_UPDATE_DROP;
		}
		ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
	}

#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
	    target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
#endif

	ipc_importance_unlock();
}
1592 
1593 
1594 /*
1595  *	Routine:	ipc_importance_task_mark_donor
1596  *	Purpose:
1597  *		Set the task importance donor flag.
1598  *	Conditions:
1599  *		Nothing locked on entrance, nothing locked on exit.
1600  *
1601  *		This is only called while the task is being constructed,
1602  *		so no need to update task policy or propagate downstream.
1603  */
1604 void
ipc_importance_task_mark_donor(ipc_importance_task_t task_imp,boolean_t donating)1605 ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1606 {
1607 	assert(task_imp != NULL);
1608 
1609 	ipc_importance_lock();
1610 
1611 	int old_donor = task_imp->iit_donor;
1612 
1613 	task_imp->iit_donor = (donating ? 1 : 0);
1614 
1615 	if (task_imp->iit_donor > 0 && old_donor == 0) {
1616 		task_imp->iit_transitions++;
1617 	}
1618 
1619 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1620 	    (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1621 	    task_pid(task_imp->iit_task), donating,
1622 	    old_donor, task_imp->iit_donor, 0);
1623 
1624 	ipc_importance_unlock();
1625 }
1626 
1627 /*
1628  *	Routine:	ipc_importance_task_marked_donor
1629  *	Purpose:
1630  *		Query the donor flag for the given task importance.
1631  *	Conditions:
1632  *		May be called without taking the importance lock.
1633  *		In that case, donor status can change so you must
1634  *		check only once for each donation event.
1635  */
1636 boolean_t
ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)1637 ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1638 {
1639 	if (IIT_NULL == task_imp) {
1640 		return FALSE;
1641 	}
1642 	return 0 != task_imp->iit_donor;
1643 }
1644 
1645 /*
1646  *	Routine:	ipc_importance_task_mark_live_donor
1647  *	Purpose:
1648  *		Indicate that the task is eligible for live donor updates.
1649  *	Conditions:
1650  *		Nothing locked on entrance, nothing locked on exit.
1651  *
1652  *		This is only called while the task is being constructed.
1653  */
1654 void
ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp,boolean_t live_donating)1655 ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1656 {
1657 	assert(task_imp != NULL);
1658 
1659 	ipc_importance_lock();
1660 	task_imp->iit_live_donor = (live_donating ? 1 : 0);
1661 	ipc_importance_unlock();
1662 }
1663 
1664 /*
1665  *	Routine:	ipc_importance_task_is_marked_live_donor
1666  *	Purpose:
1667  *		Query the live donor and donor flags for the given task importance.
1668  *	Conditions:
1669  *		May be called without taking the importance lock.
1670  *		In that case, donor status can change so you must
1671  *		check only once for each donation event.
1672  */
1673 boolean_t
ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)1674 ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1675 {
1676 	if (IIT_NULL == task_imp) {
1677 		return FALSE;
1678 	}
1679 	return 0 != task_imp->iit_live_donor;
1680 }
1681 
1682 /*
1683  *	Routine:	ipc_importance_task_is_donor
1684  *	Purpose:
1685  *		Query the full donor status for the given task importance.
1686  *	Conditions:
1687  *		May be called without taking the importance lock.
1688  *		In that case, donor status can change so you must
1689  *		check only once for each donation event.
1690  */
1691 boolean_t
ipc_importance_task_is_donor(ipc_importance_task_t task_imp)1692 ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1693 {
1694 	if (IIT_NULL == task_imp) {
1695 		return FALSE;
1696 	}
1697 	return ipc_importance_task_is_marked_donor(task_imp) ||
1698 	       (ipc_importance_task_is_marked_receiver(task_imp) &&
1699 	       task_imp->iit_assertcnt > 0);
1700 }
1701 
1702 /*
1703  *	Routine:	ipc_importance_task_is_never_donor
1704  *	Purpose:
1705  *		Query if a given task can ever donate importance.
1706  *	Conditions:
1707  *		May be called without taking the importance lock.
1708  *		Condition is permanent for a give task.
1709  */
1710 boolean_t
ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)1711 ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1712 {
1713 	if (IIT_NULL == task_imp) {
1714 		return TRUE;
1715 	}
1716 	return !ipc_importance_task_is_marked_donor(task_imp) &&
1717 	       !ipc_importance_task_is_marked_live_donor(task_imp) &&
1718 	       !ipc_importance_task_is_marked_receiver(task_imp);
1719 }
1720 
1721 /*
1722  *	Routine:	ipc_importance_task_mark_receiver
1723  *	Purpose:
1724  *		Update the task importance receiver flag.
1725  *	Conditions:
1726  *		Nothing locked on entrance, nothing locked on exit.
1727  *		This can only be invoked before the task is discoverable,
1728  *		so no worries about atomicity(?)
1729  */
1730 void
ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp,boolean_t receiving)1731 ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1732 {
1733 	assert(task_imp != NULL);
1734 
1735 	ipc_importance_lock();
1736 	if (receiving) {
1737 		assert(task_imp->iit_assertcnt == 0);
1738 		assert(task_imp->iit_externcnt == 0);
1739 		assert(task_imp->iit_externdrop == 0);
1740 		assert(task_imp->iit_denap == 0);
1741 		task_imp->iit_receiver = 1;  /* task can receive importance boost */
1742 	} else if (task_imp->iit_receiver) {
1743 		assert(task_imp->iit_denap == 0);
1744 		if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1745 			panic("disabling imp_receiver on task with pending importance boosts!");
1746 		}
1747 		task_imp->iit_receiver = 0;
1748 	}
1749 	ipc_importance_unlock();
1750 }
1751 
1752 
1753 /*
1754  *	Routine:	ipc_importance_task_marked_receiver
1755  *	Purpose:
1756  *		Query the receiver flag for the given task importance.
1757  *	Conditions:
1758  *		May be called without taking the importance lock as
1759  *		the importance flag can never change after task init.
1760  */
1761 boolean_t
ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)1762 ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1763 {
1764 	return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
1765 }
1766 
1767 
1768 /*
1769  *	Routine:	ipc_importance_task_mark_denap_receiver
1770  *	Purpose:
1771  *		Update the task importance de-nap receiver flag.
1772  *	Conditions:
1773  *		Nothing locked on entrance, nothing locked on exit.
1774  *		This can only be invoked before the task is discoverable,
1775  *		so no worries about atomicity(?)
1776  */
1777 void
ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp,boolean_t denap)1778 ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1779 {
1780 	assert(task_imp != NULL);
1781 
1782 	ipc_importance_lock();
1783 	if (denap) {
1784 		assert(task_imp->iit_assertcnt == 0);
1785 		assert(task_imp->iit_externcnt == 0);
1786 		assert(task_imp->iit_receiver == 0);
1787 		task_imp->iit_denap = 1;  /* task can receive de-nap boost */
1788 	} else if (task_imp->iit_denap) {
1789 		assert(task_imp->iit_receiver == 0);
1790 		if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1791 			panic("disabling de-nap on task with pending de-nap boosts!");
1792 		}
1793 		task_imp->iit_denap = 0;
1794 	}
1795 	ipc_importance_unlock();
1796 }
1797 
1798 
1799 /*
1800  *	Routine:	ipc_importance_task_marked_denap_receiver
1801  *	Purpose:
1802  *		Query the de-nap receiver flag for the given task importance.
1803  *	Conditions:
1804  *		May be called without taking the importance lock as
1805  *		the de-nap flag can never change after task init.
1806  */
1807 boolean_t
ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)1808 ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1809 {
1810 	return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
1811 }
1812 
/*
 *	Routine:	ipc_importance_task_is_denap_receiver
 *	Purpose:
 *		Query the full de-nap receiver status for the given task importance.
 *		For now, that is simply whether the receiver flag is set
 *		(a thin wrapper kept separate so the "full status" definition
 *		can grow without touching callers).
 *	Conditions:
 *		May be called without taking the importance lock as
 *		the de-nap receiver flag can never change after task init.
 */
boolean_t
ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
{
	return ipc_importance_task_is_marked_denap_receiver(task_imp);
}
1827 
1828 /*
1829  *	Routine:	ipc_importance_task_is_any_receiver_type
1830  *	Purpose:
1831  *		Query if the task is marked to receive boosts - either
1832  *		importance or denap.
1833  *	Conditions:
1834  *		May be called without taking the importance lock as both
1835  *		the importance and de-nap receiver flags can never change
1836  *		after task init.
1837  */
1838 boolean_t
ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)1839 ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1840 {
1841 	return ipc_importance_task_is_marked_receiver(task_imp) ||
1842 	       ipc_importance_task_is_marked_denap_receiver(task_imp);
1843 }
1844 
#if 0 /* currently unused */

/*
 *	Routine:	ipc_importance_inherit_reference
 *	Purpose:
 *		Add a reference to the inherit importance element.
 *	Conditions:
 *		Caller must hold a reference on the inherit element.
 */
static inline void
ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
{
	ipc_importance_reference(&inherit->iii_elem);
}
#endif /* currently unused */
1860 
/*
 *	Routine:	ipc_importance_inherit_release_locked
 *	Purpose:
 *		Release a reference on an inherit importance attribute value,
 *		unlinking and deallocating the attribute if the last reference.
 *		Delegates to the generic element release on the embedded iii_elem.
 *	Conditions:
 *		Entered with importance lock held, leaves with it unlocked.
 */
static inline void
ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
{
	ipc_importance_release_locked(&inherit->iii_elem);
}
1874 
#if 0 /* currently unused */
/*
 *	Routine:	ipc_importance_inherit_release
 *	Purpose:
 *		Release a reference on an inherit importance attribute value,
 *		unlinking and deallocating the attribute if the last reference.
 *		A III_NULL inherit is tolerated and ignored.
 *	Conditions:
 *		nothing locked on entrance, nothing locked on exit.
 *		May block.
 */
void
ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
{
	if (III_NULL != inherit) {
		ipc_importance_release(&inherit->iii_elem);
	}
}
#endif /* 0 currently unused */
1893 
/*
 *	Routine:	ipc_importance_for_task
 *	Purpose:
 *		Create a reference for the specified task's base importance
 *		element.  If the base importance element doesn't exist, make it and
 *		bind it to the active task.  If the task is inactive, there isn't
 *		any need to return a new reference.
 *	Conditions:
 *		If made is true, a "made" reference is returned (for donating to
 *		the voucher system).  Otherwise	an internal reference is returned.
 *
 *		Nothing locked on entry.  May block.
 */
ipc_importance_task_t
ipc_importance_for_task(task_t task, boolean_t made)
{
	ipc_importance_task_t task_elem;
	boolean_t first_pass = TRUE;

	assert(TASK_NULL != task);

retry:
	/* No use returning anything for inactive task */
	if (!task->active) {
		return IIT_NULL;
	}

	ipc_importance_lock();
	task_elem = task->task_imp_base;
	if (IIT_NULL != task_elem) {
		/* Add a made reference (borrowing active task ref to do it) */
		if (made) {
			/* only the first "made" ref needs a backing internal ref */
			if (0 == task_elem->iit_made++) {
				assert(IIT_REFS_MAX > IIT_REFS(task_elem));
				ipc_importance_task_reference_internal(task_elem);
			}
		} else {
			assert(IIT_REFS_MAX > IIT_REFS(task_elem));
			ipc_importance_task_reference_internal(task_elem);
		}
		ipc_importance_unlock();
		return task_elem;
	}
	ipc_importance_unlock();

	/* second time through with still no element: give up */
	if (!first_pass) {
		return IIT_NULL;
	}
	first_pass = FALSE;

	/* Need to make one - may race with others (be prepared to drop) */
	task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO);
	if (IIT_NULL == task_elem) {
		goto retry;
	}

	task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
	task_elem->iit_made = (made) ? 1 : 0;
	task_elem->iit_task = task; /* take actual ref when we're sure */
#if IIE_REF_DEBUG
	ipc_importance_counter_init(&task_elem->iit_elem);
#endif
	queue_init(&task_elem->iit_kmsgs);
	queue_init(&task_elem->iit_inherits);

	ipc_importance_lock();
	/* recheck activity under the lock - the task may have terminated meanwhile */
	if (!task->active) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		return IIT_NULL;
	}

	/* did we lose the race? */
	if (IIT_NULL != task->task_imp_base) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		goto retry;
	}

	/* we won the race */
	task->task_imp_base = task_elem;
	task_reference_grp(task, TASK_GRP_INTERNAL);
#if DEVELOPMENT || DEBUG
	queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
	task_importance_update_owner_info(task);
#endif
	ipc_importance_unlock();

	return task_elem;
}
1984 
#if DEVELOPMENT || DEBUG
/*
 * Cache the owning task's pid and process name on its importance base
 * (debug/development builds only) so tools can identify the owner later.
 */
void
task_importance_update_owner_info(task_t task)
{
	if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
		ipc_importance_task_t task_elem = task->task_imp_base;

		task_elem->iit_bsd_pid = task_pid(task);
		if (get_bsdtask_info(task)) {
			/* strncpy may leave no NUL; [16] is terminated explicitly below.
			 * NOTE(review): assumes iit_procname holds at least 17 bytes - confirm in header. */
			strncpy(&task_elem->iit_procname[0], proc_name_address(get_bsdtask_info(task)), 16);
			task_elem->iit_procname[16] = '\0';
		} else {
			strncpy(&task_elem->iit_procname[0], "unknown", 16);
		}
	}
}
#endif
2002 
/*
 * Return the pid associated with an importance element: the cached
 * iit_bsd_pid on debug/development builds, otherwise looked up live
 * from the bound task.
 */
static int
task_importance_task_get_pid(ipc_importance_task_t iit)
{
#if DEVELOPMENT || DEBUG
	return (int)iit->iit_bsd_pid;
#else
	return task_pid(iit->iit_task);
#endif
}
2012 
/*
 *	Routine:	ipc_importance_reset_locked
 *	Purpose:
 *		Reset a task's IPC importance (the task is going away or exec'ing)
 *
 *		Remove the donor bit and legacy externalized assertions from the
 *		current task importance and see if that wipes out downstream donations.
 *	Conditions:
 *		importance lock held.
 */

static void
ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
{
	boolean_t before_donor, after_donor;

	/* remove the donor bit, live-donor bit and externalized boosts */
	before_donor = ipc_importance_task_is_donor(task_imp);
	if (donor) {
		task_imp->iit_donor = 0;
	}
	assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
	assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
	assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
	/* fold the legacy external counts out of the totals */
	task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
	task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;

	/* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
	/* drop assertions held on behalf of legacy external boosts, but never
	 * let assertcnt fall below the (non-legacy) external boosts still held */
	if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
		task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
	} else {
		task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
	}
	task_imp->iit_legacy_externcnt = 0;
	task_imp->iit_legacy_externdrop = 0;
	after_donor = ipc_importance_task_is_donor(task_imp);

	/* propagate a downstream drop if there was a change in donor status */
	if (after_donor != before_donor) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
	}
}
2055 
2056 /*
2057  *	Routine:	ipc_importance_reset
2058  *	Purpose:
2059  *		Reset a task's IPC importance
2060  *
2061  *		The task is being reset, although staying around. Arrange to have the
2062  *		external state of the task reset from the importance.
2063  *	Conditions:
2064  *		importance lock not held.
2065  */
2066 
2067 void
ipc_importance_reset(ipc_importance_task_t task_imp,boolean_t donor)2068 ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2069 {
2070 	if (IIT_NULL == task_imp) {
2071 		return;
2072 	}
2073 	ipc_importance_lock();
2074 	ipc_importance_reset_locked(task_imp, donor);
2075 	ipc_importance_unlock();
2076 }
2077 
/*
 *	Routine:	ipc_importance_disconnect_task
 *	Purpose:
 *		Disconnect a task from its importance.
 *
 *		Clear the task pointer from the importance and drop the
 *		reference the task held on the importance object.  Before
 *		doing that, reset the effects the current task holds on
 *		the importance and see if that wipes out downstream donations.
 *
 *		We allow the upstream boosts to continue to affect downstream
 *		even though the local task is being effectively pulled from
 *		the chain.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_importance_disconnect_task(task_t task)
{
	ipc_importance_task_t task_imp;

	/* lock order: task lock, then importance lock */
	task_lock(task);
	ipc_importance_lock();
	task_imp = task->task_imp_base;

	/* did somebody beat us to it? */
	if (IIT_NULL == task_imp) {
		ipc_importance_unlock();
		task_unlock(task);
		return;
	}

	/* disconnect the task from this importance */
	assert(task_imp->iit_task == task);
	task_imp->iit_task = TASK_NULL;
	task->task_imp_base = IIT_NULL;
	task_unlock(task);

	/* reset the effects the current task hold on the importance */
	ipc_importance_reset_locked(task_imp, TRUE);

	ipc_importance_task_release_locked(task_imp);
	/* importance unlocked */

	/* deallocate the task now that the importance is unlocked */
	task_deallocate_grp(task, TASK_GRP_INTERNAL);
}
2125 
/*
 *	Routine:	ipc_importance_exec_switch_task
 *	Purpose:
 *		Switch importance task base from old task to new task in exec.
 *
 *		Create an ipc importance linkage from old task to new task,
 *		once the linkage is created, switch the importance task base
 *		from old task to new task. After the switch, the linkage will
 *		represent importance linkage from new task to old task with
 *		watch port importance inheritance linked to new task.
 *	Conditions:
 *		Nothing locked.
 *		Returns a reference on importance inherit.
 */
ipc_importance_inherit_t
ipc_importance_exec_switch_task(
	task_t old_task,
	task_t new_task)
{
	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_task_t old_task_imp = IIT_NULL;
	ipc_importance_task_t new_task_imp = IIT_NULL;

	task_importance_reset(old_task);

	/* Create an importance linkage from old_task to new_task */
	inherit = ipc_importance_inherit_from_task(old_task, new_task);

	/* Switch task importance base from old task to new task */
	ipc_importance_lock();

	old_task_imp = old_task->task_imp_base;
	new_task_imp = new_task->task_imp_base;

	/* swap both the back-pointers and the base pointers */
	old_task_imp->iit_task = new_task;
	new_task_imp->iit_task = old_task;

	old_task->task_imp_base = new_task_imp;
	new_task->task_imp_base = old_task_imp;

#if DEVELOPMENT || DEBUG
	/*
	 * Update the pid and proc name for importance base if any
	 */
	task_importance_update_owner_info(new_task);
#endif
	ipc_importance_unlock();

	return inherit;
}
2176 
/*
 *	Routine:	ipc_importance_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *
 *		Additionally, if port was successfully changed to "in transit",
 *		propagate boost assertions from the "in limbo" port to all
 *		the ports in the chain, and, if the destination task accepts
 *		boosts, to the destination task.
 *
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_importance_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	ipc_importance_task_t release_imp_task = IIT_NULL;
	boolean_t imp_lock_held = FALSE;
	int assertcnt = 0;
	ipc_port_t base;
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* sending a port to itself is trivially circular */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/* port is in limbo, so donation status is safe to latch */
	if (port->ip_impdonation != 0) {
		imp_lock_held = TRUE;
		ipc_importance_lock();
	}

	/*
	 *	First try a quick check that can run in parallel.
	 *	No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);

	/*
	 * Even if port is just carrying assertions for others,
	 * we need the importance lock.
	 */
	if (port->ip_impcount > 0 && !imp_lock_held) {
		/* try-lock first to avoid dropping the port lock if we can */
		if (!ipc_importance_lock_try()) {
			ip_mq_unlock(port);
			ipc_importance_lock();
			ip_mq_lock(port);
		}
		imp_lock_held = TRUE;
	}

	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	/*
	 * We're about to pay the cost to serialize,
	 * just go ahead and grab importance lock.
	 */
	if (!imp_lock_held) {
		ipc_importance_lock();
		imp_lock_held = TRUE;
	}

	ipc_port_multiple_lock(); /* massive serialization */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		require_ip_active(port);
		assert(ip_in_limbo(port));
		assert(!took_base_ref);

		/* walk the chain again just to unlock each port */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* base is in transit or in limbo */

			require_ip_active(base);
			assert(base->ip_receiver_name == MACH_PORT_NULL);
			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		if (imp_lock_held) {
			ipc_importance_unlock();
		}

		/* undo the turnstile_prepare done on entry */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 *	The guarantee:  lock port while the entire chain is locked.
	 *	Once port is locked, we can take a reference to dest,
	 *	add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* port is in limbo */
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Port is being enqueued in a kmsg, remove the watchport boost in order to push on destination port */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* must have been in limbo or still bound to a task */
	assert(port->ip_tempowner != 0);

	/*
	 * We delayed dropping assertions from a specific task.
	 * Cache that info now (we'll drop assertions and the
	 * task reference below).
	 */
	release_imp_task = ip_get_imp_task(port);
	if (IIT_NULL != release_imp_task) {
		port->ip_imp_task = IIT_NULL;
	}
	assertcnt = port->ip_impcount;

	/* take the port out of limbo w.r.t. assertions */
	port->ip_tempowner = 0;

	/*
	 * Setup linkage for source port if it has a send turnstile i.e. it has
	 * a thread waiting in send or has a port enqueued in it or has sync ipc
	 * push from a special reply port.
	 */
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	/* walk from dest to base, crediting the assertions and unlocking each port */
	for (;;) {
		ipc_port_t next;
		/* every port along chain track assertions behind it */
		ipc_port_impcount_delta(dest, assertcnt, base);

		if (dest == base) {
			break;
		}

		/* port is in transit */

		require_ip_active(dest);
		assert(ip_in_transit(dest));
		assert(dest->ip_tempowner == 0);

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not in transit */
	assert(!ip_in_transit(base));

	/*
	 * Find the task to boost (if any).
	 * We will boost "through" ports that don't know
	 * about inheritance to deliver receive rights that
	 * do.
	 */
	if (ip_active(base) && (assertcnt > 0)) {
		assert(imp_lock_held);
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				/* specified tempowner task */
				imp_task = ip_get_imp_task(base);
				assert(ipc_importance_task_is_any_receiver_type(imp_task));
			}
			/* otherwise don't boost current task */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);
			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				imp_task = space->is_task->task_imp_base;
			}
		}

		/* take reference before unlocking base */
		if (imp_task != IIT_NULL) {
			ipc_importance_task_reference(imp_task);
		}
	}

	ip_mq_unlock(base);

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the port lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	/*
	 * Transfer assertions now that the ports are unlocked.
	 * Avoid extra overhead if transferring to/from the same task.
	 *
	 * NOTE: If a transfer is occurring, the new assertions will
	 * be added to imp_task BEFORE the importance lock is unlocked.
	 * This is critical - to avoid decrements coming from the kmsgs
	 * beating the increment to the task.
	 */
	boolean_t transfer_assertions = (imp_task != release_imp_task);

	if (imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
		}
	}

	if (release_imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
		}
	}

	if (imp_lock_held) {
		ipc_importance_unlock();
	}

	if (took_base_ref) {
		ip_release(base);
	}

	if (imp_task != IIT_NULL) {
		ipc_importance_task_release(imp_task);
	}

	if (release_imp_task != IIT_NULL) {
		ipc_importance_task_release(release_imp_task);
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
}
2484 
/*
 *	Routine:	ipc_importance_send
 *	Purpose:
 *		Post the importance voucher attribute [if sent] or a static
 *		importance boost depending upon options and conditions.
 *	Conditions:
 *		Destination port locked on entry and exit, may be dropped during the call.
 *	Returns:
 *		A boolean identifying if the port lock was temporarily dropped.
 */
boolean_t
ipc_importance_send(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option)
{
	mach_msg_header_t *hdr = ikm_header(kmsg);
	ipc_port_t port = hdr->msgh_remote_port;
	ipc_port_t voucher_port;
	boolean_t port_lock_dropped = FALSE;
	ipc_importance_elem_t elem;
	task_t task;
	ipc_importance_task_t task_imp;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* If no donation to be made, return quickly */
	if ((port->ip_impdonation == 0) ||
	    (option & MACH_SEND_NOIMPORTANCE) != 0) {
		return port_lock_dropped;
	}

	task = current_task();

	/* If forced sending a static boost, go update the port */
	if ((option & MACH_SEND_IMPORTANCE) != 0) {
		/* acquire the importance lock while trying to hang on to port lock */
		if (!ipc_importance_lock_try()) {
			port_lock_dropped = TRUE;
			ip_mq_unlock(port);
			ipc_importance_lock();
		}
		goto portupdate;
	}

	task_imp = task->task_imp_base;

	/* If the sender can never donate importance, nothing to do */
	if (ipc_importance_task_is_never_donor(task_imp)) {
		return port_lock_dropped;
	}

	elem = IIE_NULL;

	/* If importance receiver and passing a voucher, look for importance in there */
	voucher_port = ipc_kmsg_get_voucher_port(kmsg);
	if (IP_VALID(voucher_port) &&
	    ipc_importance_task_is_marked_receiver(task_imp)) {
		mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
		mach_voucher_attr_value_handle_array_size_t val_count;
		ipc_voucher_t voucher;

		assert(ip_kotype(voucher_port) == IKOT_VOUCHER);
		voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
		    IKOT_VOUCHER);

		/* check to see if the voucher has an importance attribute */
		val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
		kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
		    vals, &val_count);
		assert(KERN_SUCCESS == kr);

		/*
		 * Only use importance associated with our task (either directly
		 * or through an inherit that donates to our task).
		 */
		if (0 < val_count) {
			ipc_importance_elem_t check_elem;

			check_elem = (ipc_importance_elem_t)vals[0];
			assert(IIE_NULL != check_elem);
			if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
				ipc_importance_inherit_t inherit;
				inherit = (ipc_importance_inherit_t) check_elem;
				if (inherit->iii_to_task == task_imp) {
					elem = check_elem;
				}
			} else if (check_elem == (ipc_importance_elem_t)task_imp) {
				elem = check_elem;
			}
		}
	}

	/* If we haven't found an importance attribute to send yet, use the task's */
	if (IIE_NULL == elem) {
		elem = (ipc_importance_elem_t)task_imp;
	}

	/* take a reference for the message to hold */
	ipc_importance_reference_internal(elem);

	/* acquire the importance lock while trying to hang on to port lock */
	if (!ipc_importance_lock_try()) {
		port_lock_dropped = TRUE;
		ip_mq_unlock(port);
		ipc_importance_lock();
	}

	/* link kmsg onto the donor element propagation chain */
	ipc_importance_kmsg_link(kmsg, elem);
	/* elem reference transferred to kmsg */

	incr_ref_counter(elem->iie_kmsg_refs_added);

	/* If the sender isn't currently a donor, no need to apply boost */
	if (!ipc_importance_task_is_donor(task_imp)) {
		ipc_importance_unlock();

		/* re-acquire port lock, if needed */
		if (TRUE == port_lock_dropped) {
			ip_mq_lock(port);
		}

		return port_lock_dropped;
	}

portupdate:
	/* Mark the fact that we are (currently) donating through this message */
	hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;

	/*
	 * If we need to relock the port, do it with the importance still locked.
	 * This assures we get to add the importance boost through the port to
	 * the task BEFORE anyone else can attempt to undo that operation if
	 * the sender lost donor status.
	 */
	if (TRUE == port_lock_dropped) {
		ip_mq_lock(port);
	}

	ipc_importance_assert_held();

#if IMPORTANCE_TRACE
	if (kdebug_enable) {
		mach_msg_max_trailer_t *dbgtrailer = ipc_kmsg_get_trailer(kmsg, false);
		unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
		mach_msg_id_t imp_msgh_id = hdr->msgh_id;
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
		    task_pid(task), sender_pid, imp_msgh_id, 0, 0);
	}
#endif /* IMPORTANCE_TRACE */

	mach_port_delta_t delta = 1;
	boolean_t need_port_lock;
	task_imp = IIT_NULL;

	/* adjust port boost count (with importance and port locked) */
	need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
	/* hold a reference on task_imp */

	/* if we need to adjust a task importance as a result, apply that here */
	if (IIT_NULL != task_imp && delta != 0) {
		assert(delta == 1);

		/* if this results in a change of state, propagate the transition */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
			/* can't hold the port lock during task transition(s) */
			if (!need_port_lock) {
				need_port_lock = TRUE;
				ip_mq_unlock(port);
			}
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
		}
	}

	if (task_imp) {
		ipc_importance_task_release_locked(task_imp);
		/* importance unlocked */
	} else {
		ipc_importance_unlock();
	}

	if (need_port_lock) {
		port_lock_dropped = TRUE;
		ip_mq_lock(port);
	}

	return port_lock_dropped;
}
2674 
2675 /*
2676  *	Routine:	ipc_importance_inherit_from_kmsg
2677  *	Purpose:
2678  *		Create a "made" reference for an importance attribute representing
2679  *		an inheritance between the sender of a message (if linked) and the
2680  *		current task importance.  If the message is not linked, a static
2681  *		boost may be created, based on the boost state of the message.
2682  *
2683  *		Any transfer from kmsg linkage to inherit linkage must be atomic.
2684  *
2685  *		If the task is inactive, there isn't any need to return a new reference.
2686  *	Conditions:
2687  *		Nothing locked on entry.  May block.
2688  */
static ipc_importance_inherit_t
ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
{
	ipc_importance_task_t   task_imp = IIT_NULL;
	ipc_importance_elem_t   from_elem = kmsg->ikm_importance;
	ipc_importance_elem_t   elem;
	task_t  task_self = current_task();

	mach_msg_header_t *hdr = ikm_header(kmsg);
	ipc_port_t port = hdr->msgh_remote_port;
	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_inherit_t alloc = III_NULL;
	boolean_t cleared_self_donation = FALSE;
	boolean_t donating;
	uint32_t depth = 1;

	/* The kmsg must have an importance donor or static boost to proceed */
	if (IIE_NULL == kmsg->ikm_importance &&
	    !MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
		return III_NULL;
	}

	/*
	 * No need to set up an inherit linkage if the dest isn't a receiver
	 * of one type or the other.
	 */
	if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
		ipc_importance_lock();
		goto out_locked;
	}

	/* Grab a reference on the importance of the destination */
	task_imp = ipc_importance_for_task(task_self, FALSE);

	ipc_importance_lock();

	if (IIT_NULL == task_imp) {
		goto out_locked;
	}

	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);

	/* If message is already associated with an inherit... */
	if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
		ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;

		/* already targeting our task? - just use it */
		if (from_inherit->iii_to_task == task_imp) {
			/* clear self-donation if not also present in inherit */
			if (!from_inherit->iii_donating &&
			    MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
				cleared_self_donation = TRUE;
			}
			inherit = from_inherit;
		} else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
			ipc_importance_task_t to_task;
			ipc_importance_elem_t unlinked_from;

			/*
			 * Chain too long. Switch to looking
			 * directly at the from_inherit's to-task
			 * as our source of importance.
			 */
			to_task = from_inherit->iii_to_task;
			ipc_importance_task_reference(to_task);
			from_elem = (ipc_importance_elem_t)to_task;
			depth = III_DEPTH_RESET | 1;

			/* Fixup the kmsg linkage to reflect change */
			unlinked_from = ipc_importance_kmsg_unlink(kmsg);
			assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
			ipc_importance_kmsg_link(kmsg, from_elem);
			/* NOTE: release may drop the importance lock, so retake it below */
			ipc_importance_inherit_release_locked(from_inherit);
			/* importance unlocked */
			ipc_importance_lock();
		} else {
			/* inheriting from an inherit */
			depth = from_inherit->iii_depth + 1;
		}
	}

	/*
	 * Don't allow a task to inherit from itself (would keep it permanently
	 * boosted even if all other donors to the task went away).
	 */

	if (from_elem == (ipc_importance_elem_t)task_imp) {
		goto out_locked;
	}

	/*
	 * But if the message isn't associated with any linked source, it is
	 * intended to be permanently boosting (static boost from kernel).
	 * In that case DO let the process permanently boost itself.
	 */
	if (IIE_NULL == from_elem) {
		assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits));
		ipc_importance_task_reference_internal(task_imp);
		from_elem = (ipc_importance_elem_t)task_imp;
	}

	/*
	 * Now that we have the from_elem figured out,
	 * check to see if we already have an inherit for this pairing
	 */
	while (III_NULL == inherit) {
		inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);

		/* Do we have to allocate a new inherit */
		if (III_NULL == inherit) {
			if (III_NULL != alloc) {
				break;
			}

			/*
			 * allocate space - the importance lock must be dropped
			 * around the allocation, so loop and re-do the lookup
			 * in case someone else installed one meanwhile.
			 */
			ipc_importance_unlock();
			alloc = (ipc_importance_inherit_t)
			    zalloc(ipc_importance_inherit_zone);
			ipc_importance_lock();
		}
	}

	/* snapshot the donating status while we have importance locked */
	donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);

	if (III_NULL != inherit) {
		/* We found one, piggyback on that */
		assert(0 < III_REFS(inherit));
		assert(0 < IIE_REFS(inherit->iii_from_elem));
		assert(inherit->iii_externcnt >= inherit->iii_made);

		/* add in a made reference (first made ref also takes an internal ref) */
		if (0 == inherit->iii_made++) {
			assert(III_REFS_MAX > III_REFS(inherit));
			ipc_importance_inherit_reference_internal(inherit);
		}

		/* Reflect the inherit's change of status into the task boosts */
		if (0 == III_EXTERN(inherit)) {
			assert(!inherit->iii_donating);
			inherit->iii_donating = donating;
			if (donating) {
				task_imp->iit_externcnt += inherit->iii_externcnt;
				task_imp->iit_externdrop += inherit->iii_externdrop;
			}
		} else {
			assert(donating == inherit->iii_donating);
		}

		/* add in an external reference for this use of the inherit */
		inherit->iii_externcnt++;
	} else {
		/* initialize the previously allocated space */
		inherit = alloc;
		inherit->iii_bits = IIE_TYPE_INHERIT | 1;
		inherit->iii_made = 1;
		inherit->iii_externcnt = 1;
		inherit->iii_externdrop = 0;
		inherit->iii_depth = depth;
		inherit->iii_to_task = task_imp;
		inherit->iii_from_elem = IIE_NULL;
		queue_init(&inherit->iii_kmsgs);

		if (donating) {
			inherit->iii_donating = TRUE;
		} else {
			inherit->iii_donating = FALSE;
		}

		/*
		 * Chain our new inherit on the element it inherits from.
		 * The new inherit takes our reference on from_elem.
		 */
		ipc_importance_inherit_link(inherit, from_elem);

#if IIE_REF_DEBUG
		ipc_importance_counter_init(&inherit->iii_elem);
		from_elem->iie_kmsg_refs_inherited++;
		task_imp->iit_elem.iie_task_refs_inherited++;
#endif
	}

out_locked:
	/*
	 * for those paths that came straight here: snapshot the donating status
	 * (this should match previous snapshot for other paths).
	 */
	donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);

	/* unlink the kmsg inheritance (if any) */
	elem = ipc_importance_kmsg_unlink(kmsg);
	assert(elem == from_elem);

	/* If found inherit and donating, reflect that in the task externcnt */
	if (III_NULL != inherit && donating) {
		task_imp->iit_externcnt++;
		/* The owner of receive right might have changed, take the internal assertion */
		ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
		/* may have dropped and retaken importance lock */
	}

	/* If we didn't create a new inherit, we have some resources to release */
	if (III_NULL == inherit || inherit != alloc) {
		if (IIE_NULL != from_elem) {
			if (III_NULL != inherit) {
				incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
			} else {
				incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
			}
			ipc_importance_release_locked(from_elem);
			/* importance unlocked */
		} else {
			ipc_importance_unlock();
		}

		if (IIT_NULL != task_imp) {
			if (III_NULL != inherit) {
				incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
			}
			ipc_importance_task_release(task_imp);
		}

		if (III_NULL != alloc) {
			/* lost the allocation race - discard the unused element */
			zfree(ipc_importance_inherit_zone, alloc);
		}
	} else {
		/* from_elem and task_imp references transferred to new inherit */
		ipc_importance_unlock();
	}

	/*
	 * decrement port boost count
	 * This is OK to do without the importance lock as we atomically
	 * unlinked the kmsg and snapshot the donating state while holding
	 * the importance lock
	 */
	if (donating || cleared_self_donation) {
		ip_mq_lock(port);
		/* drop importance from port and destination task */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
			/* on TRUE the callee consumed/released the port lock */
			ip_mq_unlock(port);
		}
	}

	if (III_NULL != inherit) {
		/* have an associated importance attr, even if currently not donating */
		hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
	} else {
		/* we won't have an importance attribute associated with our message */
		hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
	}

	return inherit;
}
2944 
2945 /*
2946  *	Routine:	ipc_importance_inherit_from_task
2947  *	Purpose:
2948  *		Create a reference for an importance attribute representing
2949  *		an inheritance between the to_task and from_task. The iii
2950  *		created will be marked as III_FLAGS_FOR_OTHERS.
2951  *
2952  *		It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2953  *
2954  *		If the task is inactive, there isn't any need to return a new reference.
2955  *	Conditions:
2956  *		Nothing locked on entry.  May block.
2957  *		It should not be called from voucher subsystem.
2958  */
2959 static ipc_importance_inherit_t
ipc_importance_inherit_from_task(task_t from_task,task_t to_task)2960 ipc_importance_inherit_from_task(
2961 	task_t from_task,
2962 	task_t to_task)
2963 {
2964 	ipc_importance_task_t   to_task_imp = IIT_NULL;
2965 	ipc_importance_task_t   from_task_imp = IIT_NULL;
2966 	ipc_importance_elem_t   from_elem = IIE_NULL;
2967 
2968 	ipc_importance_inherit_t inherit = III_NULL;
2969 	ipc_importance_inherit_t alloc = III_NULL;
2970 	boolean_t donating;
2971 	uint32_t depth = 1;
2972 
2973 	to_task_imp = ipc_importance_for_task(to_task, FALSE);
2974 	from_task_imp = ipc_importance_for_task(from_task, FALSE);
2975 	from_elem = (ipc_importance_elem_t)from_task_imp;
2976 
2977 	ipc_importance_lock();
2978 
2979 	if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
2980 		goto out_locked;
2981 	}
2982 
2983 	/*
2984 	 * No need to set up an inherit linkage if the to_task or from_task
2985 	 * isn't a receiver of one type or the other.
2986 	 */
2987 	if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
2988 	    !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
2989 		goto out_locked;
2990 	}
2991 
2992 	/* Do not allow to create a linkage to self */
2993 	if (to_task_imp == from_task_imp) {
2994 		goto out_locked;
2995 	}
2996 
2997 	incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
2998 	incr_ref_counter(from_elem->iie_kmsg_refs_added);
2999 
3000 	/*
3001 	 * Now that we have the from_elem figured out,
3002 	 * check to see if we already have an inherit for this pairing
3003 	 */
3004 	while (III_NULL == inherit) {
3005 		inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3006 
3007 		/* Do we have to allocate a new inherit */
3008 		if (III_NULL == inherit) {
3009 			if (III_NULL != alloc) {
3010 				break;
3011 			}
3012 
3013 			/* allocate space */
3014 			ipc_importance_unlock();
3015 			alloc = (ipc_importance_inherit_t)
3016 			    zalloc(ipc_importance_inherit_zone);
3017 			ipc_importance_lock();
3018 		}
3019 	}
3020 
3021 	/* snapshot the donating status while we have importance locked */
3022 	donating = ipc_importance_task_is_donor(from_task_imp);
3023 
3024 	if (III_NULL != inherit) {
3025 		/* We found one, piggyback on that */
3026 		assert(0 < III_REFS(inherit));
3027 		assert(0 < IIE_REFS(inherit->iii_from_elem));
3028 
3029 		/* Take a reference for inherit */
3030 		assert(III_REFS_MAX > III_REFS(inherit));
3031 		ipc_importance_inherit_reference_internal(inherit);
3032 
3033 		/* Reflect the inherit's change of status into the task boosts */
3034 		if (0 == III_EXTERN(inherit)) {
3035 			assert(!inherit->iii_donating);
3036 			inherit->iii_donating = donating;
3037 			if (donating) {
3038 				to_task_imp->iit_externcnt += inherit->iii_externcnt;
3039 				to_task_imp->iit_externdrop += inherit->iii_externdrop;
3040 			}
3041 		} else {
3042 			assert(donating == inherit->iii_donating);
3043 		}
3044 
3045 		/* add in a external reference for this use of the inherit */
3046 		inherit->iii_externcnt++;
3047 	} else {
3048 		/* initialize the previously allocated space */
3049 		inherit = alloc;
3050 		inherit->iii_bits = IIE_TYPE_INHERIT | 1;
3051 		inherit->iii_made = 0;
3052 		inherit->iii_externcnt = 1;
3053 		inherit->iii_externdrop = 0;
3054 		inherit->iii_depth = depth;
3055 		inherit->iii_to_task = to_task_imp;
3056 		inherit->iii_from_elem = IIE_NULL;
3057 		queue_init(&inherit->iii_kmsgs);
3058 
3059 		if (donating) {
3060 			inherit->iii_donating = TRUE;
3061 		} else {
3062 			inherit->iii_donating = FALSE;
3063 		}
3064 
3065 		/*
3066 		 * Chain our new inherit on the element it inherits from.
3067 		 * The new inherit takes our reference on from_elem.
3068 		 */
3069 		ipc_importance_inherit_link(inherit, from_elem);
3070 
3071 #if IIE_REF_DEBUG
3072 		ipc_importance_counter_init(&inherit->iii_elem);
3073 		from_elem->iie_kmsg_refs_inherited++;
3074 		task_imp->iit_elem.iie_task_refs_inherited++;
3075 #endif
3076 	}
3077 
3078 out_locked:
3079 
3080 	/* If found inherit and donating, reflect that in the task externcnt */
3081 	if (III_NULL != inherit && donating) {
3082 		to_task_imp->iit_externcnt++;
3083 		/* take the internal assertion */
3084 		ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3085 		/* may have dropped and retaken importance lock */
3086 	}
3087 
3088 	/* If we didn't create a new inherit, we have some resources to release */
3089 	if (III_NULL == inherit || inherit != alloc) {
3090 		if (IIE_NULL != from_elem) {
3091 			if (III_NULL != inherit) {
3092 				incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3093 			} else {
3094 				incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3095 			}
3096 			ipc_importance_release_locked(from_elem);
3097 			/* importance unlocked */
3098 		} else {
3099 			ipc_importance_unlock();
3100 		}
3101 
3102 		if (IIT_NULL != to_task_imp) {
3103 			if (III_NULL != inherit) {
3104 				incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3105 			}
3106 			ipc_importance_task_release(to_task_imp);
3107 		}
3108 
3109 		if (III_NULL != alloc) {
3110 			zfree(ipc_importance_inherit_zone, alloc);
3111 		}
3112 	} else {
3113 		/* from_elem and to_task_imp references transferred to new inherit */
3114 		ipc_importance_unlock();
3115 	}
3116 
3117 	return inherit;
3118 }
3119 
3120 /*
3121  *	Routine:	ipc_importance_receive
3122  *	Purpose:
3123  *		Process importance attributes in a received message.
3124  *
3125  *		If an importance voucher attribute was sent, transform
3126  *		that into an attribute value reflecting the inheritance
3127  *		from the sender to the receiver.
3128  *
3129  *		If a static boost is received (or the voucher isn't on
3130  *		a voucher-based boost), export a static boost.
3131  *	Conditions:
3132  *		Nothing locked.
3133  */
void
ipc_importance_receive(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option)
{
	/*
	 * trace disposition: -1 = nothing done, 0 = boost cleared/not applied,
	 * 1 = legacy boost externalized, 2 = voucher inherit created
	 */
	int impresult = -1;

#if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
	task_t task_self = current_task();
	unsigned int sender_pid = ipc_kmsg_get_trailer(kmsg, false)->msgh_audit.val[5];
#endif
	mach_msg_header_t *hdr = ikm_header(kmsg);

	/* convert to a voucher with an inherit importance attribute? */
	if ((option & MACH_RCV_VOUCHER) != 0) {
		/*
		 * Room for up to two recipes (a COPY of the sent voucher plus a
		 * SET_VALUE_HANDLE for the importance key) and the inline value
		 * handle payload carried by the second recipe.
		 */
		uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
		sizeof(mach_voucher_attr_value_handle_t)];
		ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
		ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
		ipc_port_t voucher_port = ipc_kmsg_get_voucher_port(kmsg);
		ipc_voucher_t recv_voucher;
		mach_voucher_attr_value_handle_t handle;
		ipc_importance_inherit_t inherit;
		kern_return_t kr;

		/* set up recipe to copy the old voucher */
		if (IP_VALID(voucher_port)) {
			ipc_voucher_t sent_voucher;

			sent_voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
			    IKOT_VOUCHER);

			recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
			recipe->command = MACH_VOUCHER_ATTR_COPY;
			recipe->previous_voucher = sent_voucher;
			recipe->content_size = 0;
			recipe_size += sizeof(*recipe);
		}

		/*
		 * create an inheritance attribute from the kmsg (may be NULL)
		 * transferring any boosts from the kmsg linkage through the
		 * port directly to the new inheritance object.
		 */
		inherit = ipc_importance_inherit_from_kmsg(kmsg);
		handle = (mach_voucher_attr_value_handle_t)inherit;

		/* inherit_from_kmsg must have severed the kmsg linkage */
		assert(IIE_NULL == kmsg->ikm_importance);

		/*
		 * Only create a new voucher if we have an inherit object
		 * (from the ikm_importance field of the incoming message), OR
		 * we have a valid incoming voucher. If we have neither of
		 * these things then there is no need to create a new voucher.
		 */
		if (IP_VALID(voucher_port) || inherit != III_NULL) {
			/* replace the importance attribute with the handle we created */
			/*  our made reference on the inherit is donated to the voucher */
			recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
			recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
			recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
			recipe->previous_voucher = IPC_VOUCHER_NULL;
			recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
			*(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
			recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);

			kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
			    recipes,
			    recipe_size,
			    &recv_voucher);
			assert(KERN_SUCCESS == kr);

			/* swap the voucher port (and set voucher bits in case it didn't already exist) */
			hdr->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
			ipc_port_release_send(voucher_port);
			voucher_port = convert_voucher_to_port(recv_voucher);
			ipc_kmsg_set_voucher_port(kmsg, voucher_port, MACH_MSG_TYPE_MOVE_SEND);
			if (III_NULL != inherit) {
				impresult = 2;
			}
		}
	} else { /* Don't want a voucher */
		/* got linked importance? have to drop */
		if (IIE_NULL != kmsg->ikm_importance) {
			ipc_importance_elem_t elem;

			ipc_importance_lock();
			elem = ipc_importance_kmsg_unlink(kmsg);
#if IIE_REF_DEBUG
			elem->iie_kmsg_refs_dropped++;
#endif
			ipc_importance_release_locked(elem);
			/* importance unlocked */
		}

		/* With kmsg unlinked, can safely examine message importance attribute. */
		if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
			ipc_port_t port = hdr->msgh_remote_port;
#if LEGACY_IMPORTANCE_DELIVERY
			ipc_importance_task_t task_imp = task_self->task_imp_base;

			/* The owner of receive right might have changed, take the internal assertion */
			if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
				ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
				impresult = 1;
			} else
#endif
			{
				/* The importance boost never applied to task (clear the bit) */
				hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
				impresult = 0;
			}

			/* Drop the boost on the port and the owner of the receive right */
			ip_mq_lock(port);
			if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
				/* on TRUE the callee consumed/released the port lock */
				ip_mq_unlock(port);
			}
		}
	}

#if IMPORTANCE_TRACE
	if (-1 < impresult) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
		    sender_pid, task_pid(task_self),
		    hdr->msgh_id, impresult, 0);
	}
	if (impresult == 2) {
		/*
		 * This probe only covers new voucher-based path.  Legacy importance
		 * will trigger the probe in ipc_importance_task_externalize_assertion()
		 * above and have impresult==1 here.
		 */
		DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self),
		    int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
	}
#endif /* IMPORTANCE_TRACE */
}
3272 
3273 /*
3274  *	Routine:	ipc_importance_unreceive
3275  *	Purpose:
3276  *		Undo receive of importance attributes in a message.
3277  *
3278  *	Conditions:
3279  *		Nothing locked.
3280  */
3281 void
ipc_importance_unreceive(ipc_kmsg_t kmsg,mach_msg_option_t __unused option)3282 ipc_importance_unreceive(
3283 	ipc_kmsg_t              kmsg,
3284 	mach_msg_option_t       __unused option)
3285 {
3286 	/* importance should already be in the voucher and out of the kmsg */
3287 	assert(IIE_NULL == kmsg->ikm_importance);
3288 	mach_msg_header_t *hdr = ikm_header(kmsg);
3289 
3290 	/* See if there is a legacy boost to be dropped from receiver */
3291 	if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3292 		ipc_importance_task_t task_imp;
3293 
3294 		hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3295 		task_imp = current_task()->task_imp_base;
3296 
3297 		if (!IP_VALID(ipc_kmsg_get_voucher_port(kmsg)) && IIT_NULL != task_imp) {
3298 			ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3299 		}
3300 		/*
3301 		 * ipc_kmsg_copyout_dest_to_user() will consume the voucher
3302 		 * and any contained importance.
3303 		 */
3304 	}
3305 }
3306 
3307 /*
3308  *	Routine:	ipc_importance_clean
3309  *	Purpose:
3310  *		Clean up importance state in a kmsg that is being cleaned.
3311  *		Unlink the importance chain if one was set up, and drop
3312  *		the reference this kmsg held on the donor.  Then check to
3313  *		if importance was carried to the port, and remove that if
3314  *		needed.
3315  *	Conditions:
3316  *		Nothing locked.
3317  */
void
ipc_importance_clean(
	ipc_kmsg_t              kmsg)
{
	ipc_port_t              port;
	mach_msg_header_t *hdr = ikm_header(kmsg);

	/* Is the kmsg still linked? If so, remove that first */
	if (IIE_NULL != kmsg->ikm_importance) {
		ipc_importance_elem_t   elem;

		ipc_importance_lock();
		elem = ipc_importance_kmsg_unlink(kmsg);
		assert(IIE_NULL != elem);
		/* drop the reference this kmsg held on the donor element */
		ipc_importance_release_locked(elem);
		/* importance unlocked */
	}

	/* See if there is a legacy importance boost to be dropped from port */
	if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
		hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
		port = hdr->msgh_remote_port;
		if (IP_VALID(port)) {
			ip_mq_lock(port);
			/* inactive ports already had their importance boosts dropped */
			if (!ip_active(port) ||
			    ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
				/* on TRUE the delta call consumed/released the port lock */
				ip_mq_unlock(port);
			}
		}
	}
}
3350 
3351 void
ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)3352 ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3353 {
3354 	assert(IIE_NULL == kmsg->ikm_importance);
3355 	assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(ikm_header(kmsg)->msgh_bits));
3356 }
3357 
3358 /*
3359  * IPC Importance Attribute Manager definition
3360  */
3361 
3362 static kern_return_t
3363 ipc_importance_release_value(
3364 	ipc_voucher_attr_manager_t              manager,
3365 	mach_voucher_attr_key_t                 key,
3366 	mach_voucher_attr_value_handle_t        value,
3367 	mach_voucher_attr_value_reference_t     sync);
3368 
3369 static kern_return_t
3370 ipc_importance_get_value(
3371 	ipc_voucher_attr_manager_t                      manager,
3372 	mach_voucher_attr_key_t                         key,
3373 	mach_voucher_attr_recipe_command_t              command,
3374 	mach_voucher_attr_value_handle_array_t          prev_values,
3375 	mach_voucher_attr_value_handle_array_size_t     prev_value_count,
3376 	mach_voucher_attr_content_t                     content,
3377 	mach_voucher_attr_content_size_t                content_size,
3378 	mach_voucher_attr_value_handle_t                *out_value,
3379 	mach_voucher_attr_value_flags_t                 *out_flags,
3380 	ipc_voucher_t                                   *out_value_voucher);
3381 
3382 static kern_return_t
3383 ipc_importance_extract_content(
3384 	ipc_voucher_attr_manager_t                      manager,
3385 	mach_voucher_attr_key_t                         key,
3386 	mach_voucher_attr_value_handle_array_t          values,
3387 	mach_voucher_attr_value_handle_array_size_t     value_count,
3388 	mach_voucher_attr_recipe_command_t              *out_command,
3389 	mach_voucher_attr_content_t                     out_content,
3390 	mach_voucher_attr_content_size_t                *in_out_content_size);
3391 
3392 static kern_return_t
3393 ipc_importance_command(
3394 	ipc_voucher_attr_manager_t                      manager,
3395 	mach_voucher_attr_key_t                         key,
3396 	mach_voucher_attr_value_handle_array_t          values,
3397 	mach_msg_type_number_t                          value_count,
3398 	mach_voucher_attr_command_t                     command,
3399 	mach_voucher_attr_content_t                     in_content,
3400 	mach_voucher_attr_content_size_t                in_content_size,
3401 	mach_voucher_attr_content_t                     out_content,
3402 	mach_voucher_attr_content_size_t                *out_content_size);
3403 
/*
 * Dispatch table registering the importance subsystem as the voucher
 * attribute manager for MACH_VOUCHER_ATTR_KEY_IMPORTANCE.
 */
const struct ipc_voucher_attr_manager ipc_importance_manager = {
	.ivam_release_value =   ipc_importance_release_value,
	.ivam_get_value =       ipc_importance_get_value,
	.ivam_extract_content = ipc_importance_extract_content,
	.ivam_command =         ipc_importance_command,
	.ivam_flags =           IVAM_FLAGS_NONE,
};

/* Sanity checks: this manager only serves the importance key and itself */
#define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
#define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3414 
3415 /*
3416  *	Routine:	ipc_importance_release_value [Voucher Attribute Manager Interface]
3417  *	Purpose:
3418  *		Release what the voucher system believes is the last "made" reference
3419  *		on an importance attribute value handle.  The sync parameter is used to
3420  *		avoid races with new made references concurrently being returned to the
3421  *		voucher system in other threads.
3422  *	Conditions:
3423  *		Nothing locked on entry.  May block.
3424  */
static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_t        value,
	mach_voucher_attr_value_reference_t     sync)
{
	ipc_importance_elem_t elem;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);
	assert(0 < sync);

	/* the value handle is the importance element itself */
	elem = (ipc_importance_elem_t)value;

	ipc_importance_lock();

	/* Any outstanding made refs? (racing made refs returned concurrently) */
	if (sync != elem->iie_made) {
		assert(sync < elem->iie_made);
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* clear made */
	elem->iie_made = 0;

	/*
	 * If there are pending external boosts represented by this attribute,
	 * drop them from the appropriate task
	 */
	if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;

		assert(inherit->iii_externcnt >= inherit->iii_externdrop);

		if (inherit->iii_donating) {
			ipc_importance_task_t imp_task = inherit->iii_to_task;
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(imp_task));
			assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
			assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
			/* migrate this inherit's external counts off the target task */
			imp_task->iit_externcnt -= inherit->iii_externcnt;
			imp_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate if needed */
			if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			/* not donating: just zero the external counts */
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}
	}

	/* drop the made reference on elem */
	ipc_importance_release_locked(elem);
	/* returns unlocked */

	return KERN_SUCCESS;
}
3490 
3491 
3492 /*
3493  *	Routine:	ipc_importance_get_value [Voucher Attribute Manager Interface]
3494  *	Purpose:
3495  *		Convert command and content data into a reference on a [potentially new]
3496  *		attribute value.  The importance attribute manager will only allow the
3497  *		caller to get a value for the current task's importance, or to redeem
3498  *		an importance attribute from an existing voucher.
3499  *	Conditions:
3500  *		Nothing locked on entry.  May block.
3501  */
3502 static kern_return_t
ipc_importance_get_value(ipc_voucher_attr_manager_t __assert_only manager,mach_voucher_attr_key_t __assert_only key,mach_voucher_attr_recipe_command_t command,mach_voucher_attr_value_handle_array_t prev_values,mach_voucher_attr_value_handle_array_size_t prev_value_count,mach_voucher_attr_content_t __unused content,mach_voucher_attr_content_size_t content_size,mach_voucher_attr_value_handle_t * out_value,mach_voucher_attr_value_flags_t * out_flags,ipc_voucher_t * out_value_voucher)3503 ipc_importance_get_value(
3504 	ipc_voucher_attr_manager_t                      __assert_only manager,
3505 	mach_voucher_attr_key_t                         __assert_only key,
3506 	mach_voucher_attr_recipe_command_t              command,
3507 	mach_voucher_attr_value_handle_array_t          prev_values,
3508 	mach_voucher_attr_value_handle_array_size_t     prev_value_count,
3509 	mach_voucher_attr_content_t                     __unused content,
3510 	mach_voucher_attr_content_size_t                content_size,
3511 	mach_voucher_attr_value_handle_t                *out_value,
3512 	mach_voucher_attr_value_flags_t                 *out_flags,
3513 	ipc_voucher_t                                   *out_value_voucher)
3514 {
3515 	ipc_importance_elem_t elem;
3516 	task_t self;
3517 
3518 	IMPORTANCE_ASSERT_MANAGER(manager);
3519 	IMPORTANCE_ASSERT_KEY(key);
3520 
3521 	if (0 != content_size) {
3522 		return KERN_INVALID_ARGUMENT;
3523 	}
3524 
3525 	*out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3526 	/* never an out voucher */
3527 
3528 	switch (command) {
3529 	case MACH_VOUCHER_ATTR_REDEEM:
3530 
3531 		/* redeem of previous values is the value */
3532 		if (0 < prev_value_count) {
3533 			elem = (ipc_importance_elem_t)prev_values[0];
3534 			assert(IIE_NULL != elem);
3535 
3536 			ipc_importance_lock();
3537 			assert(0 < elem->iie_made);
3538 			elem->iie_made++;
3539 			ipc_importance_unlock();
3540 
3541 			*out_value = prev_values[0];
3542 			return KERN_SUCCESS;
3543 		}
3544 
3545 		/* redeem of default is default */
3546 		*out_value = 0;
3547 		*out_value_voucher = IPC_VOUCHER_NULL;
3548 		return KERN_SUCCESS;
3549 
3550 	case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3551 		self = current_task();
3552 
3553 		elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3554 		/* made reference added (or IIE_NULL which isn't referenced) */
3555 
3556 		*out_value = (mach_voucher_attr_value_handle_t)elem;
3557 		*out_value_voucher = IPC_VOUCHER_NULL;
3558 		return KERN_SUCCESS;
3559 
3560 	default:
3561 		/*
3562 		 * every other command is unknown
3563 		 *
3564 		 * Specifically, there is no mechanism provided to construct an
3565 		 * importance attribute for a task/process from just a pid or
3566 		 * task port.  It has to be copied (or redeemed) from a previous
3567 		 * voucher that has it.
3568 		 */
3569 		return KERN_INVALID_ARGUMENT;
3570 	}
3571 }
3572 
3573 /*
3574  *	Routine:	ipc_importance_extract_content [Voucher Attribute Manager Interface]
3575  *	Purpose:
3576  *		Extract meaning from the attribute value present in a voucher.  While
3577  *		the real goal is to provide commands and data that can reproduce the
3578  *		voucher's value "out of thin air", this isn't possible with importance
3579  *		attribute values.  Instead, return debug info to help track down dependencies.
3580  *	Conditions:
3581  *		Nothing locked on entry.  May block.
3582  */
static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t                      __assert_only manager,
	mach_voucher_attr_key_t                         __assert_only key,
	mach_voucher_attr_value_handle_array_t          values,
	mach_voucher_attr_value_handle_array_size_t     value_count,
	mach_voucher_attr_recipe_command_t              *out_command,
	mach_voucher_attr_value_handle_array_size_t     __unused value_count_unused_placeholder, /* NOTE(review): remove if mis-pasted */
	mach_voucher_attr_content_t                     out_content,
	mach_voucher_attr_content_size_t                *in_out_content_size)
{
	ipc_importance_elem_t elem;
	unsigned int i;

	/* cursor state for building the human-readable description */
	char *buf = (char *)out_content;
	mach_voucher_attr_content_size_t size = *in_out_content_size;
	mach_voucher_attr_content_size_t pos = 0;
	__unused int pid;   /* only used in the non-DEVELOPMENT/DEBUG path */

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (size < 1) {
		/* rdar://110276886 we need space for the terminating NUL */
		return KERN_NO_SPACE;
	}

	/* the first non-default value provides the data */
	for (i = 0; i < value_count; i++) {
		elem = (ipc_importance_elem_t)values[i];
		if (IIE_NULL == elem) {
			continue;
		}

		pos += scnprintf(buf + pos, size - pos, "Importance for ");

		/* walk the inheritance chain from this element back to its origin */
		for (;;) {
			ipc_importance_inherit_t inherit = III_NULL;
			ipc_importance_task_t task_imp;

			/* resolve the task-importance element this link refers to */
			if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
				task_imp = (ipc_importance_task_t)elem;
			} else {
				inherit = (ipc_importance_inherit_t)elem;
				task_imp = inherit->iii_to_task;
			}
#if DEVELOPMENT || DEBUG
			/* debug kernels cache procname/pid on the element itself */
			pos += scnprintf(buf + pos, size - pos, "%s[%d]",
			    task_imp->iit_procname, task_imp->iit_bsd_pid);
#else
			/* release kernels look up the pid under the importance lock */
			ipc_importance_lock();
			pid = task_importance_task_get_pid(task_imp);
			ipc_importance_unlock();
			pos += scnprintf(buf + pos, size - pos, "pid %d", pid);
#endif /* DEVELOPMENT || DEBUG */

			/* a plain task element terminates the chain */
			if (III_NULL == inherit) {
				break;
			}
			pos += scnprintf(buf + pos, size - pos,
			    " (%d of %d boosts) %s from ",
			    III_EXTERN(inherit), inherit->iii_externcnt,
			    (inherit->iii_donating) ? "donated" : "linked");
			elem = inherit->iii_from_elem;
		}

		pos++; /* account for terminating \0 */
		break;
	}
	*out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
	*in_out_content_size = pos;
	return KERN_SUCCESS;
}
3655 
3656 /*
3657  *	Routine:	ipc_importance_command [Voucher Attribute Manager Interface]
3658  *	Purpose:
3659  *		Run commands against the importance attribute value found in a voucher.
3660  *		No such commands are currently supported.
3661  *	Conditions:
3662  *		Nothing locked on entry.  May block.
3663  */
static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_array_t  values,
	mach_msg_type_number_t                  value_count,
	mach_voucher_attr_command_t             command,
	mach_voucher_attr_content_t             in_content,
	mach_voucher_attr_content_size_t        in_content_size,
	mach_voucher_attr_content_t             out_content,
	mach_voucher_attr_content_size_t        *out_content_size)
{
	ipc_importance_inherit_t inherit;
	ipc_importance_task_t to_task;
	uint32_t refs, *outrefsp;
	mach_msg_type_number_t i;
	uint32_t externcnt;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	/* in-content is a single uint32 ref count; out-content is optional */
	if (in_content_size != sizeof(refs) ||
	    (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
		return KERN_INVALID_ARGUMENT;
	}
	refs = *(uint32_t *)(void *)in_content;
	outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;

	/* DROP_EXTERNAL is the only supported importance command */
	if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
		return KERN_NOT_SUPPORTED;
	}

	/* the first non-default value of the apropos type provides the data */
	inherit = III_NULL;
	for (i = 0; i < value_count; i++) {
		ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];

		if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
			inherit = (ipc_importance_inherit_t)elem;
			break;
		}
	}
	if (III_NULL == inherit) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();

	/* dropping zero refs is a query: just report the current extern count */
	if (0 == refs) {
		if (NULL != outrefsp) {
			*outrefsp = III_EXTERN(inherit);
		}
		ipc_importance_unlock();
		return KERN_SUCCESS;
	}

	to_task = inherit->iii_to_task;
	assert(ipc_importance_task_is_any_receiver_type(to_task));

	/* if not donating to a denap receiver, it was called incorrectly */
	if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
		ipc_importance_unlock();
		return KERN_INVALID_TASK; /* keeps dispatch happy */
	}

	/* Enough external references left to drop? */
	if (III_EXTERN(inherit) < refs) {
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* re-base external and internal counters at the inherit and the to-task (if apropos) */
	if (inherit->iii_donating) {
		/* the to-task's counters must cover this inherit's contribution */
		assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
		assert(to_task->iit_externcnt >= inherit->iii_externcnt);
		assert(to_task->iit_externdrop >= inherit->iii_externdrop);
		inherit->iii_externdrop += refs;
		to_task->iit_externdrop += refs;
		externcnt = III_EXTERN(inherit);
		if (0 == externcnt) {
			/*
			 * Last external boost dropped: stop donating and
			 * subtract this inherit's contribution from the
			 * to-task's external counters.
			 */
			inherit->iii_donating = FALSE;
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;


			/* Start AppNap delay hysteresis - even if not the last boost for the task. */
			if (ipc_importance_delayed_drop_call != NULL &&
			    ipc_importance_task_is_marked_denap_receiver(to_task)) {
				ipc_importance_task_delayed_drop(to_task);
			}

			/* drop task assertions associated with the dropped boosts */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
				/* may have dropped and retaken importance lock */
			}
		} else {
			/* assert(to_task->iit_assertcnt >= refs + externcnt); */
			/* defensive deduction in case of assertcnt underflow */
			if (to_task->iit_assertcnt > refs + externcnt) {
				to_task->iit_assertcnt -= refs;
			} else {
				to_task->iit_assertcnt = externcnt;
			}
		}
	} else {
		/* not donating: only the inherit's own counters are affected */
		inherit->iii_externdrop += refs;
		externcnt = III_EXTERN(inherit);
	}

	/* capture result (if requested) */
	if (NULL != outrefsp) {
		*outrefsp = externcnt;
	}

	ipc_importance_unlock();
	return KERN_SUCCESS;
}
3782 
3783 /*
3784  *	Routine:	ipc_importance_init
3785  *	Purpose:
3786  *		Initialize the  IPC importance manager.
3787  *	Conditions:
3788  *		Zones and Vouchers are already initialized.
3789  */
__startup_func
static void
ipc_importance_init(void)
{
	/*
	 * Register the importance attribute manager with the voucher
	 * subsystem under its well-known key; the default value handle
	 * is 0 (no importance element).
	 */
	ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
	    (mach_voucher_attr_value_handle_t)0,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    &ipc_importance_control);
}
STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_importance_init);
3800 
3801 /*
3802  *	Routine:	ipc_importance_thread_call_init
3803  *	Purpose:
3804  *		Initialize the IPC importance code dependent upon
3805  *		thread-call support being available.
3806  *	Conditions:
3807  *		Thread-call mechanism is already initialized.
3808  */
3809 __startup_func
3810 static void
ipc_importance_thread_call_init(void)3811 ipc_importance_thread_call_init(void)
3812 {
3813 	/* initialize delayed drop queue and thread-call */
3814 	queue_init(&ipc_importance_delayed_drop_queue);
3815 	ipc_importance_delayed_drop_call =
3816 	    thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3817 	if (NULL == ipc_importance_delayed_drop_call) {
3818 		panic("ipc_importance_init");
3819 	}
3820 }
3821 STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, ipc_importance_thread_call_init);
3822 
/*
 * Routine: task_importance_list_pids
 * Purpose: list pids to which the given task is donating importance.
 * Conditions: To be called only from kdp stackshot code.
 *             Will panic the system otherwise.
 */
extern int
task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
{
	/*
	 * Bail out (returning an empty list) if the importance lock is held
	 * (we are in the debugger and cannot safely walk the structures),
	 * or if any argument is unusable.
	 */
	if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
	    max_count < 1 ||
	    task->task_imp_base == IIT_NULL ||
	    pid_list == NULL ||
	    flags != TASK_IMP_LIST_DONATING_PIDS) {
		return 0;
	}
	unsigned int pidcount = 0;
	ipc_importance_task_t task_imp = task->task_imp_base;
	ipc_kmsg_t temp_kmsg;
	mach_msg_header_t *temp_hdr;
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_elem_t elem;
	int target_pid = 0, previous_pid;

	/* pass 1: pids reachable through live inherit links */
	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		/* check space in buffer */
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;

		if (temp_inherit->iii_donating) {
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		/* skip invalid pids and immediate duplicates of the last entry */
		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	/* pass 2: pids reachable through boosting kmsgs still in flight */
	target_pid = 0;
	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;
		elem = temp_kmsg->ikm_importance;

		if (elem == IIE_NULL) {
			continue;
		}

		temp_hdr = ikm_header(temp_kmsg);

		/* only messages actually carrying a raised-importance bit count */
		if (!(temp_hdr &&
		    MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_hdr->msgh_bits))) {
			continue;
		}

		/* resolve the element to the task-importance it targets */
		if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
			ipc_importance_task_t temp_iit = (ipc_importance_task_t)elem;
			target_pid = task_importance_task_get_pid(temp_iit);
		} else {
			temp_inherit = (ipc_importance_inherit_t)elem;
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		/* skip invalid pids and immediate duplicates of the last entry */
		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	return pidcount;
}
3903