xref: /xnu-12377.41.6/osfmk/kern/task_policy.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/policy_internal.h>
30 #include <mach/task_policy.h>
31 #include <mach/task.h>
32 #include <mach/mach_types.h>
33 #include <mach/task_server.h>
34 #include <kern/host.h>                  /* host_priv_self()        */
35 #include <mach/host_priv.h>             /* host_get_special_port() */
36 #include <mach/host_special_ports.h>    /* RESOURCE_NOTIFY_PORT    */
37 #include <kern/sched.h>
38 #include <kern/task.h>
39 #include <mach/thread_policy.h>
40 #include <sys/errno.h>
41 #include <sys/resource.h>
42 #include <machine/limits.h>
43 #include <kern/ledger.h>
44 #include <kern/thread_call.h>
45 #include <kern/sfi.h>
46 #include <kern/coalition.h>
47 #if CONFIG_TELEMETRY
48 #include <kern/telemetry.h>
49 #endif
50 #if !defined(XNU_TARGET_OS_OSX)
51 #include <kern/kalloc.h>
52 #include <sys/errno.h>
53 #endif /* !defined(XNU_TARGET_OS_OSX) */
54 
55 #if IMPORTANCE_INHERITANCE
56 #include <ipc/ipc_importance.h>
57 #if IMPORTANCE_TRACE
58 #include <mach/machine/sdt.h>
59 #endif /* IMPORTANCE_TRACE */
60 #endif /* IMPORTANCE_INHERITANCE */
61 
62 #include <sys/kdebug.h>
63 
64 /*
65  *  Task Policy
66  *
67  *  This subsystem manages task and thread IO priority and backgrounding,
68  *  as well as importance inheritance, process suppression, task QoS, and apptype.
69  *  These properties have a surprising number of complex interactions, so they are
70  *  centralized here in one state machine to simplify the implementation of those interactions.
71  *
72  *  Architecture:
73  *  Threads and tasks have two policy fields: requested, effective.
74  *  Requested represents the wishes of each interface that influences task policy.
75  *  Effective represents the distillation of that policy into a set of behaviors.
76  *
77  *  Each thread making a modification in the policy system passes a 'pending' struct,
78  *  which tracks updates that will be applied after dropping the policy engine lock.
79  *
80  *  Each interface that has an input into the task policy state machine controls a field in requested.
81  *  If the interface has a getter, it returns what is in the field in requested, but that is
82  *  not necessarily what is actually in effect.
83  *
84  *  All kernel subsystems that behave differently based on task policy call into
85  *  the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine
86  *  for that subsystem by querying only the 'effective' field.
87  *
88  *  Policy change operations:
89  *  Here are the steps to change a policy on a task or thread:
90  *  1) Lock task
91  *  2) Change requested field for the relevant policy
92  *  3) Run a task policy update, which recalculates effective based on requested,
93  *     then takes a diff between the old and new versions of requested and calls the relevant
94  *     other subsystems to apply these changes, and updates the pending field.
95  *  4) Unlock task
96  *  5) Run task policy update complete, which looks at the pending field to update
97  *     subsystems which cannot be touched while holding the task lock.
98  *
99  *  To add a new requested policy, add the field in the requested struct, the flavor in task.h,
100  *  the setter and getter in proc_(set|get)_task_policy*,
101  *  then set up the effects of that behavior in task_policy_update*. If the policy manifests
102  *  itself as a distinct effective policy, add it to the effective struct and add it to the
103  *  proc_get_effective_task_policy accessor.
104  *
105  *  Most policies are set via proc_set_task_policy, but policies that don't fit that interface
106  *  roll their own lock/set/update/unlock/complete code inside this file.
107  *
108  *
109  *  Suppression policy
110  *
111  *  These are a set of behaviors that can be requested for a task.  They currently have specific
112  *  implied actions when they're enabled, but they may be made customizable in the future.
113  *
114  *  When the affected task is boosted, we temporarily disable the suppression behaviors
115  *  so that the affected process has a chance to run and can call the API to permanently
116  *  disable the suppression behaviors.
117  *
118  *  Locking
119  *
120  *  Changing task policy on a task takes the task lock.
121  *  Changing task policy on a thread takes the thread mutex.
122  *  Task policy changes that affect threads will take each thread's mutex to update it if necessary.
123  *
124  *  Querying the effective policy does not take a lock, because callers
125  *  may run in interrupt context or other places where locks are not OK.
126  *
127  *  This means that any notification of state change needs to be externally synchronized.
128  *  We do this by idempotent callouts after the state has changed to ask
129  *  other subsystems to update their view of the world.
130  *
131  * TODO: Move all cpu/wakes/io monitor code into a separate file
132  * TODO: Move all importance code over to importance subsystem
133  * TODO: Move all taskwatch code into a separate file
134  * TODO: Move all VM importance code into a separate file
135  */
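
/*
 * Illustrative sketch of the five steps above, mirroring what
 * proc_set_task_policy() in this file does (the names are the real
 * functions below; the flavor/value pair is an arbitrary example):
 *
 *     struct task_pend_token pend_token = {};
 *
 *     task_lock(task);                                  // 1) lock task
 *     proc_set_task_policy_locked(task,                 // 2) change requested
 *         TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
 *         TASK_FOREGROUND_APPLICATION, 0);
 *     task_policy_update_locked(task, &pend_token);     // 3) recompute effective,
 *                                                       //    note pended work
 *     task_unlock(task);                                // 4) unlock task
 *     task_policy_update_complete_unlocked(task, &pend_token); // 5) pended updates
 */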
136 
137 /* Task policy related helper functions */
138 static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2);
139 
140 static void task_policy_update_locked(task_t task, task_pend_token_t pend_token);
141 static void task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token);
142 
143 /* For attributes that have two scalars as input/output */
144 static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2);
145 static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2);
146 
147 static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role, task_pend_token_t pend_token);
148 
149 static uint64_t task_requested_bitfield(task_t task);
150 static uint64_t task_effective_bitfield(task_t task);
151 
152 /* Convenience functions for munging a policy bitfield into a tracepoint */
153 static uintptr_t trequested_0(task_t task);
154 static uintptr_t trequested_1(task_t task);
155 static uintptr_t teffective_0(task_t task);
156 static uintptr_t teffective_1(task_t task);
157 
158 /* CPU limits helper functions */
159 static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
160 static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
161 static int task_enable_cpumon_locked(task_t task);
162 static int task_disable_cpumon(task_t task);
163 static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
164 static int task_apply_resource_actions(task_t task, int type);
165 static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);
166 
167 #ifdef MACH_BSD
168 typedef struct proc *   proc_t;
169 int                     proc_pid(struct proc *proc);
170 extern int              proc_selfpid(void);
171 extern char *           proc_name_address(void *p);
172 extern const char *     proc_best_name(proc_t proc);
173 
174 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg,
175     char *buffer, uint32_t buffersize,
176     int32_t *retval);
177 #endif /* MACH_BSD */
178 
179 
180 #if CONFIG_TASKWATCH
181 /* Taskwatch related helper functions */
182 static void set_thread_appbg(thread_t thread, int setbg, int importance);
183 static void add_taskwatch_locked(task_t task, task_watch_t * twp);
184 static void remove_taskwatch_locked(task_t task, task_watch_t * twp);
185 static void task_watch_lock(void);
186 static void task_watch_unlock(void);
187 static void apply_appstate_watchers(task_t task);
188 
189 typedef struct task_watcher {
190 	queue_chain_t   tw_links;       /* queueing of threads */
191 	task_t          tw_task;        /* task that is being watched */
192 	thread_t        tw_thread;      /* thread that is watching the watch_task */
193 	int             tw_state;       /* the current app state of the thread */
194 	int             tw_importance;  /* importance prior to backgrounding */
195 } task_watch_t;
196 
197 typedef struct thread_watchlist {
198 	thread_t        thread;         /* thread being worked on for taskwatch action */
199 	int             importance;     /* importance to be restored if thread is being made active */
200 } thread_watchlist_t;
201 
202 #endif /* CONFIG_TASKWATCH */
203 
204 extern int memorystatus_update_priority_for_appnap(proc_t p);
205 
206 /* Importance Inheritance related helper functions */
207 
208 #if IMPORTANCE_INHERITANCE
209 
210 static void task_importance_mark_live_donor(task_t task, boolean_t donating);
211 static void task_importance_mark_receiver(task_t task, boolean_t receiving);
212 static void task_importance_mark_denap_receiver(task_t task, boolean_t denap);
213 
214 static boolean_t task_is_marked_live_importance_donor(task_t task);
215 static boolean_t task_is_importance_receiver(task_t task);
216 static boolean_t task_is_importance_denap_receiver(task_t task);
217 
218 static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count);
219 
220 static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
221 static void task_importance_update_live_donor(task_t target_task);
222 
223 static void task_set_boost_locked(task_t task, boolean_t boost_active);
224 
225 #endif /* IMPORTANCE_INHERITANCE */
226 
227 #if IMPORTANCE_TRACE
228 #define __imptrace_only
229 #else /* IMPORTANCE_TRACE */
230 #define __imptrace_only __unused
231 #endif /* IMPORTANCE_TRACE */
232 
233 #if IMPORTANCE_INHERITANCE
234 #define __imp_only
235 #else
236 #define __imp_only __unused
237 #endif
238 
239 /*
240  * Default parameters for certain policies
241  */
242 
243 int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
244 int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
245 
246 int proc_graphics_timer_qos   = (LATENCY_QOS_TIER_0 & 0xFF);
247 
248 const int proc_default_bg_iotier  = THROTTLE_LEVEL_TIER2;
249 
250 /* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
251 const struct task_requested_policy default_task_requested_policy = {
252 	.trp_bg_iotier = proc_default_bg_iotier
253 };
254 const struct task_effective_policy default_task_effective_policy = {};
255 
256 /*
257  * Default parameters for CPU usage monitor.
258  *
259  * Default setting is 50% over 3 minutes.
260  */
261 #define         DEFAULT_CPUMON_PERCENTAGE 50
262 #define         DEFAULT_CPUMON_INTERVAL   (3 * 60)
263 
264 uint8_t         proc_max_cpumon_percentage;
265 uint64_t        proc_max_cpumon_interval;
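
/*
 * Worked example with the defaults above: DEFAULT_CPUMON_PERCENTAGE over
 * DEFAULT_CPUMON_INTERVAL allows roughly 50% * 180s = 90 CPU-seconds of
 * execution per 3-minute window before the CPU usage monitor triggers.
 */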
266 
267 kern_return_t
268 qos_latency_policy_validate(task_latency_qos_t ltier)
269 {
270 	if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
271 	    ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) {
272 		return KERN_INVALID_ARGUMENT;
273 	}
274 
275 	return KERN_SUCCESS;
276 }
277 
278 kern_return_t
279 qos_throughput_policy_validate(task_throughput_qos_t ttier)
280 {
281 	if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
282 	    ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) {
283 		return KERN_INVALID_ARGUMENT;
284 	}
285 
286 	return KERN_SUCCESS;
287 }
288 
289 static kern_return_t
290 task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count)
291 {
292 	if (count < TASK_QOS_POLICY_COUNT) {
293 		return KERN_INVALID_ARGUMENT;
294 	}
295 
296 	task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
297 	task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;
298 
299 	kern_return_t kr = qos_latency_policy_validate(ltier);
300 
301 	if (kr != KERN_SUCCESS) {
302 		return kr;
303 	}
304 
305 	kr = qos_throughput_policy_validate(ttier);
306 
307 	return kr;
308 }
309 
310 uint32_t
311 qos_extract(uint32_t qv)
312 {
313 	return qv & 0xFF;
314 }
315 
316 uint32_t
317 qos_latency_policy_package(uint32_t qv)
318 {
319 	return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
320 }
321 
322 uint32_t
323 qos_throughput_policy_package(uint32_t qv)
324 {
325 	return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
326 }
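
/*
 * Illustrative round trip (assuming the tier layout implied above, where a
 * QoS tier constant carries a tag in its upper bits and the tier value in
 * its low byte): qos_extract() strips the tag, and the package functions
 * restore it (0xFF for latency, 0xFE for throughput):
 *
 *     uint32_t lqos = qos_extract(LATENCY_QOS_TIER_0);  // low byte only
 *     uint32_t pkg  = qos_latency_policy_package(lqos); // (0xFF << 16) | lqos
 */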
327 
328 #define TASK_POLICY_SUPPRESSION_DISABLE  0x1
329 #define TASK_POLICY_SUPPRESSION_IOTIER2  0x2
330 #define TASK_POLICY_SUPPRESSION_NONDONOR 0x4
331 /* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
332 static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 |
333     TASK_POLICY_SUPPRESSION_NONDONOR;
334 
335 static void
336 task_set_requested_apptype(task_t task, uint64_t apptype, __unused boolean_t update_tg_flag)
337 {
338 	task->requested_policy.trp_apptype = apptype;
339 #if CONFIG_THREAD_GROUPS
340 	if (update_tg_flag && task_is_app(task)) {
341 		task_coalition_thread_group_application_set(task);
342 	}
343 #endif /* CONFIG_THREAD_GROUPS */
344 }
345 
346 kern_return_t
347 task_policy_set(
348 	task_t                                  task,
349 	task_policy_flavor_t    flavor,
350 	task_policy_t                   policy_info,
351 	mach_msg_type_number_t  count)
352 {
353 	kern_return_t           result = KERN_SUCCESS;
354 
355 	if (task == TASK_NULL || task == kernel_task) {
356 		return KERN_INVALID_ARGUMENT;
357 	}
358 
359 	switch (flavor) {
360 	case TASK_CATEGORY_POLICY: {
361 		task_category_policy_t info = (task_category_policy_t)policy_info;
362 
363 		if (count < TASK_CATEGORY_POLICY_COUNT) {
364 			return KERN_INVALID_ARGUMENT;
365 		}
366 
367 #if !defined(XNU_TARGET_OS_OSX)
368 		/* On embedded, you can't modify your own role. */
369 		if (current_task() == task) {
370 			return KERN_INVALID_ARGUMENT;
371 		}
372 #endif
373 
374 		switch (info->role) {
375 		case TASK_FOREGROUND_APPLICATION:
376 		case TASK_BACKGROUND_APPLICATION:
377 		case TASK_DEFAULT_APPLICATION:
378 			proc_set_task_policy(task,
379 			    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
380 			    info->role);
381 			break;
382 
383 		case TASK_CONTROL_APPLICATION:
384 			if (task != current_task() || !task_is_privileged(task)) {
385 				result = KERN_INVALID_ARGUMENT;
386 			} else {
387 				proc_set_task_policy(task,
388 				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
389 				    info->role);
390 			}
391 			break;
392 
393 		case TASK_GRAPHICS_SERVER:
394 			/* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
395 			if (task != current_task() || !task_is_privileged(task)) {
396 				result = KERN_INVALID_ARGUMENT;
397 			} else {
398 				proc_set_task_policy(task,
399 				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
400 				    info->role);
401 			}
402 			break;
403 		default:
404 			result = KERN_INVALID_ARGUMENT;
405 			break;
406 		} /* switch (info->role) */
407 
408 		break;
409 	}
410 
411 /* Desired energy-efficiency/performance "quality-of-service" */
412 	case TASK_BASE_QOS_POLICY:
413 	case TASK_OVERRIDE_QOS_POLICY:
414 	{
415 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
416 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
417 
418 		if (kr != KERN_SUCCESS) {
419 			return kr;
420 		}
421 
422 
423 		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
424 		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);
425 
426 		proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE,
427 		    flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS,
428 		    lqos, tqos);
429 	}
430 	break;
431 
432 	case TASK_BASE_LATENCY_QOS_POLICY:
433 	{
434 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
435 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
436 
437 		if (kr != KERN_SUCCESS) {
438 			return kr;
439 		}
440 
441 		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
442 
443 		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos);
444 	}
445 	break;
446 
447 	case TASK_BASE_THROUGHPUT_QOS_POLICY:
448 	{
449 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
450 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
451 
452 		if (kr != KERN_SUCCESS) {
453 			return kr;
454 		}
455 
456 		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);
457 
458 		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos);
459 	}
460 	break;
461 
462 	case TASK_SUPPRESSION_POLICY:
463 	{
464 #if !defined(XNU_TARGET_OS_OSX)
465 		/*
466 		 * Suppression policy is not enabled for embedded
467 		 * because apps aren't marked as denap receivers
468 		 */
469 		result = KERN_INVALID_ARGUMENT;
470 		break;
471 #else /* !defined(XNU_TARGET_OS_OSX) */
472 
473 		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;
474 
475 		if (count < TASK_SUPPRESSION_POLICY_COUNT) {
476 			return KERN_INVALID_ARGUMENT;
477 		}
478 
479 		struct task_qos_policy qosinfo;
480 
481 		qosinfo.task_latency_qos_tier = info->timer_throttle;
482 		qosinfo.task_throughput_qos_tier = info->throughput_qos;
483 
484 		kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);
485 
486 		if (kr != KERN_SUCCESS) {
487 			return kr;
488 		}
489 
490 		/* TEMPORARY disablement of task suppression */
491 		if (info->active &&
492 		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) {
493 			return KERN_SUCCESS;
494 		}
495 
496 		struct task_pend_token pend_token = {};
497 
498 		task_lock(task);
499 
500 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
501 		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
502 		    proc_selfpid(), task_pid(task), trequested_0(task),
503 		    trequested_1(task), 0);
504 
505 		task->requested_policy.trp_sup_active      = (info->active)         ? 1 : 0;
506 		task->requested_policy.trp_sup_lowpri_cpu  = (info->lowpri_cpu)     ? 1 : 0;
507 		task->requested_policy.trp_sup_timer       = qos_extract(info->timer_throttle);
508 		task->requested_policy.trp_sup_disk        = (info->disk_throttle)  ? 1 : 0;
509 		task->requested_policy.trp_sup_throughput  = qos_extract(info->throughput_qos);
510 		task->requested_policy.trp_sup_cpu         = (info->suppressed_cpu) ? 1 : 0;
511 		task->requested_policy.trp_sup_bg_sockets  = (info->background_sockets) ? 1 : 0;
512 
513 		task_policy_update_locked(task, &pend_token);
514 
515 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
516 		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
517 		    proc_selfpid(), task_pid(task), trequested_0(task),
518 		    trequested_1(task), 0);
519 
520 		task_unlock(task);
521 
522 		task_policy_update_complete_unlocked(task, &pend_token);
523 
524 		break;
525 
526 #endif /* !defined(XNU_TARGET_OS_OSX) */
527 	}
528 
529 	default:
530 		result = KERN_INVALID_ARGUMENT;
531 		break;
532 	}
533 
534 	return result;
535 }
536 
537 /* Sets BSD 'nice' value on the task */
538 kern_return_t
539 task_importance(
540 	task_t                          task,
541 	integer_t                       importance)
542 {
543 	if (task == TASK_NULL || task == kernel_task) {
544 		return KERN_INVALID_ARGUMENT;
545 	}
546 
547 	task_lock(task);
548 
549 	if (!task->active) {
550 		task_unlock(task);
551 
552 		return KERN_TERMINATED;
553 	}
554 
555 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
556 		task_unlock(task);
557 
558 		return KERN_INVALID_ARGUMENT;
559 	}
560 
561 	task->importance = importance;
562 
563 	struct task_pend_token pend_token = {};
564 
565 	task_policy_update_locked(task, &pend_token);
566 
567 	task_unlock(task);
568 
569 	task_policy_update_complete_unlocked(task, &pend_token);
570 
571 	return KERN_SUCCESS;
572 }
573 
574 kern_return_t
575 task_policy_get(
576 	task_t                                  task,
577 	task_policy_flavor_t    flavor,
578 	task_policy_t                   policy_info,
579 	mach_msg_type_number_t  *count,
580 	boolean_t                               *get_default)
581 {
582 	if (task == TASK_NULL || task == kernel_task) {
583 		return KERN_INVALID_ARGUMENT;
584 	}
585 
586 	switch (flavor) {
587 	case TASK_CATEGORY_POLICY:
588 	{
589 		task_category_policy_t          info = (task_category_policy_t)policy_info;
590 
591 		if (*count < TASK_CATEGORY_POLICY_COUNT) {
592 			return KERN_INVALID_ARGUMENT;
593 		}
594 
595 		if (*get_default) {
596 			info->role = TASK_UNSPECIFIED;
597 		} else {
598 			info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
599 		}
600 		break;
601 	}
602 
603 	case TASK_BASE_QOS_POLICY: /* FALLTHRU */
604 	case TASK_OVERRIDE_QOS_POLICY:
605 	{
606 		task_qos_policy_t info = (task_qos_policy_t)policy_info;
607 
608 		if (*count < TASK_QOS_POLICY_COUNT) {
609 			return KERN_INVALID_ARGUMENT;
610 		}
611 
612 		if (*get_default) {
613 			info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
614 			info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
615 		} else if (flavor == TASK_BASE_QOS_POLICY) {
616 			int value1, value2;
617 
618 			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);
619 
620 			info->task_latency_qos_tier = qos_latency_policy_package(value1);
621 			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
622 		} else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
623 			int value1, value2;
624 
625 			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);
626 
627 			info->task_latency_qos_tier = qos_latency_policy_package(value1);
628 			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
629 		}
630 
631 		break;
632 	}
633 
634 	case TASK_POLICY_STATE:
635 	{
636 		task_policy_state_t info = (task_policy_state_t)policy_info;
637 
638 		if (*count < TASK_POLICY_STATE_COUNT) {
639 			return KERN_INVALID_ARGUMENT;
640 		}
641 
642 		/* Only root can get this info */
643 		if (!task_is_privileged(current_task())) {
644 			return KERN_PROTECTION_FAILURE;
645 		}
646 
647 		if (*get_default) {
648 			info->requested = 0;
649 			info->effective = 0;
650 			info->pending = 0;
651 			info->imp_assertcnt = 0;
652 			info->imp_externcnt = 0;
653 			info->flags = 0;
654 			info->imp_transitions = 0;
655 		} else {
656 			task_lock(task);
657 
658 			info->requested = task_requested_bitfield(task);
659 			info->effective = task_effective_bitfield(task);
660 			info->pending   = 0;
661 
662 			info->tps_requested_policy = *(uint64_t*)(&task->requested_policy);
663 			info->tps_effective_policy = *(uint64_t*)(&task->effective_policy);
664 
665 			info->flags = 0;
666 			if (task->task_imp_base != NULL) {
667 				info->imp_assertcnt = task->task_imp_base->iit_assertcnt;
668 				info->imp_externcnt = IIT_EXTERN(task->task_imp_base);
669 				info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0);
670 				info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0);
671 				info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0);
672 				info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0);
673 				info->flags |= (get_task_pidsuspended(task) ? TASK_IS_PIDSUSPENDED : 0);
674 				info->imp_transitions = task->task_imp_base->iit_transitions;
675 			} else {
676 				info->imp_assertcnt = 0;
677 				info->imp_externcnt = 0;
678 				info->imp_transitions = 0;
679 			}
680 			task_unlock(task);
681 		}
682 
683 		break;
684 	}
685 
686 	case TASK_SUPPRESSION_POLICY:
687 	{
688 		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;
689 
690 		if (*count < TASK_SUPPRESSION_POLICY_COUNT) {
691 			return KERN_INVALID_ARGUMENT;
692 		}
693 
694 		task_lock(task);
695 
696 		if (*get_default) {
697 			info->active            = 0;
698 			info->lowpri_cpu        = 0;
699 			info->timer_throttle    = LATENCY_QOS_TIER_UNSPECIFIED;
700 			info->disk_throttle     = 0;
701 			info->cpu_limit         = 0;
702 			info->suspend           = 0;
703 			info->throughput_qos    = 0;
704 			info->suppressed_cpu    = 0;
705 		} else {
706 			info->active            = task->requested_policy.trp_sup_active;
707 			info->lowpri_cpu        = task->requested_policy.trp_sup_lowpri_cpu;
708 			info->timer_throttle    = qos_latency_policy_package(task->requested_policy.trp_sup_timer);
709 			info->disk_throttle     = task->requested_policy.trp_sup_disk;
710 			info->cpu_limit         = 0;
711 			info->suspend           = 0;
712 			info->throughput_qos    = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput);
713 			info->suppressed_cpu    = task->requested_policy.trp_sup_cpu;
714 			info->background_sockets = task->requested_policy.trp_sup_bg_sockets;
715 		}
716 
717 		task_unlock(task);
718 		break;
719 	}
720 
721 	default:
722 		return KERN_INVALID_ARGUMENT;
723 	}
724 
725 	return KERN_SUCCESS;
726 }
727 
728 /*
729  * Called at task creation
730  * We calculate the correct effective but don't apply it to anything yet.
731  * The threads, etc will inherit from the task as they get created.
732  */
733 void
734 task_policy_create(task_t task, task_t parent_task)
735 {
736 	task_set_requested_apptype(task, parent_task->requested_policy.trp_apptype, true);
737 
738 	task->requested_policy.trp_int_darwinbg     = parent_task->requested_policy.trp_int_darwinbg;
739 	task->requested_policy.trp_ext_darwinbg     = parent_task->requested_policy.trp_ext_darwinbg;
740 	task->requested_policy.trp_int_iotier       = parent_task->requested_policy.trp_int_iotier;
741 	task->requested_policy.trp_ext_iotier       = parent_task->requested_policy.trp_ext_iotier;
742 	task->requested_policy.trp_int_iopassive    = parent_task->requested_policy.trp_int_iopassive;
743 	task->requested_policy.trp_ext_iopassive    = parent_task->requested_policy.trp_ext_iopassive;
744 	task->requested_policy.trp_bg_iotier        = parent_task->requested_policy.trp_bg_iotier;
745 	task->requested_policy.trp_terminated       = parent_task->requested_policy.trp_terminated;
746 	task->requested_policy.trp_qos_clamp        = parent_task->requested_policy.trp_qos_clamp;
747 
748 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) {
749 		/* Do not update the apptype for exec copy task */
750 		if (parent_task->requested_policy.trp_boosted) {
751 			task_set_requested_apptype(task, TASK_APPTYPE_DAEMON_INTERACTIVE, true);
752 			task_importance_mark_donor(task, TRUE);
753 		} else {
754 			task_set_requested_apptype(task, TASK_APPTYPE_DAEMON_BACKGROUND, true);
755 			task_importance_mark_receiver(task, FALSE);
756 		}
757 	}
758 
759 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
760 	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
761 	    task_pid(task), teffective_0(task),
762 	    teffective_1(task), task->priority, 0);
763 
764 	task_policy_update_internal_locked(task, true, NULL);
765 
766 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
767 	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
768 	    task_pid(task), teffective_0(task),
769 	    teffective_1(task), task->priority, 0);
770 
771 	task_importance_update_live_donor(task);
772 }
773 
774 
775 static void
776 task_policy_update_locked(task_t task, task_pend_token_t pend_token)
777 {
778 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
779 	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START),
780 	    task_pid(task), teffective_0(task),
781 	    teffective_1(task), task->priority, 0);
782 
783 	task_policy_update_internal_locked(task, false, pend_token);
784 
785 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
786 	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END,
787 	    task_pid(task), teffective_0(task),
788 	    teffective_1(task), task->priority, 0);
789 }
790 
791 /*
792  * One state update function TO RULE THEM ALL
793  *
794  * This function updates the task or thread effective policy fields
795  * and pushes the results to the relevant subsystems.
796  *
797  * Must call update_complete after unlocking the task,
798  * as some subsystems cannot be updated while holding the task lock.
799  *
800  * Called with task locked, not thread
801  */
802 
803 static void
804 task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token)
805 {
806 	/*
807 	 * Step 1:
808 	 *  Gather requested policy and effective coalition state
809 	 */
810 
811 	struct task_requested_policy requested = task->requested_policy;
812 	bool coalition_is_bg = task_get_effective_jetsam_coalition_policy(task, TASK_POLICY_DARWIN_BG);
813 
814 	/*
815 	 * Step 2:
816 	 *  Calculate new effective policies from requested policy and task state
817 	 *  Rules:
818 	 *      Don't change requested, it won't take effect
819 	 */
820 
821 	struct task_effective_policy next = {};
822 
823 	/* Capture properties from coalition */
824 	next.tep_coalition_bg = coalition_is_bg;
825 
826 	/* Update task role */
827 	next.tep_role = requested.trp_role;
828 
829 	/* Set task qos clamp and ceiling */
830 
831 	thread_qos_t role_clamp = THREAD_QOS_UNSPECIFIED;
832 
833 	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
834 	    requested.trp_apptype == TASK_APPTYPE_APP_NONUI) {
835 		switch (next.tep_role) {
836 		case TASK_FOREGROUND_APPLICATION:
837 			/* Foreground apps get urgent scheduler priority */
838 			next.tep_qos_ui_is_urgent = 1;
839 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
840 			break;
841 
842 		case TASK_BACKGROUND_APPLICATION:
843 			/* This is really 'non-focal but on-screen' */
844 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
845 			break;
846 
847 		case TASK_DEFAULT_APPLICATION:
848 			/* This is 'may render UI but we don't know if it's focal/nonfocal' */
849 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
850 			break;
851 
852 		case TASK_NONUI_APPLICATION:
853 			/* i.e. 'off-screen' */
854 			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
855 			break;
856 
857 		case TASK_USER_INIT_APPLICATION:
858 			/* i.e. 'off-screen', but doing user-initiated work */
859 			next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
860 			break;
861 
862 		case TASK_CONTROL_APPLICATION:
863 		case TASK_GRAPHICS_SERVER:
864 			next.tep_qos_ui_is_urgent = 1;
865 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
866 			break;
867 
868 		case TASK_THROTTLE_APPLICATION:
869 			/* i.e. 'TAL launch' */
870 			next.tep_qos_ceiling = THREAD_QOS_UTILITY;
871 			role_clamp = THREAD_QOS_UTILITY;
872 			break;
873 
874 		case TASK_DARWINBG_APPLICATION:
875 			/* i.e. 'DARWIN_BG throttled background application' */
876 			next.tep_qos_ceiling = THREAD_QOS_BACKGROUND;
877 			break;
878 
879 		case TASK_UNSPECIFIED:
880 		default:
881 			/* Apps that don't have an application role get
882 			 * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
883 			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
884 			break;
885 		}
886 	} else {
887 		/* Daemons and dext get USER_INTERACTIVE squashed to USER_INITIATED */
888 		next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
889 	}
890 
891 	if (role_clamp != THREAD_QOS_UNSPECIFIED) {
892 		if (requested.trp_qos_clamp != THREAD_QOS_UNSPECIFIED) {
893 			next.tep_qos_clamp = MIN(role_clamp, requested.trp_qos_clamp);
894 		} else {
895 			next.tep_qos_clamp = role_clamp;
896 		}
897 	} else {
898 		next.tep_qos_clamp = requested.trp_qos_clamp;
899 	}
900 
901 	/* Calculate DARWIN_BG */
902 	bool wants_darwinbg        = false;
903 	bool wants_all_sockets_bg  = false; /* Do I want my existing sockets to be bg */
904 	bool wants_watchersbg      = false; /* Do I want my pidbound threads to be bg */
905 	bool bg_clamps_turnstiles  = false; /* This task does not want turnstile-boost-above-task */
906 
907 	/*
908 	 * Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled.
909 	 * Their threads can be turnstile-boosted out of BG.
910 	 */
911 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
912 	    requested.trp_boosted == 0) {
913 		wants_darwinbg = true;
914 	}
915 
916 	/*
917 	 * If DARWIN_BG has been requested at either level, it's engaged.
918 	 * Only true DARWIN_BG changes cause watchers to transition.
919 	 *
920 	 * Backgrounding due to apptype does.
921 	 */
922 	if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg ||
923 	    next.tep_role == TASK_DARWINBG_APPLICATION) {
924 		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
925 		bg_clamps_turnstiles = true;
926 	}
927 
928 	if (next.tep_coalition_bg) {
929 		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
930 		bg_clamps_turnstiles = true;
931 	}
932 
933 	/* Application launching in special Transparent App Lifecycle throttle mode */
934 	if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
935 	    requested.trp_apptype == TASK_APPTYPE_APP_NONUI) &&
936 	    requested.trp_role == TASK_THROTTLE_APPLICATION) {
937 		next.tep_tal_engaged = 1;
938 	}
939 
940 	/* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
941 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
942 		wants_darwinbg = true;
943 		bg_clamps_turnstiles = true;
944 	}
945 
946 	if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND ||
947 	    next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) {
948 		wants_darwinbg = true;
949 		bg_clamps_turnstiles = true;
950 	}
951 
952 	/*
953 	 * Runaway-mitigated processes are darwinbg unless their threads
954 	 * are turnstile-boosted.
955 	 */
956 	if (requested.trp_runaway_mitigation) {
957 		wants_darwinbg = true;
958 		next.tep_runaway_mitigation = 1;
959 	}
960 
961 	/* Calculate side effects of DARWIN_BG */
962 
963 	if (wants_darwinbg) {
964 		next.tep_darwinbg = 1;
965 		/* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */
966 		next.tep_new_sockets_bg = 1;
967 		next.tep_lowpri_cpu = 1;
968 	}
969 
970 	if (wants_all_sockets_bg) {
971 		next.tep_all_sockets_bg = 1;
972 	}
973 
974 	if (wants_watchersbg) {
975 		next.tep_watchers_bg = 1;
976 	}
977 
978 	if (wants_darwinbg && bg_clamps_turnstiles == false) {
979 		next.tep_promote_above_task = 1;
980 	}
981 
982 	/* Calculate low CPU priority */
983 
984 	boolean_t wants_lowpri_cpu = false;
985 
986 	if (wants_darwinbg) {
987 		wants_lowpri_cpu = true;
988 	}
989 
990 	if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) {
991 		wants_lowpri_cpu = true;
992 	}
993 
994 	if (wants_lowpri_cpu) {
995 		next.tep_lowpri_cpu = 1;
996 	}
997 
998 	/* Calculate IO policy */
999 
1000 	/* Update BG IO policy (so we can see if it has changed) */
1001 	next.tep_bg_iotier = requested.trp_bg_iotier;
1002 
1003 	int iopol = THROTTLE_LEVEL_TIER0;
1004 
1005 	if (wants_darwinbg) {
1006 		iopol = MAX(iopol, requested.trp_bg_iotier);
1007 	}
1008 
1009 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) {
1010 		iopol = MAX(iopol, proc_standard_daemon_tier);
1011 	}
1012 
1013 	if (requested.trp_sup_disk && requested.trp_boosted == 0) {
1014 		iopol = MAX(iopol, proc_suppressed_disk_tier);
1015 	}
1016 
1017 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1018 		iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]);
1019 	}
1020 
1021 	iopol = MAX(iopol, requested.trp_int_iotier);
1022 	iopol = MAX(iopol, requested.trp_ext_iotier);
1023 
1024 	next.tep_io_tier = iopol;
1025 
1026 	/* Calculate Passive IO policy */
1027 
1028 	if (requested.trp_ext_iopassive || requested.trp_int_iopassive) {
1029 		next.tep_io_passive = 1;
1030 	}
1031 
1032 	/* Calculate suppression-active flag */
1033 	if (requested.trp_sup_active && requested.trp_boosted == 0) {
1034 		next.tep_sup_active = 1;
1035 	}
1036 
1037 	/* Calculate timer QOS */
1038 	int latency_qos = requested.trp_base_latency_qos;
1039 
1040 	if (requested.trp_sup_timer && requested.trp_boosted == 0) {
1041 		latency_qos = requested.trp_sup_timer;
1042 	}
1043 
1044 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1045 		latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]);
1046 	}
1047 
1048 	if (requested.trp_over_latency_qos != 0) {
1049 		latency_qos = requested.trp_over_latency_qos;
1050 	}
1051 
1052 	/* Treat the windowserver special */
1053 	if (requested.trp_role == TASK_GRAPHICS_SERVER) {
1054 		latency_qos = proc_graphics_timer_qos;
1055 	}
1056 
1057 	next.tep_latency_qos = latency_qos;
1058 
1059 	/* Calculate throughput QOS */
1060 	int through_qos = requested.trp_base_through_qos;
1061 
1062 	if (requested.trp_sup_throughput && requested.trp_boosted == 0) {
1063 		through_qos = requested.trp_sup_throughput;
1064 	}
1065 
1066 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1067 		through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]);
1068 	}
1069 
1070 	if (requested.trp_over_through_qos != 0) {
1071 		through_qos = requested.trp_over_through_qos;
1072 	}
1073 
1074 	next.tep_through_qos = through_qos;
1075 
1076 	/* Calculate suppressed CPU priority */
1077 	if (requested.trp_sup_cpu && requested.trp_boosted == 0) {
1078 		next.tep_suppressed_cpu = 1;
1079 	}
1080 
1081 	/*
1082 	 * Calculate background sockets
1083 	 * Don't take into account boosting to limit transition frequency.
1084 	 */
1085 	if (requested.trp_sup_bg_sockets) {
1086 		next.tep_all_sockets_bg = 1;
1087 		next.tep_new_sockets_bg = 1;
1088 	}
1089 
1090 	/* Apply SFI Managed class bit */
1091 	next.tep_sfi_managed = requested.trp_sfi_managed;
1092 
1093 	/* Calculate 'live donor' status for live importance */
1094 	switch (requested.trp_apptype) {
1095 	case TASK_APPTYPE_APP_NONUI:
1096 	case TASK_APPTYPE_APP_DEFAULT:
1097 		if (requested.trp_ext_darwinbg == 1 ||
1098 		    next.tep_coalition_bg ||
1099 		    (next.tep_sup_active == 1 &&
1100 		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) ||
1101 		    next.tep_role == TASK_DARWINBG_APPLICATION ||
1102 		    next.tep_runaway_mitigation) {
1103 			next.tep_live_donor = 0;
1104 		} else {
1105 			next.tep_live_donor = 1;
1106 		}
1107 		break;
1108 
1109 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
1110 	case TASK_APPTYPE_DAEMON_STANDARD:
1111 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
1112 	case TASK_APPTYPE_DAEMON_BACKGROUND:
1113 	case TASK_APPTYPE_DRIVER:
1114 	default:
1115 		next.tep_live_donor = 0;
1116 		break;
1117 	}
1118 
1119 	if (requested.trp_terminated) {
1120 		/*
1121 		 * Shoot down the throttles that slow down exit or response to SIGTERM
1122 		 * We don't need to shoot down:
1123 		 * passive        (don't want to cause others to throttle)
1124 		 * all_sockets_bg (don't need to iterate FDs on every exit)
1125 		 * new_sockets_bg (doesn't matter for exiting process)
1126 		 * pidsuspend     (jetsam-ed BG process shouldn't run again)
1127 		 * watchers_bg    (watcher threads don't need to be unthrottled)
1128 		 * latency_qos    (affects userspace timers only)
1129 		 */
1130 
1131 		next.tep_terminated     = 1;
1132 		next.tep_darwinbg       = 0;
1133 		next.tep_lowpri_cpu     = 0;
1134 		next.tep_io_tier        = THROTTLE_LEVEL_TIER0;
1135 		next.tep_tal_engaged    = 0;
1136 		next.tep_role           = TASK_UNSPECIFIED;
1137 		next.tep_suppressed_cpu = 0;
1138 		next.tep_runaway_mitigation = 0;
1139 		next.tep_promote_above_task = 0;
1140 	}
1141 
1142 	/*
1143 	 * Step 3:
1144 	 *  Swap out old policy for new policy
1145 	 */
1146 
1147 	struct task_effective_policy prev = task->effective_policy;
1148 
1149 	/* This is the point where the new values become visible to other threads */
1150 	task->effective_policy = next;
1151 
1152 	/* Don't do anything further to a half-formed task */
1153 	if (in_create) {
1154 		return;
1155 	}
1156 
1157 	if (task == kernel_task) {
1158 		panic("Attempting to set task policy on kernel_task");
1159 	}
1160 
1161 	/*
1162 	 * Step 4:
1163 	 *  Pend updates that can't be done while holding the task lock
1164 	 */
1165 
1166 	if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) {
1167 		pend_token->tpt_update_sockets = 1;
1168 	}
1169 
1170 	/* Only re-scan the timer list if the qos level is getting less strong */
1171 	if (prev.tep_latency_qos > next.tep_latency_qos) {
1172 		pend_token->tpt_update_timers = 1;
1173 	}
1174 
1175 #if CONFIG_TASKWATCH
1176 	if (prev.tep_watchers_bg != next.tep_watchers_bg) {
1177 		pend_token->tpt_update_watchers = 1;
1178 	}
1179 #endif /* CONFIG_TASKWATCH */
1180 
1181 	if (prev.tep_live_donor != next.tep_live_donor) {
1182 		pend_token->tpt_update_live_donor = 1;
1183 	}
1184 
1185 	if (prev.tep_sup_active != next.tep_sup_active) {
1186 		pend_token->tpt_update_appnap = 1;
1187 	}
1188 
1189 	/* runaway mitigation mode generates its own dedicated tracepoint */
1190 	if (prev.tep_runaway_mitigation != next.tep_runaway_mitigation) {
1191 		KDBG_RELEASE(IMPORTANCE_CODE(IMP_RUNAWAY_MITIGATION, 0) |
1192 		    (next.tep_runaway_mitigation ? DBG_FUNC_START : DBG_FUNC_END),
1193 		    task_pid(task), next.tep_terminated);
1194 	}
1195 
1196 	/*
1197 	 * Step 5:
1198 	 *  Update other subsystems as necessary if something has changed
1199 	 */
1200 
1201 	bool update_threads = false, update_sfi = false, update_termination = false;
1202 
1203 	/*
1204 	 * Check for the attributes that thread_policy_update_internal_locked() consults,
1205 	 *  and trigger thread policy re-evaluation.
1206 	 */
1207 	if (prev.tep_io_tier != next.tep_io_tier ||
1208 	    prev.tep_bg_iotier != next.tep_bg_iotier ||
1209 	    prev.tep_io_passive != next.tep_io_passive ||
1210 	    prev.tep_darwinbg != next.tep_darwinbg ||
1211 	    prev.tep_qos_clamp != next.tep_qos_clamp ||
1212 	    prev.tep_qos_ceiling != next.tep_qos_ceiling ||
1213 	    prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent ||
1214 	    prev.tep_latency_qos != next.tep_latency_qos ||
1215 	    prev.tep_through_qos != next.tep_through_qos ||
1216 	    prev.tep_lowpri_cpu != next.tep_lowpri_cpu ||
1217 	    prev.tep_new_sockets_bg != next.tep_new_sockets_bg ||
1218 	    prev.tep_terminated != next.tep_terminated ||
1219 	    prev.tep_promote_above_task != next.tep_promote_above_task) {
1220 		update_threads = true;
1221 	}
1222 
1223 	/*
1224 	 * Check for the attributes that sfi_thread_classify() consults,
1225 	 *  and trigger SFI re-evaluation.
1226 	 */
1227 	if (prev.tep_latency_qos != next.tep_latency_qos ||
1228 	    prev.tep_role != next.tep_role ||
1229 	    prev.tep_runaway_mitigation != next.tep_runaway_mitigation ||
1230 	    prev.tep_sfi_managed != next.tep_sfi_managed) {
1231 		update_sfi = true;
1232 	}
1233 
1234 	/* Reflect task role transitions into the coalition role counters */
1235 	if (prev.tep_role != next.tep_role) {
1236 		if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) {
1237 			update_sfi = true;
1238 		}
1239 	}
1240 
1241 	if (prev.tep_terminated != next.tep_terminated) {
1242 		update_termination = true;
1243 	}
1244 
1245 	bool update_priority = false;
1246 
1247 	int16_t priority     = BASEPRI_DEFAULT;
1248 	int16_t max_priority = MAXPRI_USER;
1249 
1250 	if (next.tep_lowpri_cpu) {
1251 		priority = MAXPRI_THROTTLE;
1252 		max_priority = MAXPRI_THROTTLE;
1253 	} else if (next.tep_suppressed_cpu) {
1254 		priority = MAXPRI_SUPPRESSED;
1255 		max_priority = MAXPRI_SUPPRESSED;
1256 	} else {
1257 		switch (next.tep_role) {
1258 		case TASK_CONTROL_APPLICATION:
1259 			priority = BASEPRI_CONTROL;
1260 			break;
1261 		case TASK_GRAPHICS_SERVER:
1262 			priority = BASEPRI_GRAPHICS;
1263 			max_priority = MAXPRI_RESERVED;
1264 			break;
1265 		default:
1266 			break;
1267 		}
1268 
1269 		/* factor in 'nice' value */
1270 		priority += task->importance;
1271 
1272 		if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1273 			int16_t qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp];
1274 
1275 			priority        = MIN(priority, qos_clamp_priority);
1276 			max_priority    = MIN(max_priority, qos_clamp_priority);
1277 		}
1278 
1279 		if (priority > max_priority) {
1280 			priority = max_priority;
1281 		} else if (priority < MINPRI) {
1282 			priority = MINPRI;
1283 		}
1284 	}
1285 
1286 	assert(priority <= max_priority);
1287 
1288 	/* avoid extra work if priority isn't changing */
1289 	if (priority != task->priority ||
1290 	    max_priority != task->max_priority) {
1291 		/* update the scheduling priority for the task */
1292 		task->max_priority  = max_priority;
1293 		task->priority      = priority;
1294 		update_priority     = true;
1295 	}
1296 
1297 	/* Loop over the threads in the task:
1298 	 * only once
1299 	 * only if necessary
1300 	 * with one thread mutex hold per thread
1301 	 */
1302 	if (update_threads || update_priority || update_sfi) {
1303 		thread_t thread;
1304 
1305 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
1306 			struct task_pend_token thread_pend_token = {};
1307 
1308 			if (update_sfi) {
1309 				thread_pend_token.tpt_update_thread_sfi = 1;
1310 			}
1311 
1312 			if (update_priority || update_threads) {
1313 				/* Check if we need to reevaluate turnstile push */
1314 				if (pend_token->tpt_update_turnstile) {
1315 					thread_pend_token.tpt_update_turnstile = 1;
1316 				}
1317 				thread_policy_update_tasklocked(thread,
1318 				    task->priority, task->max_priority,
1319 				    &thread_pend_token);
1320 			}
1321 
1322 			assert(!thread_pend_token.tpt_update_sockets);
1323 
1324 			// Slightly risky, as we still hold the task lock...
1325 			thread_policy_update_complete_unlocked(thread, &thread_pend_token);
1326 		}
1327 	}
1328 
1329 	if (update_termination) {
1330 		/*
1331 		 * This update is done after the terminated bit is set,
1332 		 * and all updates other than this one will check that bit,
1333 		 * so we know that it will be the last update.  (This path
1334 		 * skips the check for the terminated bit.)
1335 		 */
1336 		if (task_set_game_mode_locked(task, false)) {
1337 			pend_token->tpt_update_game_mode = 1;
1338 		}
1339 		if (task_set_carplay_mode_locked(task, false)) {
1340 			pend_token->tpt_update_carplay_mode = 1;
1341 		}
1342 	}
1343 }
1344 
1345 
1346 /*
1347  * Yet another layering violation. We reach out and bang on the coalition directly.
1348  */
1349 static boolean_t
1350 task_policy_update_coalition_focal_tasks(task_t            task,
1351     int               prev_role,
1352     int               next_role,
1353     task_pend_token_t pend_token)
1354 {
1355 	boolean_t sfi_transition = FALSE;
1356 	uint32_t new_count = 0;
1357 
1358 	/* task moving into/out-of the foreground */
1359 	if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
1360 		if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) {
1361 			sfi_transition = TRUE;
1362 			pend_token->tpt_update_tg_ui_flag = TRUE;
1363 		}
1364 	} else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
1365 		if (task_coalition_adjust_focal_count(task, -1, &new_count) && (new_count == 0)) {
1366 			sfi_transition = TRUE;
1367 			pend_token->tpt_update_tg_ui_flag = TRUE;
1368 		}
1369 	}
1370 
1371 	/* task moving into/out-of background */
1372 	if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
1373 		if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) {
1374 			sfi_transition = TRUE;
1375 		}
1376 	} else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
1377 		if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) {
1378 			sfi_transition = TRUE;
1379 		}
1380 	}
1381 
1382 	if (sfi_transition) {
1383 		pend_token->tpt_update_coal_sfi = 1;
1384 	}
1385 	return sfi_transition;
1386 }
1387 
1388 /*
1389  * Called with coalition locked to push updates from coalition policy
1390  * into its member tasks
1391  */
1392 void
1393 coalition_policy_update_task(task_t task, coalition_pend_token_t coal_pend_token)
1394 {
1395 	/*
1396 	 * Push a task policy update incorporating the new state
1397 	 * of the coalition, but because we have the coalition locked,
1398 	 * we can't do task_policy_update_complete_unlocked in this function.
1399 	 *
1400 	 * Instead, we stash the pend token on the task, and ask the coalition
1401 	 * to come around later after the lock is dropped to do the follow-up.
1402 	 */
1403 
1404 	task_pend_token_t task_pend_token = &task->pended_coalition_changes;
1405 
1406 	assert(task_pend_token->tpt_value == 0);
1407 
1408 	task_lock(task);
1409 
1410 	task_policy_update_locked(task, task_pend_token);
1411 
1412 	task_unlock(task);
1413 
1414 	if (task_pend_token->tpt_update_timers) {
1415 		/*
1416 		 * ml_timer_evaluate can be batched, so defer it to happen
1417 		 * once at the coalition level
1418 		 */
1419 		coal_pend_token->cpt_update_timers = 1;
1420 		task_pend_token->tpt_update_timers = 0;
1421 	}
1422 
1423 	if (task_pend_token->tpt_value != 0) {
1424 		/*
1425 		 * We need to come look at this task after unlocking
1426 		 * the coalition to do pended work.
1427 		 */
1428 		coal_pend_token->cpt_update_j_coal_tasks = 1;
1429 	}
1430 }
1431 
1432 /*
1433  * Called with task unlocked to do things that can't be done while holding the task lock
1434  */
1435 void
1436 task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token)
1437 {
1438 #ifdef MACH_BSD
1439 	if (pend_token->tpt_update_sockets) {
1440 		proc_apply_task_networkbg(task_pid(task), THREAD_NULL);
1441 	}
1442 #endif /* MACH_BSD */
1443 
1444 	/* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
1445 	if (pend_token->tpt_update_timers) {
1446 		ml_timer_evaluate();
1447 	}
1448 
1449 #if CONFIG_TASKWATCH
1450 	if (pend_token->tpt_update_watchers) {
1451 		apply_appstate_watchers(task);
1452 	}
1453 #endif /* CONFIG_TASKWATCH */
1454 
1455 	if (pend_token->tpt_update_live_donor) {
1456 		task_importance_update_live_donor(task);
1457 	}
1458 
1459 #if CONFIG_SCHED_SFI
1460 	/* use the resource coalition for SFI re-evaluation */
1461 	if (pend_token->tpt_update_coal_sfi) {
1462 		coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE],
1463 		    ^ bool (task_t each_task) {
1464 			thread_t thread;
1465 
1466 			/* skip the task we're re-evaluating on behalf of: it's already updated */
1467 			if (each_task == task) {
1468 			        return false;
1469 			}
1470 
1471 			task_lock(each_task);
1472 
1473 			queue_iterate(&each_task->threads, thread, thread_t, task_threads) {
1474 			        sfi_reevaluate(thread);
1475 			}
1476 
1477 			task_unlock(each_task);
1478 
1479 			return false;
1480 		});
1481 	}
1482 #endif /* CONFIG_SCHED_SFI */
1483 
1484 #if CONFIG_THREAD_GROUPS
1485 	if (pend_token->tpt_update_tg_ui_flag) {
1486 		task_coalition_thread_group_focal_update(task);
1487 	}
1488 	if (pend_token->tpt_update_tg_app_flag) {
1489 		task_coalition_thread_group_application_set(task);
1490 	}
1491 	if (pend_token->tpt_update_game_mode) {
1492 		task_coalition_thread_group_game_mode_update(task);
1493 	}
1494 	if (pend_token->tpt_update_carplay_mode) {
1495 		task_coalition_thread_group_carplay_mode_update(task);
1496 	}
1497 #endif /* CONFIG_THREAD_GROUPS */
1498 
1499 	/*
1500 	 * Use the app-nap transitions to influence the
1501 	 * transition of the process within the jetsam band
1502 	 * [and optionally its live-donor status]
1503 	 * On macOS only.
1504 	 */
1505 	if (pend_token->tpt_update_appnap) {
1506 		memorystatus_update_priority_for_appnap((proc_t) get_bsdtask_info(task));
1507 	}
1508 }
1509 
1510 /*
1511  * Initiate a task policy state transition
1512  *
1513  * Everything that modifies requested except functions that need to hold the task lock
1514  * should use this function
1515  *
1516  * Argument validation should be performed before reaching this point.
1517  *
1518  * TODO: Do we need to check task->active?
1519  */
1520 void
1521 proc_set_task_policy(task_t     task,
1522     int        category,
1523     int        flavor,
1524     int        value)
1525 {
1526 	struct task_pend_token pend_token = {};
1527 
1528 	task_lock(task);
1529 
1530 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1531 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1532 	    task_pid(task), trequested_0(task),
1533 	    trequested_1(task), value, 0);
1534 
1535 	proc_set_task_policy_locked(task, category, flavor, value, 0);
1536 
1537 	task_policy_update_locked(task, &pend_token);
1538 
1539 
1540 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1541 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1542 	    task_pid(task), trequested_0(task),
1543 	    trequested_1(task), tpending(&pend_token), 0);
1544 
1545 	task_unlock(task);
1546 
1547 	task_policy_update_complete_unlocked(task, &pend_token);
1548 }
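
/*
 * Example caller (illustrative): this is how task_policy_set() above
 * applies an application role change through this entry point:
 *
 *     proc_set_task_policy(task,
 *         TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
 *         info->role);
 */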
1549 
1550 /*
1551  * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
1552  * Same locking rules apply.
1553  */
1554 void
1555 proc_set_task_policy2(task_t    task,
1556     int       category,
1557     int       flavor,
1558     int       value,
1559     int       value2)
1560 {
1561 	struct task_pend_token pend_token = {};
1562 
1563 	task_lock(task);
1564 
1565 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1566 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1567 	    task_pid(task), trequested_0(task),
1568 	    trequested_1(task), value, 0);
1569 
1570 	proc_set_task_policy_locked(task, category, flavor, value, value2);
1571 
1572 	task_policy_update_locked(task, &pend_token);
1573 
1574 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1575 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1576 	    task_pid(task), trequested_0(task),
1577 	    trequested_1(task), tpending(&pend_token), 0);
1578 
1579 	task_unlock(task);
1580 
1581 	task_policy_update_complete_unlocked(task, &pend_token);
1582 }
1583 
1584 /*
1585  * Set the requested state for a specific flavor to a specific value.
1586  *
1587  *  TODO:
1588  *  Verify that arguments to non-iopol flavors are 1 or 0
1589  */
1590 static void
1591 proc_set_task_policy_locked(task_t      task,
1592     int         category,
1593     int         flavor,
1594     int         value,
1595     int         value2)
1596 {
1597 	int tier, passive;
1598 
1599 	struct task_requested_policy requested = task->requested_policy;
1600 
1601 	switch (flavor) {
1602 	/* Category: EXTERNAL and INTERNAL */
1603 
1604 	case TASK_POLICY_DARWIN_BG:
1605 		if (category == TASK_POLICY_EXTERNAL) {
1606 			requested.trp_ext_darwinbg = value;
1607 		} else {
1608 			requested.trp_int_darwinbg = value;
1609 		}
1610 		break;
1611 
1612 	case TASK_POLICY_IOPOL:
1613 		proc_iopol_to_tier(value, &tier, &passive);
1614 		if (category == TASK_POLICY_EXTERNAL) {
1615 			requested.trp_ext_iotier  = tier;
1616 			requested.trp_ext_iopassive = passive;
1617 		} else {
1618 			requested.trp_int_iotier  = tier;
1619 			requested.trp_int_iopassive = passive;
1620 		}
1621 		break;
1622 
1623 	case TASK_POLICY_IO:
1624 		if (category == TASK_POLICY_EXTERNAL) {
1625 			requested.trp_ext_iotier = value;
1626 		} else {
1627 			requested.trp_int_iotier = value;
1628 		}
1629 		break;
1630 
1631 	case TASK_POLICY_PASSIVE_IO:
1632 		if (category == TASK_POLICY_EXTERNAL) {
1633 			requested.trp_ext_iopassive = value;
1634 		} else {
1635 			requested.trp_int_iopassive = value;
1636 		}
1637 		break;
1638 
1639 	/* Category: INTERNAL */
1640 
1641 	case TASK_POLICY_DARWIN_BG_IOPOL:
1642 		assert(category == TASK_POLICY_INTERNAL);
1643 		proc_iopol_to_tier(value, &tier, &passive);
1644 		requested.trp_bg_iotier = tier;
1645 		break;
1646 
1647 	/* Category: ATTRIBUTE */
1648 
1649 	case TASK_POLICY_BOOST:
1650 		assert(category == TASK_POLICY_ATTRIBUTE);
1651 		requested.trp_boosted = value;
1652 		break;
1653 
1654 	case TASK_POLICY_ROLE:
1655 		assert(category == TASK_POLICY_ATTRIBUTE);
1656 		requested.trp_role = value;
1657 		break;
1658 
1659 	case TASK_POLICY_TERMINATED:
1660 		assert(category == TASK_POLICY_ATTRIBUTE);
1661 		requested.trp_terminated = value;
1662 		break;
1663 
1664 	case TASK_BASE_LATENCY_QOS_POLICY:
1665 		assert(category == TASK_POLICY_ATTRIBUTE);
1666 		requested.trp_base_latency_qos = value;
1667 		break;
1668 
1669 	case TASK_BASE_THROUGHPUT_QOS_POLICY:
1670 		assert(category == TASK_POLICY_ATTRIBUTE);
1671 		requested.trp_base_through_qos = value;
1672 		break;
1673 
1674 	case TASK_POLICY_SFI_MANAGED:
1675 		assert(category == TASK_POLICY_ATTRIBUTE);
1676 		requested.trp_sfi_managed = value;
1677 		break;
1678 
1679 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1680 		assert(category == TASK_POLICY_ATTRIBUTE);
1681 		requested.trp_base_latency_qos = value;
1682 		requested.trp_base_through_qos = value2;
1683 		break;
1684 
1685 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1686 		assert(category == TASK_POLICY_ATTRIBUTE);
1687 		requested.trp_over_latency_qos = value;
1688 		requested.trp_over_through_qos = value2;
1689 		break;
1690 
1691 	case TASK_POLICY_RUNAWAY_MITIGATION:
1692 		assert(category == TASK_POLICY_ATTRIBUTE);
1693 		requested.trp_runaway_mitigation = value;
1694 		break;
1695 
1696 	default:
1697 		panic("unknown task policy: %d %d %d %d", category, flavor, value, value2);
1698 		break;
1699 	}
1700 
1701 	task->requested_policy = requested;
1702 }
1703 
1704 /*
1705  * Gets what you set. Effective values may be different.
1706  */
1707 int
1708 proc_get_task_policy(task_t     task,
1709     int        category,
1710     int        flavor)
1711 {
1712 	int value = 0;
1713 
1714 	task_lock(task);
1715 
1716 	struct task_requested_policy requested = task->requested_policy;
1717 
1718 	switch (flavor) {
1719 	case TASK_POLICY_DARWIN_BG:
1720 		if (category == TASK_POLICY_EXTERNAL) {
1721 			value = requested.trp_ext_darwinbg;
1722 		} else {
1723 			value = requested.trp_int_darwinbg;
1724 		}
1725 		break;
1726 	case TASK_POLICY_IOPOL:
1727 		if (category == TASK_POLICY_EXTERNAL) {
1728 			value = proc_tier_to_iopol(requested.trp_ext_iotier,
1729 			    requested.trp_ext_iopassive);
1730 		} else {
1731 			value = proc_tier_to_iopol(requested.trp_int_iotier,
1732 			    requested.trp_int_iopassive);
1733 		}
1734 		break;
1735 	case TASK_POLICY_IO:
1736 		if (category == TASK_POLICY_EXTERNAL) {
1737 			value = requested.trp_ext_iotier;
1738 		} else {
1739 			value = requested.trp_int_iotier;
1740 		}
1741 		break;
1742 	case TASK_POLICY_PASSIVE_IO:
1743 		if (category == TASK_POLICY_EXTERNAL) {
1744 			value = requested.trp_ext_iopassive;
1745 		} else {
1746 			value = requested.trp_int_iopassive;
1747 		}
1748 		break;
1749 	case TASK_POLICY_DARWIN_BG_IOPOL:
1750 		assert(category == TASK_POLICY_INTERNAL);
1751 		value = proc_tier_to_iopol(requested.trp_bg_iotier, 0);
1752 		break;
1753 	case TASK_POLICY_ROLE:
1754 		assert(category == TASK_POLICY_ATTRIBUTE);
1755 		value = requested.trp_role;
1756 		break;
1757 	case TASK_POLICY_SFI_MANAGED:
1758 		assert(category == TASK_POLICY_ATTRIBUTE);
1759 		value = requested.trp_sfi_managed;
1760 		break;
1761 	case TASK_POLICY_RUNAWAY_MITIGATION:
1762 		assert(category == TASK_POLICY_ATTRIBUTE);
1763 		value = requested.trp_runaway_mitigation;
1764 		break;
1765 	default:
1766 		panic("unknown policy_flavor %d", flavor);
1767 		break;
1768 	}
1769 
1770 	task_unlock(task);
1771 
1772 	return value;
1773 }
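/*
 * Editorial example: because this getter reads 'requested', a set of the same
 * (category, flavor) round-trips even when the effective policy differs.
 * A hypothetical sketch:
 *
 *	proc_set_task_policy(task, TASK_POLICY_EXTERNAL, TASK_POLICY_IOPOL,
 *	    IOPOL_UTILITY);
 *	value = proc_get_task_policy(task, TASK_POLICY_EXTERNAL,
 *	    TASK_POLICY_IOPOL);		(value == IOPOL_UTILITY)
 */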
1774 
1775 /*
1776  * Variant of proc_get_task_policy() that returns two scalar outputs.
1777  */
1778 void
1779 proc_get_task_policy2(task_t task,
1780     __assert_only int category,
1781     int flavor,
1782     int *value1,
1783     int *value2)
1784 {
1785 	task_lock(task);
1786 
1787 	struct task_requested_policy requested = task->requested_policy;
1788 
1789 	switch (flavor) {
1790 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1791 		assert(category == TASK_POLICY_ATTRIBUTE);
1792 		*value1 = requested.trp_base_latency_qos;
1793 		*value2 = requested.trp_base_through_qos;
1794 		break;
1795 
1796 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1797 		assert(category == TASK_POLICY_ATTRIBUTE);
1798 		*value1 = requested.trp_over_latency_qos;
1799 		*value2 = requested.trp_over_through_qos;
1800 		break;
1801 
1802 	default:
1803 		panic("unknown policy_flavor %d", flavor);
1804 		break;
1805 	}
1806 
1807 	task_unlock(task);
1808 }
1809 
1810 /*
1811  * Function for querying effective state for relevant subsystems
1812  * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
1813  *
1814  * ONLY the relevant subsystem should query this.
1815  * NEVER take a value from the 'effective' function and stuff it into a setter.
1816  *
1817  * NOTE: This accessor does not take the task lock.
1818  * Notifications of state updates need to be externally synchronized with state queries.
1819  * This routine *MUST* remain interrupt safe, as it is potentially invoked
1820  * within the context of a timer interrupt.  It is also called in KDP context for stackshot.
1821  */
1822 int
1823 proc_get_effective_task_policy(task_t   task,
1824     int      flavor)
1825 {
1826 	int value = 0;
1827 
1828 	switch (flavor) {
1829 	case TASK_POLICY_DARWIN_BG:
1830 		/*
1831 		 * This backs the KPI call proc_pidbackgrounded to find
1832 		 * out if a pid is backgrounded.
1833 		 * It is used to communicate state to the VM system, as well as
1834 		 * to prioritize requests to the graphics system.
1835 		 * Returns 1 for background mode, 0 for normal mode
1836 		 */
1837 		value = task->effective_policy.tep_darwinbg;
1838 		break;
1839 	case TASK_POLICY_ALL_SOCKETS_BG:
1840 		/*
1841 		 * do_background_socket() calls this to determine what it should do to the proc's sockets
1842 		 * Returns 1 for background mode, 0 for normal mode
1843 		 *
1844 		 * This consults both thread and task so un-DBGing a thread while the task is BG
1845 		 * doesn't get you out of the network throttle.
1846 		 */
1847 		value = task->effective_policy.tep_all_sockets_bg;
1848 		break;
1849 	case TASK_POLICY_SUP_ACTIVE:
1850 		/*
1851 		 * Is the task in AppNap? This is used to determine the urgency
1852 		 * that's passed to the performance management subsystem for threads
1853 		 * that are running at a priority <= MAXPRI_THROTTLE.
1854 		 */
1855 		value = task->effective_policy.tep_sup_active;
1856 		break;
1857 	case TASK_POLICY_LATENCY_QOS:
1858 		/*
1859 		 * timer arming calls into here to find out the timer coalescing level
1860 		 * Returns a QoS tier (0-6)
1861 		 */
1862 		value = task->effective_policy.tep_latency_qos;
1863 		break;
1864 	case TASK_POLICY_THROUGH_QOS:
1865 		/*
1866 		 * This value is passed into the urgency callout from the scheduler
1867 		 * to the performance management subsystem.
1868 		 * Returns a QoS tier (0-6)
1869 		 */
1870 		value = task->effective_policy.tep_through_qos;
1871 		break;
1872 	case TASK_POLICY_ROLE:
1873 		/*
1874 		 * This controls various things that ask whether a process is foreground,
1875 		 * like SFI, VM, access to GPU, etc
1876 		 */
1877 		value = task->effective_policy.tep_role;
1878 		break;
1879 	case TASK_POLICY_WATCHERS_BG:
1880 		/*
1881 		 * This controls whether or not a thread watching this process should be BG.
1882 		 */
1883 		value = task->effective_policy.tep_watchers_bg;
1884 		break;
1885 	case TASK_POLICY_SFI_MANAGED:
1886 		/*
1887 		 * This controls whether or not a process is targeted for specific control by thermald.
1888 		 */
1889 		value = task->effective_policy.tep_sfi_managed;
1890 		break;
1891 	case TASK_POLICY_TERMINATED:
1892 		/*
1893 		 * This controls whether or not a process has its throttling properties shot down for termination.
1894 		 */
1895 		value = task->effective_policy.tep_terminated;
1896 		break;
1897 	case TASK_POLICY_RUNAWAY_MITIGATION:
1898 		/*
1899 		 * This shows whether or not a process has been tagged for runaway mitigation.
1900 		 */
1901 		value = task->effective_policy.tep_runaway_mitigation;
1902 		break;
1903 	default:
1904 		panic("unknown policy_flavor %d", flavor);
1905 		break;
1906 	}
1907 
1908 	return value;
1909 }
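/*
 * Editorial sketch of a pull-style consumer, per the warnings above: read the
 * effective state, never feed it back into a setter.  The wrapper below is
 * hypothetical.
 *
 *	static boolean_t
 *	example_task_is_backgrounded(task_t task)
 *	{
 *		return proc_get_effective_task_policy(task,
 *		    TASK_POLICY_DARWIN_BG) ? TRUE : FALSE;
 *	}
 */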
1910 
1911 /*
1912  * Convert from IOPOL_* values to throttle tiers.
1913  *
1914  * TODO: Can this be made more compact, like an array lookup
1915  * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
1916  */
1917 
1918 void
1919 proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
1920 {
1921 	*passive = 0;
1922 	*tier = 0;
1923 	switch (iopolicy) {
1924 	case IOPOL_IMPORTANT:
1925 		*tier = THROTTLE_LEVEL_TIER0;
1926 		break;
1927 	case IOPOL_PASSIVE:
1928 		*tier = THROTTLE_LEVEL_TIER0;
1929 		*passive = 1;
1930 		break;
1931 	case IOPOL_STANDARD:
1932 		*tier = THROTTLE_LEVEL_TIER1;
1933 		break;
1934 	case IOPOL_UTILITY:
1935 		*tier = THROTTLE_LEVEL_TIER2;
1936 		break;
1937 	case IOPOL_THROTTLE:
1938 		*tier = THROTTLE_LEVEL_TIER3;
1939 		break;
1940 	default:
1941 		panic("unknown I/O policy %d", iopolicy);
1942 		break;
1943 	}
1944 }
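/*
 * Editorial sketch of the array lookup suggested by the TODO above (assuming
 * the IOPOL_* values stay small and dense enough to index with):
 *
 *	static const struct { int tier; int passive; } iopol_tiers[] = {
 *		[IOPOL_IMPORTANT] = { THROTTLE_LEVEL_TIER0, 0 },
 *		[IOPOL_PASSIVE]   = { THROTTLE_LEVEL_TIER0, 1 },
 *		[IOPOL_STANDARD]  = { THROTTLE_LEVEL_TIER1, 0 },
 *		[IOPOL_UTILITY]   = { THROTTLE_LEVEL_TIER2, 0 },
 *		[IOPOL_THROTTLE]  = { THROTTLE_LEVEL_TIER3, 0 },
 *	};
 */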
1945 
1946 int
1947 proc_tier_to_iopol(int tier, int passive)
1948 {
1949 	if (passive == 1) {
1950 		switch (tier) {
1951 		case THROTTLE_LEVEL_TIER0:
1952 			return IOPOL_PASSIVE;
1953 		default:
1954 			panic("unknown passive tier %d", tier);
1955 			return IOPOL_DEFAULT;
1956 		}
1957 	} else {
1958 		switch (tier) {
1959 		case THROTTLE_LEVEL_NONE:
1960 		case THROTTLE_LEVEL_TIER0:
1961 			return IOPOL_DEFAULT;
1962 		case THROTTLE_LEVEL_TIER1:
1963 			return IOPOL_STANDARD;
1964 		case THROTTLE_LEVEL_TIER2:
1965 			return IOPOL_UTILITY;
1966 		case THROTTLE_LEVEL_TIER3:
1967 			return IOPOL_THROTTLE;
1968 		default:
1969 			panic("unknown tier %d", tier);
1970 			return IOPOL_DEFAULT;
1971 		}
1972 	}
1973 }
1974 
1975 int
1976 proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role)
1977 {
1978 	integer_t role = TASK_UNSPECIFIED;
1979 
1980 	switch (darwin_role) {
1981 	case PRIO_DARWIN_ROLE_DEFAULT:
1982 		role = TASK_UNSPECIFIED;
1983 		break;
1984 	case PRIO_DARWIN_ROLE_UI_FOCAL:
1985 		role = TASK_FOREGROUND_APPLICATION;
1986 		break;
1987 	case PRIO_DARWIN_ROLE_UI:
1988 		role = TASK_DEFAULT_APPLICATION;
1989 		break;
1990 	case PRIO_DARWIN_ROLE_NON_UI:
1991 		role = TASK_NONUI_APPLICATION;
1992 		break;
1993 	case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
1994 		role = TASK_BACKGROUND_APPLICATION;
1995 		break;
1996 	case PRIO_DARWIN_ROLE_TAL_LAUNCH:
1997 		role = TASK_THROTTLE_APPLICATION;
1998 		break;
1999 	case PRIO_DARWIN_ROLE_DARWIN_BG:
2000 		role = TASK_DARWINBG_APPLICATION;
2001 		break;
2002 	case PRIO_DARWIN_ROLE_USER_INIT:
2003 		role = TASK_USER_INIT_APPLICATION;
2004 		break;
2005 	default:
2006 		return EINVAL;
2007 	}
2008 
2009 	*task_role = role;
2010 
2011 	return 0;
2012 }
2013 
2014 int
2015 proc_task_role_to_darwin_role(task_role_t task_role)
2016 {
2017 	switch (task_role) {
2018 	case TASK_FOREGROUND_APPLICATION:
2019 		return PRIO_DARWIN_ROLE_UI_FOCAL;
2020 	case TASK_BACKGROUND_APPLICATION:
2021 		return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
2022 	case TASK_NONUI_APPLICATION:
2023 		return PRIO_DARWIN_ROLE_NON_UI;
2024 	case TASK_DEFAULT_APPLICATION:
2025 		return PRIO_DARWIN_ROLE_UI;
2026 	case TASK_THROTTLE_APPLICATION:
2027 		return PRIO_DARWIN_ROLE_TAL_LAUNCH;
2028 	case TASK_DARWINBG_APPLICATION:
2029 		return PRIO_DARWIN_ROLE_DARWIN_BG;
2030 	case TASK_USER_INIT_APPLICATION:
2031 		return PRIO_DARWIN_ROLE_USER_INIT;
2032 	case TASK_UNSPECIFIED:
2033 	default:
2034 		return PRIO_DARWIN_ROLE_DEFAULT;
2035 	}
2036 }
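/*
 * Editorial note: the two mappings above are inverses for every recognized
 * role, but not total inverses -- TASK_UNSPECIFIED and any unknown role both
 * collapse to PRIO_DARWIN_ROLE_DEFAULT.  For example:
 *
 *	task_role_t role;
 *	proc_darwin_role_to_task_role(PRIO_DARWIN_ROLE_UI_FOCAL, &role);
 *	assert(proc_task_role_to_darwin_role(role) == PRIO_DARWIN_ROLE_UI_FOCAL);
 */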
2037 
2038 
2039 /* TODO: remove this variable when interactive daemon audit period is over */
2040 static TUNABLE(bool, ipc_importance_interactive_receiver,
2041     "imp_interactive_receiver", false);
2042 
2043 /*
2044  * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
2045  *
2046  * TODO: Make this function more table-driven instead of ad-hoc
2047  */
2048 void
2049 proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role,
2050     ipc_port_t * portwatch_ports, uint32_t portwatch_count)
2051 {
2052 	struct task_pend_token pend_token = {};
2053 
2054 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2055 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
2056 	    task_pid(task), trequested_0(task), trequested_1(task),
2057 	    apptype, 0);
2058 
2059 	if (apptype != TASK_APPTYPE_NONE) {
2060 		/*
2061 		 * Reset the receiver and denap state inherited from the
2062 		 * task's parent, but only if we are going to reset it via the
2063 		 * provided apptype.
2064 		 */
2065 		if (task_is_importance_receiver(task)) {
2066 			task_importance_mark_receiver(task, FALSE);
2067 		}
2068 		if (task_is_importance_denap_receiver(task)) {
2069 			task_importance_mark_denap_receiver(task, FALSE);
2070 		}
2071 	}
2072 
2073 	switch (apptype) {
2074 	case TASK_APPTYPE_APP_DEFAULT:
2075 	case TASK_APPTYPE_APP_NONUI:
2076 		/* Apps become donors via the 'live-donor' flag instead of the static donor flag */
2077 		task_importance_mark_donor(task, FALSE);
2078 		task_importance_mark_live_donor(task, TRUE);
2079 		// importance_receiver == FALSE
2080 #if defined(XNU_TARGET_OS_OSX)
2081 		/* Apps are de-nap receivers on macOS for suppression behaviors */
2082 		task_importance_mark_denap_receiver(task, TRUE);
2083 #endif /* defined(XNU_TARGET_OS_OSX) */
2084 		break;
2085 
2086 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2087 		task_importance_mark_donor(task, TRUE);
2088 		task_importance_mark_live_donor(task, FALSE);
2089 		// importance_denap_receiver == FALSE
2090 
2091 		/*
2092 		 * A boot arg controls whether interactive daemons are importance receivers.
2093 		 * Normally, they are not.  But for testing their behavior as an adaptive
2094 		 * daemon, the boot-arg can be set.
2095 		 *
2096 		 * TODO: remove this when the interactive daemon audit period is over.
2097 		 */
2098 		task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
2099 		break;
2100 
2101 	case TASK_APPTYPE_DAEMON_STANDARD:
2102 		task_importance_mark_donor(task, TRUE);
2103 		task_importance_mark_live_donor(task, FALSE);
2104 		// importance_denap_receiver == FALSE
2105 		// importance_receiver == FALSE
2106 		break;
2107 
2108 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2109 		task_importance_mark_donor(task, FALSE);
2110 		task_importance_mark_live_donor(task, FALSE);
2111 		task_importance_mark_receiver(task, TRUE);
2112 		// importance_denap_receiver == FALSE
2113 		break;
2114 
2115 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2116 		task_importance_mark_donor(task, FALSE);
2117 		task_importance_mark_live_donor(task, FALSE);
2118 		// importance_denap_receiver == FALSE
2119 		// importance_receiver == FALSE
2120 		break;
2121 
2122 	case TASK_APPTYPE_DRIVER:
2123 		task_importance_mark_donor(task, FALSE);
2124 		task_importance_mark_live_donor(task, FALSE);
2125 		// importance_denap_receiver == FALSE
2126 		// importance_receiver == FALSE
2127 		break;
2128 
2129 	case TASK_APPTYPE_NONE:
2130 		break;
2131 	}
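	/*
	 * Editorial summary of the switch above (derived from the code, not an
	 * authoritative table):
	 *
	 *	apptype			donor	live-donor	receiver	denap
	 *	APP_DEFAULT/NONUI	no	yes		no		macOS only
	 *	DAEMON_INTERACTIVE	yes	no		boot-arg	no
	 *	DAEMON_STANDARD		yes	no		no		no
	 *	DAEMON_ADAPTIVE		no	no		yes		no
	 *	DAEMON_BACKGROUND	no	no		no		no
	 *	DRIVER			no	no		no		no
	 */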
2132 
2133 	if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2134 		int portwatch_boosts = 0;
2135 
2136 		for (uint32_t i = 0; i < portwatch_count; i++) {
2137 			ipc_port_t port = NULL;
2138 
2139 			if (IP_VALID(port = portwatch_ports[i])) {
2140 				int boost = 0;
2141 				task_add_importance_watchport(task, port, &boost);
2142 				portwatch_boosts += boost;
2143 			}
2144 		}
2145 
2146 		if (portwatch_boosts > 0) {
2147 			task_importance_hold_internal_assertion(task, portwatch_boosts);
2148 		}
2149 	}
2150 
2151 	/* Redirect the turnstile push of watchports to task */
2152 	if (portwatch_count && portwatch_ports != NULL) {
2153 		task_add_turnstile_watchports(task, thread, portwatch_ports, portwatch_count);
2154 	}
2155 
2156 	task_lock(task);
2157 
2158 	if (apptype != TASK_APPTYPE_NONE) {
2159 		task_set_requested_apptype(task, apptype, false);
2160 		if (task_is_app(task)) {
2161 			pend_token.tpt_update_tg_app_flag = 1;
2162 		}
2163 	}
2164 
2165 #if !defined(XNU_TARGET_OS_OSX)
2166 	/* Remove this after launchd starts setting it properly */
2167 	if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) {
2168 		task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION;
2169 	} else
2170 #endif
2171 	if (role != TASK_UNSPECIFIED) {
2172 		task->requested_policy.trp_role = (uint32_t)role;
2173 	}
2174 
2175 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2176 		task->requested_policy.trp_qos_clamp = qos_clamp;
2177 	}
2178 
2179 	task_policy_update_locked(task, &pend_token);
2180 
2181 	task_unlock(task);
2182 
2183 	/* Ensure the donor bit is updated to be in sync with the new live donor status */
2184 	pend_token.tpt_update_live_donor = 1;
2185 
2186 	task_policy_update_complete_unlocked(task, &pend_token);
2187 
2188 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2189 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
2190 	    task_pid(task), trequested_0(task), trequested_1(task),
2191 	    task_is_importance_receiver(task), 0);
2192 }
2193 
2194 /*
2195  * Inherit task role across exec
2196  */
2197 void
2198 proc_inherit_task_role(task_t new_task,
2199     task_t old_task)
2200 {
2201 	int role;
2202 
2203 	/* inherit the role from old task to new task */
2204 	role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
2205 	proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
2206 }
2207 
2208 /*
2209  * Compute the default main thread qos for a task
2210  */
2211 thread_qos_t
2212 task_compute_main_thread_qos(task_t task)
2213 {
2214 	thread_qos_t primordial_qos = THREAD_QOS_UNSPECIFIED;
2215 
2216 	thread_qos_t qos_clamp = task->requested_policy.trp_qos_clamp;
2217 
2218 	switch (task->requested_policy.trp_apptype) {
2219 	case TASK_APPTYPE_APP_DEFAULT:
2220 		primordial_qos = THREAD_QOS_USER_INTERACTIVE;
2221 		break;
2222 
2223 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2224 	case TASK_APPTYPE_DAEMON_STANDARD:
2225 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2226 	case TASK_APPTYPE_DRIVER:
2227 	case TASK_APPTYPE_APP_NONUI:
2228 		primordial_qos = THREAD_QOS_LEGACY;
2229 		break;
2230 
2231 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2232 		primordial_qos = THREAD_QOS_BACKGROUND;
2233 		break;
2234 	}
2235 
2236 	if (task_is_initproc(task)) {
2237 		/* PID 1 gets a special case */
2238 		primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED);
2239 	}
2240 
2241 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2242 		if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
2243 			primordial_qos = MIN(qos_clamp, primordial_qos);
2244 		} else {
2245 			primordial_qos = qos_clamp;
2246 		}
2247 	}
2248 
2249 	return primordial_qos;
2250 }
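/*
 * Editorial worked example: an app (TASK_APPTYPE_APP_DEFAULT) spawned with a
 * qos_clamp of THREAD_QOS_UTILITY starts from THREAD_QOS_USER_INTERACTIVE and
 * is then clamped: MIN(THREAD_QOS_UTILITY, THREAD_QOS_USER_INTERACTIVE) ==
 * THREAD_QOS_UTILITY, so its main thread begins life at utility QoS.
 */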
2251 
2252 boolean_t
2253 task_is_daemon(task_t task)
2254 {
2255 	switch (task->requested_policy.trp_apptype) {
2256 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2257 	case TASK_APPTYPE_DAEMON_STANDARD:
2258 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2259 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2260 		return TRUE;
2261 	default:
2262 		return FALSE;
2263 	}
2264 }
2265 
2266 bool
2267 task_is_driver(task_t task)
2268 {
2269 	if (!task) {
2270 		return FALSE;
2271 	}
2272 	return task->requested_policy.trp_apptype == TASK_APPTYPE_DRIVER;
2273 }
2274 
2275 bool
2276 task_is_app(task_t task)
2277 {
2278 	switch (task->requested_policy.trp_apptype) {
2279 	case TASK_APPTYPE_APP_DEFAULT:
2280 	case TASK_APPTYPE_APP_NONUI:
2281 		return TRUE;
2282 	default:
2283 		return FALSE;
2284 	}
2285 }
2286 
2287 
2288 /* for telemetry */
2289 integer_t
2290 task_grab_latency_qos(task_t task)
2291 {
2292 	return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
2293 }
2294 
2295 /* update the darwin background action state in the flags field for libproc */
2296 int
2297 proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
2298 {
2299 	if (task->requested_policy.trp_ext_darwinbg) {
2300 		*flagsp |= PROC_FLAG_EXT_DARWINBG;
2301 	}
2302 
2303 	if (task->requested_policy.trp_int_darwinbg) {
2304 		*flagsp |= PROC_FLAG_DARWINBG;
2305 	}
2306 
2307 #if !defined(XNU_TARGET_OS_OSX)
2308 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
2309 		*flagsp |= PROC_FLAG_IOS_APPLEDAEMON;
2310 	}
2311 
2312 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2313 		*flagsp |= PROC_FLAG_IOS_IMPPROMOTION;
2314 	}
2315 #endif /* !defined(XNU_TARGET_OS_OSX) */
2316 
2317 	if (task_is_app(task)) {
2318 		*flagsp |= PROC_FLAG_APPLICATION;
2319 	}
2320 
2321 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2322 		*flagsp |= PROC_FLAG_ADAPTIVE;
2323 	}
2324 
2325 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
2326 	    task->requested_policy.trp_boosted == 1) {
2327 		*flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;
2328 	}
2329 
2330 	if (task_is_importance_donor(task)) {
2331 		*flagsp |= PROC_FLAG_IMPORTANCE_DONOR;
2332 	}
2333 
2334 	if (task->effective_policy.tep_sup_active) {
2335 		*flagsp |= PROC_FLAG_SUPPRESSED;
2336 	}
2337 
2338 	return 0;
2339 }
2340 
2341 /*
2342  * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
2343  * The current scheme packs as much data into a single tracepoint as it can.
2344  *
2345  * Each task/thread requested/effective structure is 64 bits in size. Any
2346  * given tracepoint will emit either requested or effective data, but not both.
2347  *
2348  * A tracepoint may emit any of task, thread, or task & thread data.
2349  *
2350  * The type of data emitted varies with pointer size. Where possible, both
2351  * task and thread data are emitted. In LP32 systems, the first and second
2352  * halves of either the task or thread data are emitted.
2353  *
2354  * The code uses uintptr_t array indexes instead of high/low to avoid
2355  * confusion WRT big vs little endian.
2356  *
2357  * The truth table for the tracepoint data functions is below, and has the
2358  * following invariants:
2359  *
2360  * 1) task and thread are uintptr_t*
2361  * 2) task may never be NULL
2362  *
2363  *
2364  *                                     LP32            LP64
2365  * trequested_0(task, NULL)            task[0]         task[0]
2366  * trequested_1(task, NULL)            task[1]         NULL
2367  * trequested_0(task, thread)          thread[0]       task[0]
2368  * trequested_1(task, thread)          thread[1]       thread[0]
2369  *
2370  * Basically, you get a full task or thread on LP32, and both on LP64.
2371  *
2372  * The uintptr_t munging here is squicky enough to deserve a comment.
2373  *
2374  * The variables we are accessing are laid out in memory like this:
2375  *
2376  * [            LP64 uintptr_t  0          ]
2377  * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
2378  *
2379  *      1   2   3   4     5   6   7   8
2380  *
2381  */
2382 
2383 static uintptr_t
2384 trequested_0(task_t task)
2385 {
2386 	static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
2387 
2388 	uintptr_t* raw = (uintptr_t*)&task->requested_policy;
2389 
2390 	return raw[0];
2391 }
2392 
2393 static uintptr_t
2394 trequested_1(task_t task)
2395 {
2396 #if defined __LP64__
2397 	(void)task;
2398 	return 0;
2399 #else
2400 	uintptr_t* raw = (uintptr_t*)(&task->requested_policy);
2401 	return raw[1];
2402 #endif
2403 }
2404 
2405 static uintptr_t
2406 teffective_0(task_t task)
2407 {
2408 	static_assert(sizeof(struct task_effective_policy) == sizeof(uint64_t), "size invariant violated");
2409 	uintptr_t* raw = (uintptr_t*)&task->effective_policy;
2410 
2411 	return raw[0];
2412 }
2413 
2414 static uintptr_t
2415 teffective_1(task_t task)
2416 {
2417 #if defined __LP64__
2418 	(void)task;
2419 	return 0;
2420 #else
2421 	uintptr_t* raw = (uintptr_t*)(&task->effective_policy);
2422 	return raw[1];
2423 #endif
2424 }
2425 
2426 /* dump pending for tracepoint */
2427 uint32_t
2428 tpending(task_pend_token_t pend_token)
2429 {
2430 	return *(uint32_t*)(void*)(pend_token);
2431 }
2432 
2433 uint64_t
2434 task_requested_bitfield(task_t task)
2435 {
2436 	uint64_t bits = 0;
2437 	struct task_requested_policy requested = task->requested_policy;
2438 
2439 	bits |= (requested.trp_int_darwinbg     ? POLICY_REQ_INT_DARWIN_BG  : 0);
2440 	bits |= (requested.trp_ext_darwinbg     ? POLICY_REQ_EXT_DARWIN_BG  : 0);
2441 	bits |= (requested.trp_int_iotier       ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
2442 	bits |= (requested.trp_ext_iotier       ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
2443 	bits |= (requested.trp_int_iopassive    ? POLICY_REQ_INT_PASSIVE_IO : 0);
2444 	bits |= (requested.trp_ext_iopassive    ? POLICY_REQ_EXT_PASSIVE_IO : 0);
2445 	bits |= (requested.trp_bg_iotier        ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT)   : 0);
2446 	bits |= (requested.trp_terminated       ? POLICY_REQ_TERMINATED     : 0);
2447 
2448 	bits |= (requested.trp_boosted          ? POLICY_REQ_BOOSTED        : 0);
2449 	bits |= (requested.trp_tal_enabled      ? POLICY_REQ_TAL_ENABLED    : 0);
2450 	bits |= (requested.trp_apptype          ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT)  : 0);
2451 	bits |= (requested.trp_role             ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT)     : 0);
2452 
2453 	bits |= (requested.trp_sup_active       ? POLICY_REQ_SUP_ACTIVE         : 0);
2454 	bits |= (requested.trp_sup_lowpri_cpu   ? POLICY_REQ_SUP_LOWPRI_CPU     : 0);
2455 	bits |= (requested.trp_sup_cpu          ? POLICY_REQ_SUP_CPU            : 0);
2456 	bits |= (requested.trp_sup_timer        ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
2457 	bits |= (requested.trp_sup_throughput   ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT)     : 0);
2458 	bits |= (requested.trp_sup_disk         ? POLICY_REQ_SUP_DISK_THROTTLE  : 0);
2459 	bits |= (requested.trp_sup_bg_sockets   ? POLICY_REQ_SUP_BG_SOCKETS     : 0);
2460 
2461 	bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
2462 	bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
2463 	bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
2464 	bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
2465 	bits |= (requested.trp_sfi_managed      ? POLICY_REQ_SFI_MANAGED        : 0);
2466 	bits |= (requested.trp_qos_clamp        ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT)        : 0);
2467 
2468 	return bits;
2469 }
2470 
2471 uint64_t
2472 task_effective_bitfield(task_t task)
2473 {
2474 	uint64_t bits = 0;
2475 	struct task_effective_policy effective = task->effective_policy;
2476 
2477 	bits |= (effective.tep_io_tier          ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
2478 	bits |= (effective.tep_io_passive       ? POLICY_EFF_IO_PASSIVE     : 0);
2479 	bits |= (effective.tep_darwinbg         ? POLICY_EFF_DARWIN_BG      : 0);
2480 	bits |= (effective.tep_lowpri_cpu       ? POLICY_EFF_LOWPRI_CPU     : 0);
2481 	bits |= (effective.tep_terminated       ? POLICY_EFF_TERMINATED     : 0);
2482 	bits |= (effective.tep_all_sockets_bg   ? POLICY_EFF_ALL_SOCKETS_BG : 0);
2483 	bits |= (effective.tep_new_sockets_bg   ? POLICY_EFF_NEW_SOCKETS_BG : 0);
2484 	bits |= (effective.tep_bg_iotier        ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
2485 	bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0);
2486 
2487 	bits |= (effective.tep_tal_engaged      ? POLICY_EFF_TAL_ENGAGED    : 0);
2488 	bits |= (effective.tep_watchers_bg      ? POLICY_EFF_WATCHERS_BG    : 0);
2489 	bits |= (effective.tep_sup_active       ? POLICY_EFF_SUP_ACTIVE     : 0);
2490 	bits |= (effective.tep_suppressed_cpu   ? POLICY_EFF_SUP_CPU        : 0);
2491 	bits |= (effective.tep_role             ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT)        : 0);
2492 	bits |= (effective.tep_latency_qos      ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
2493 	bits |= (effective.tep_through_qos      ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
2494 	bits |= (effective.tep_sfi_managed      ? POLICY_EFF_SFI_MANAGED    : 0);
2495 	bits |= (effective.tep_qos_ceiling      ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);
2496 
2497 	return bits;
2498 }
2499 
2500 
2501 /*
2502  * Resource usage and CPU related routines
2503  */
2504 
2505 int
2506 proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
2507 {
2508 	int error = 0;
2509 	int scope;
2510 
2511 	task_lock(task);
2512 
2513 
2514 	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
2515 	task_unlock(task);
2516 
2517 	/*
2518 	 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
2519 	 */
2520 	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2521 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
2522 	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2523 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
2524 	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
2525 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2526 	}
2527 
2528 	return error;
2529 }
2530 
2531 /*
2532  * Configure the default CPU usage monitor parameters.
2533  *
2534  * For tasks which have this mechanism activated: if any thread in the
2535  * process consumes more CPU than this, an EXC_RESOURCE exception will be generated.
2536  */
2537 void
2538 proc_init_cpumon_params(void)
2539 {
2540 	/*
2541 	 * The max CPU percentage can be configured via the boot-args and
2542 	 * a key in the device tree. The boot-args are honored first, then the
2543 	 * device tree.
2544 	 */
2545 	if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
2546 	    sizeof(proc_max_cpumon_percentage))) {
2547 		uint64_t max_percentage = 0ULL;
2548 
2549 		if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
2550 		    sizeof(max_percentage))) {
2551 			max_percentage = DEFAULT_CPUMON_PERCENTAGE;
2552 		}
2553 
2554 		assert(max_percentage <= UINT8_MAX);
2555 		proc_max_cpumon_percentage = (uint8_t) max_percentage;
2556 	}
2557 
2558 	if (proc_max_cpumon_percentage > 100) {
2559 		proc_max_cpumon_percentage = 100;
2560 	}
2561 
2562 	/*
2563 	 * The interval should be specified in seconds.
2564 	 *
2565 	 * Like the max CPU percentage, the max CPU interval can be configured
2566 	 * via boot-args and the device tree.
2567 	 */
2568 	if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
2569 	    sizeof(proc_max_cpumon_interval))) {
2570 		if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
2571 		    sizeof(proc_max_cpumon_interval))) {
2572 			proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
2573 		}
2574 	}
2575 
2576 	proc_max_cpumon_interval *= NSEC_PER_SEC;
2577 
2578 	/* TEMPORARY boot arg to control App suppression */
2579 	PE_parse_boot_argn("task_policy_suppression_flags",
2580 	    &task_policy_suppression_flags,
2581 	    sizeof(task_policy_suppression_flags));
2582 
2583 	/* adjust suppression disk policy if called for in boot arg */
2584 	if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) {
2585 		proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER2;
2586 	}
2587 }
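/*
 * Editorial example: the defaults above can be overridden at boot with
 * boot-args of the form (values here are hypothetical)
 *
 *	max_cpumon_percentage=75 max_cpumon_interval=300
 *
 * where the percentage is clamped to 100 and the interval is given in seconds
 * and converted to nanoseconds above.
 */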
2588 
2589 /*
2590  * Currently supported configurations for CPU limits.
2591  *
2592  * Policy				| Deadline-based CPU limit | Percentage-based CPU limit
2593  * -------------------------------------+--------------------------+------------------------------
2594  * PROC_POLICY_RSRCACT_THROTTLE		| ENOTSUP		   | Task-wide scope only
2595  * PROC_POLICY_RSRCACT_SUSPEND		| Task-wide scope only	   | ENOTSUP
2596  * PROC_POLICY_RSRCACT_TERMINATE	| Task-wide scope only	   | ENOTSUP
2597  * PROC_POLICY_RSRCACT_NOTIFY_KQ	| Task-wide scope only	   | ENOTSUP
2598  * PROC_POLICY_RSRCACT_NOTIFY_EXC	| ENOTSUP		   | Per-thread scope only
2599  *
2600  * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
2601  * after the specified amount of wallclock time has elapsed.
2602  *
2603  * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
2604  * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
2605  * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
2606  * in the task are added together), or by any one thread in the task (so-called "per-thread" scope).
2607  *
2608  * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
2609  * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
2610  * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
2611  * but the potential consumer of the API at the time was insisting on wallclock time instead.
2612  *
2613  * Currently, requesting notification via an exception is the only way to get per-thread scope for a
2614  * CPU limit. All other types of notifications force task-wide scope for the limit.
2615  */
2616 int
2617 proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
2618     int cpumon_entitled)
2619 {
2620 	int error = 0;
2621 	int scope;
2622 
2623 	/*
2624 	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
2625 	 */
2626 	switch (policy) {
2627 	// If no policy is explicitly given, the default is to throttle.
2628 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
2629 	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
2630 		if (deadline != 0) {
2631 			return ENOTSUP;
2632 		}
2633 		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2634 		break;
2635 	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
2636 	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
2637 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
2638 		if (percentage != 0) {
2639 			return ENOTSUP;
2640 		}
2641 		scope = TASK_RUSECPU_FLAGS_DEADLINE;
2642 		break;
2643 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
2644 		if (deadline != 0) {
2645 			return ENOTSUP;
2646 		}
2647 		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2648 #ifdef CONFIG_NOMONITORS
2649 		return error;
2650 #endif /* CONFIG_NOMONITORS */
2651 		break;
2652 	default:
2653 		return EINVAL;
2654 	}
2655 
2656 	task_lock(task);
2657 	if (task != current_task()) {
2658 		task->policy_ru_cpu_ext = policy;
2659 	} else {
2660 		task->policy_ru_cpu = policy;
2661 	}
2662 	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
2663 	task_unlock(task);
2664 	return error;
2665 }
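/*
 * Editorial example, per the matrix above: install a per-thread limit that
 * delivers EXC_RESOURCE when any one thread burns more than 50% of a CPU over
 * a 180-second window.  The caller and the values are hypothetical.
 *
 *	error = proc_set_task_ruse_cpu(task,
 *	    TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
 *	    50,				percentage
 *	    180 * NSEC_PER_SEC,		interval, in nanoseconds
 *	    0,				no deadline
 *	    0);				not cpumon-entitled
 */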
2666 
2667 /* TODO: get rid of these */
2668 #define TASK_POLICY_CPU_RESOURCE_USAGE          0
2669 #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE     1
2670 #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE   2
2671 #define TASK_POLICY_DISK_RESOURCE_USAGE         3
2672 #define TASK_POLICY_NETWORK_RESOURCE_USAGE      4
2673 #define TASK_POLICY_POWER_RESOURCE_USAGE        5
2674 
2675 #define TASK_POLICY_RESOURCE_USAGE_COUNT        6
2676 
2677 int
2678 proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
2679 {
2680 	int error = 0;
2681 	int action;
2682 	void * bsdinfo = NULL;
2683 
2684 	task_lock(task);
2685 	if (task != current_task()) {
2686 		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2687 	} else {
2688 		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2689 	}
2690 
2691 	error = task_clear_cpuusage_locked(task, cpumon_entitled);
2692 	if (error != 0) {
2693 		goto out;
2694 	}
2695 
2696 	action = task->applied_ru_cpu;
2697 	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2698 		/* reset action */
2699 		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2700 	}
2701 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2702 		bsdinfo = get_bsdtask_info(task);
2703 		task_unlock(task);
2704 		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2705 		goto out1;
2706 	}
2707 
2708 out:
2709 	task_unlock(task);
2710 out1:
2711 	return error;
2712 }
2713 
2714 /* used to apply resource limit related actions */
2715 static int
2716 task_apply_resource_actions(task_t task, int type)
2717 {
2718 	int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2719 	void * bsdinfo = NULL;
2720 
2721 	switch (type) {
2722 	case TASK_POLICY_CPU_RESOURCE_USAGE:
2723 		break;
2724 	case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
2725 	case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
2726 	case TASK_POLICY_DISK_RESOURCE_USAGE:
2727 	case TASK_POLICY_NETWORK_RESOURCE_USAGE:
2728 	case TASK_POLICY_POWER_RESOURCE_USAGE:
2729 		return 0;
2730 
2731 	default:
2732 		return 1;
2733 	}
2734 	;
2735 
2736 	/* only cpu actions for now */
2737 	task_lock(task);
2738 
2739 	if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2740 		/* apply action */
2741 		task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
2742 		action = task->applied_ru_cpu_ext;
2743 	} else {
2744 		action = task->applied_ru_cpu_ext;
2745 	}
2746 
2747 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2748 		bsdinfo = get_bsdtask_info(task);
2749 		task_unlock(task);
2750 		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2751 	} else {
2752 		task_unlock(task);
2753 	}
2754 
2755 	return 0;
2756 }
2757 
2758 /*
2759  * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
2760  * only allows for one at a time. This means that if there is a per-thread limit active, the other
2761  * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
2762  * to the caller, and prefer that, but there's no need for that at the moment.
2763  */
2764 static int
2765 task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
2766 {
2767 	*percentagep = 0;
2768 	*intervalp = 0;
2769 	*deadlinep = 0;
2770 
2771 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
2772 		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2773 		*percentagep = task->rusage_cpu_perthr_percentage;
2774 		*intervalp = task->rusage_cpu_perthr_interval;
2775 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
2776 		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2777 		*percentagep = task->rusage_cpu_percentage;
2778 		*intervalp = task->rusage_cpu_interval;
2779 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
2780 		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
2781 		*deadlinep = task->rusage_cpu_deadline;
2782 	} else {
2783 		*scope = 0;
2784 	}
2785 
2786 	return 0;
2787 }
2788 
2789 /*
2790  * Suspend the CPU usage monitor for the task.  The return value indicates
2791  * whether the mechanism was actually enabled.
2792  */
2793 int
2794 task_suspend_cpumon(task_t task)
2795 {
2796 	thread_t thread;
2797 
2798 	task_lock_assert_owned(task);
2799 
2800 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
2801 		return KERN_INVALID_ARGUMENT;
2802 	}
2803 
2804 	/*
2805 	 * Suspend monitoring for the task, and propagate that change to each thread.
2806 	 */
2807 	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
2808 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2809 		act_set_astledger(thread);
2810 	}
2811 
2812 	return KERN_SUCCESS;
2813 }
2814 
2815 /*
2816  * Remove all traces of the CPU monitor.
2817  */
2818 int
2819 task_disable_cpumon(task_t task)
2820 {
2821 	int kret;
2822 
2823 	task_lock_assert_owned(task);
2824 
2825 	kret = task_suspend_cpumon(task);
2826 	if (kret) {
2827 		return kret;
2828 	}
2829 
2830 	/* Once we clear these values, the monitor can't be resumed */
2831 	task->rusage_cpu_perthr_percentage = 0;
2832 	task->rusage_cpu_perthr_interval = 0;
2833 
2834 	return KERN_SUCCESS;
2835 }
2836 
2837 
2838 static int
2839 task_enable_cpumon_locked(task_t task)
2840 {
2841 	thread_t thread;
2842 	task_lock_assert_owned(task);
2843 
2844 	if (task->rusage_cpu_perthr_percentage == 0 ||
2845 	    task->rusage_cpu_perthr_interval == 0) {
2846 		return KERN_INVALID_ARGUMENT;
2847 	}
2848 
2849 	task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2850 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2851 		act_set_astledger(thread);
2852 	}
2853 
2854 	return KERN_SUCCESS;
2855 }
2856 
2857 int
2858 task_resume_cpumon(task_t task)
2859 {
2860 	kern_return_t kret;
2861 
2862 	if (!task) {
2863 		return EINVAL;
2864 	}
2865 
2866 	task_lock(task);
2867 	kret = task_enable_cpumon_locked(task);
2868 	task_unlock(task);
2869 
2870 	return kret;
2871 }
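/*
 * Editorial note on the cpumon lifecycle: task_suspend_cpumon() clears the
 * per-thread flag but keeps the percentage/interval, so task_resume_cpumon()
 * can re-arm the monitor; task_disable_cpumon() also zeroes those parameters,
 * after which task_resume_cpumon() fails with KERN_INVALID_ARGUMENT.
 * A hypothetical sequence:
 *
 *	task_lock(task);
 *	task_suspend_cpumon(task);	monitoring off, parameters kept
 *	task_unlock(task);
 *	...
 *	task_resume_cpumon(task);	re-arms with the saved parameters
 */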
2872 
2873 
2874 /* duplicate values from bsd/sys/process_policy.h */
2875 #define PROC_POLICY_CPUMON_DISABLE      0xFF
2876 #define PROC_POLICY_CPUMON_DEFAULTS     0xFE
2877 
2878 static int
2879 task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
2880 {
2881 	uint64_t abstime = 0;
2882 	uint64_t limittime = 0;
2883 
2884 	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
2885 
2886 	/* By default, refill once per second */
2887 	if (interval == 0) {
2888 		interval = NSEC_PER_SEC;
2889 	}
2890 
2891 	if (percentage != 0) {
2892 		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2893 			boolean_t warn = FALSE;
2894 
2895 			/*
2896 			 * A per-thread CPU limit on a task generates an exception
2897 			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
2898 			 * exceeds the limit.
2899 			 */
2900 
2901 			if (percentage == PROC_POLICY_CPUMON_DISABLE) {
2902 				if (cpumon_entitled) {
2903 					/* 25095698 - task_disable_cpumon() should be reliable */
2904 					task_disable_cpumon(task);
2905 					return 0;
2906 				}
2907 
2908 				/*
2909 				 * This task wishes to disable the CPU usage monitor, but it's
2910 				 * missing the required entitlement:
2911 				 *     com.apple.private.kernel.override-cpumon
2912 				 *
2913 				 * Instead, treat this as a request to reset its params
2914 				 * back to the defaults.
2915 				 */
2916 				warn = TRUE;
2917 				percentage = PROC_POLICY_CPUMON_DEFAULTS;
2918 			}
2919 
2920 			if (percentage == PROC_POLICY_CPUMON_DEFAULTS) {
2921 				percentage = proc_max_cpumon_percentage;
2922 				interval   = proc_max_cpumon_interval;
2923 			}
2924 
2925 			if (percentage > 100) {
2926 				percentage = 100;
2927 			}
2928 
2929 			/*
2930 			 * Passing in an interval of -1 means either:
2931 			 * - Leave the interval as-is, if there's already a per-thread
2932 			 *   limit configured
2933 			 * - Use the system default.
2934 			 */
2935 			if (interval == -1ULL) {
2936 				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2937 					interval = task->rusage_cpu_perthr_interval;
2938 				} else {
2939 					interval = proc_max_cpumon_interval;
2940 				}
2941 			}
2942 
2943 			/*
2944 			 * Enforce global caps on CPU usage monitor here if the process is not
2945 			 * entitled to escape the global caps.
2946 			 */
2947 			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
2948 				warn = TRUE;
2949 				percentage = proc_max_cpumon_percentage;
2950 			}
2951 
2952 			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
2953 				warn = TRUE;
2954 				interval = proc_max_cpumon_interval;
2955 			}
2956 
2957 			if (warn) {
2958 				int       pid = 0;
2959 				const char *procname = "unknown";
2960 
2961 #ifdef MACH_BSD
2962 				pid = proc_selfpid();
2963 				void *cur_bsd_info = get_bsdtask_info(current_task());
2964 				if (cur_bsd_info != NULL) {
2965 					procname = proc_name_address(cur_bsd_info);
2966 				}
2967 #endif
2968 
2969 				printf("process %s[%d] denied attempt to escape CPU monitor"
2970 				    " (missing required entitlement).\n", procname, pid);
2971 			}
2972 
2973 			/* configure the limit values */
2974 			task->rusage_cpu_perthr_percentage = percentage;
2975 			task->rusage_cpu_perthr_interval = interval;
2976 
2977 			/* and enable the CPU monitor */
2978 			(void)task_enable_cpumon_locked(task);
2979 		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2980 			/*
2981 			 * Currently, a proc-wide CPU limit always blocks if the limit is
2982 			 * exceeded (LEDGER_ACTION_BLOCK).
2983 			 */
2984 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
2985 			task->rusage_cpu_percentage = percentage;
2986 			task->rusage_cpu_interval = interval;
2987 
2988 			limittime = (interval * percentage) / 100;
2989 			nanoseconds_to_absolutetime(limittime, &abstime);
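			/*
			 * Editorial worked example: percentage == 50 with the
			 * default interval of NSEC_PER_SEC gives limittime ==
			 * 500000000 ns, i.e. the task as a whole may consume
			 * half a second of CPU time per one-second refill.
			 */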
2990 
2991 			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
2992 			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
2993 			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2994 		}
2995 	}
2996 
2997 	if (deadline != 0) {
2998 		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
2999 
3000 		/* if already in use, cancel and wait for it to clean out */
3001 		if (task->rusage_cpu_callt != NULL) {
3002 			task_unlock(task);
3003 			thread_call_cancel_wait(task->rusage_cpu_callt);
3004 			task_lock(task);
3005 		}
3006 		if (task->rusage_cpu_callt == NULL) {
3007 			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
3008 		}
3009 		/* setup callout */
3010 		if (task->rusage_cpu_callt != 0) {
3011 			uint64_t save_abstime = 0;
3012 
3013 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
3014 			task->rusage_cpu_deadline = deadline;
3015 
3016 			nanoseconds_to_absolutetime(deadline, &abstime);
3017 			save_abstime = abstime;
3018 			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
3019 			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
3020 		}
3021 	}
3022 
3023 	return 0;
3024 }
3025 
3026 int
3027 task_clear_cpuusage(task_t task, int cpumon_entitled)
3028 {
3029 	int retval = 0;
3030 
3031 	task_lock(task);
3032 	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
3033 	task_unlock(task);
3034 
3035 	return retval;
3036 }
3037 
3038 static int
3039 task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
3040 {
3041 	thread_call_t savecallt;
3042 
3043 	/* cancel percentage handling if set */
3044 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
3045 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
3046 		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
3047 		task->rusage_cpu_percentage = 0;
3048 		task->rusage_cpu_interval = 0;
3049 	}
3050 
3051 	/*
3052 	 * Disable the CPU usage monitor.
3053 	 */
3054 	if (cpumon_entitled) {
3055 		task_disable_cpumon(task);
3056 	}
3057 
3058 	/* cancel deadline handling if set */
3059 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
3060 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
3061 		if (task->rusage_cpu_callt != 0) {
3062 			savecallt = task->rusage_cpu_callt;
3063 			task->rusage_cpu_callt = NULL;
3064 			task->rusage_cpu_deadline = 0;
3065 			task_unlock(task);
3066 			thread_call_cancel_wait(savecallt);
3067 			thread_call_free(savecallt);
3068 			task_lock(task);
3069 		}
3070 	}
3071 	return 0;
3072 }
3073 
3074 /* called by ledger unit to enforce action due to resource usage criteria being met */
3075 static void
3076 task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
3077 {
3078 	task_t task = (task_t)param0;
3079 	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
3080 	return;
3081 }
3082 
3083 
3084 /*
3085  * Routines for taskwatch and pidbind
3086  */
3087 
3088 #if CONFIG_TASKWATCH
3089 
3090 LCK_MTX_DECLARE_ATTR(task_watch_mtx, &task_lck_grp, &task_lck_attr);
3091 
3092 static void
3093 task_watch_lock(void)
3094 {
3095 	lck_mtx_lock(&task_watch_mtx);
3096 }
3097 
3098 static void
3099 task_watch_unlock(void)
3100 {
3101 	lck_mtx_unlock(&task_watch_mtx);
3102 }
3103 
3104 static void
3105 add_taskwatch_locked(task_t task, task_watch_t * twp)
3106 {
3107 	queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links);
3108 	task->num_taskwatchers++;
3109 }
3110 
3111 static void
3112 remove_taskwatch_locked(task_t task, task_watch_t * twp)
3113 {
3114 	queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links);
3115 	task->num_taskwatchers--;
3116 }
3117 
3118 
3119 int
3120 proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind)
3121 {
3122 	thread_t target_thread = NULL;
3123 	int ret = 0, setbg = 0;
3124 	task_watch_t *twp = NULL;
3125 	task_t task = TASK_NULL;
3126 
3127 	target_thread = task_findtid(curtask, tid);
3128 	if (target_thread == NULL) {
3129 		return ESRCH;
3130 	}
3131 	/* holds thread reference */
3132 
3133 	if (bind != 0) {
3134 		/* is the task still active? */
3135 		task_lock(target_task);
3136 		if (target_task->active == 0) {
3137 			task_unlock(target_task);
3138 			ret = ESRCH;
3139 			goto out;
3140 		}
3141 		task_unlock(target_task);
3142 
3143 		twp = kalloc_type(task_watch_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3144 
3145 		task_watch_lock();
3146 
3147 		if (target_thread->taskwatch != NULL) {
3148 			/* already bound to another task */
3149 			task_watch_unlock();
3150 
3151 			kfree_type(task_watch_t, twp);
3152 			ret = EBUSY;
3153 			goto out;
3154 		}
3155 
3156 		task_reference(target_task);
3157 
3158 		setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG);
3159 
3160 		twp->tw_task = target_task;             /* holds the task reference */
3161 		twp->tw_thread = target_thread;         /* holds the thread reference */
3162 		twp->tw_state = setbg;
3163 		twp->tw_importance = target_thread->importance;
3164 
3165 		add_taskwatch_locked(target_task, twp);
3166 
3167 		target_thread->taskwatch = twp;
3168 
3169 		task_watch_unlock();
3170 
3171 		if (setbg) {
3172 			set_thread_appbg(target_thread, setbg, INT_MIN);
3173 		}
3174 
3175 		/* twp now holds the thread reference; don't drop it below */
3176 		target_thread = NULL;
3177 	} else {
3178 		/* unbind */
3179 		task_watch_lock();
3180 		if ((twp = target_thread->taskwatch) != NULL) {
3181 			task = twp->tw_task;
3182 			target_thread->taskwatch = NULL;
3183 			remove_taskwatch_locked(task, twp);
3184 
3185 			task_watch_unlock();
3186 
3187 			task_deallocate(task);                  /* drop task ref in twp */
3188 			set_thread_appbg(target_thread, 0, twp->tw_importance);
3189 			thread_deallocate(target_thread);       /* drop thread ref in twp */
3190 			kfree_type(task_watch_t, twp);
3191 		} else {
3192 			task_watch_unlock();
3193 			ret = 0;                /* return success if it is not already bound */
3194 			goto out;
3195 		}
3196 	}
3197 out:
3198 	thread_deallocate(target_thread);       /* drop thread ref acquired in this routine */
3199 	return ret;
3200 }
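/*
 * Editorial sketch of the bind/unbind protocol above (tid and target are
 * hypothetical): binding makes the thread mirror the target task's
 * watchers-BG state; unbinding drops the binding and clears the thread's
 * pidbind-BG state.
 *
 *	ret = proc_lf_pidbind(current_task(), tid, target, 1);	bind
 *	...
 *	ret = proc_lf_pidbind(current_task(), tid, target, 0);	unbind
 */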
3201 
3202 static void
3203 set_thread_appbg(thread_t thread, int setbg, __unused int importance)
3204 {
3205 	int enable = (setbg ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE);
3206 
3207 	proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_PIDBIND_BG, enable);
3208 }
3209 
3210 static void
3211 apply_appstate_watchers(task_t task)
3212 {
3213 	int numwatchers = 0, i, j, setbg;
3214 	thread_watchlist_t * threadlist;
3215 	task_watch_t * twp;
3216 
3217 retry:
3218 	/* if no watchers on the list return */
3219 	if ((numwatchers = task->num_taskwatchers) == 0) {
3220 		return;
3221 	}
3222 
3223 	threadlist = kalloc_type(thread_watchlist_t, numwatchers, Z_WAITOK | Z_ZERO);
3224 	if (threadlist == NULL) {
3225 		return;
3226 	}
3227 
3228 	task_watch_lock();
3229 	/*serialize application of app state changes */
3230 
3231 	if (task->watchapplying != 0) {
3232 		lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT);
3233 		task_watch_unlock();
3234 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3235 		goto retry;
3236 	}
3237 
3238 	if (numwatchers != task->num_taskwatchers) {
3239 		task_watch_unlock();
3240 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3241 		goto retry;
3242 	}
3243 
3244 	setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG);
3245 
3246 	task->watchapplying = 1;
3247 	i = 0;
3248 	queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) {
3249 		threadlist[i].thread = twp->tw_thread;
3250 		thread_reference(threadlist[i].thread);
3251 		if (setbg != 0) {
3252 			twp->tw_importance = twp->tw_thread->importance;
3253 			threadlist[i].importance = INT_MIN;
3254 		} else {
3255 			threadlist[i].importance = twp->tw_importance;
3256 		}
3257 		i++;
3258 		if (i > numwatchers) {
3259 			break;
3260 		}
3261 	}
3262 
3263 	task_watch_unlock();
3264 
3265 	for (j = 0; j < i; j++) {
3266 		set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance);
3267 		thread_deallocate(threadlist[j].thread);
3268 	}
3269 	kfree_type(thread_watchlist_t, numwatchers, threadlist);
3270 
3271 
3272 	task_watch_lock();
3273 	task->watchapplying = 0;
3274 	thread_wakeup_one(&task->watchapplying);
3275 	task_watch_unlock();
3276 }
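
/*
 * The routine above follows the snapshot-under-lock pattern:
 * set_thread_appbg() cannot be called with task_watch_mtx held, so the
 * watcher threads are copied (with references) into a temporary array
 * under the lock and the policy is applied after dropping it. A minimal
 * sketch of the pattern, assuming hypothetical elem_t, list_length(),
 * copy_with_refs(), act_on(), drop_ref() and lock()/unlock() helpers:
 *
 *	int n;
 *	elem_t *snap;
 *
 *	retry:
 *	n = list_length();                      // unlocked read, may be stale
 *	snap = kalloc_type(elem_t, n, Z_WAITOK | Z_ZERO);
 *	lock();
 *	if (n != list_length()) {               // list changed; start over
 *		unlock();
 *		kfree_type(elem_t, n, snap);
 *		goto retry;
 *	}
 *	copy_with_refs(snap, n);                // refs keep elements valid
 *	unlock();
 *	for (int k = 0; k < n; k++) {
 *		act_on(&snap[k]);               // safe to block here
 *		drop_ref(&snap[k]);
 *	}
 *	kfree_type(elem_t, n, snap);
 */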

void
thead_remove_taskwatch(thread_t thread)
{
	task_watch_t * twp;
	int importance = 0;

	task_watch_lock();
	if ((twp = thread->taskwatch) != NULL) {
		thread->taskwatch = NULL;
		remove_taskwatch_locked(twp->tw_task, twp);
	}
	task_watch_unlock();
	if (twp != NULL) {
		thread_deallocate(twp->tw_thread);
		task_deallocate(twp->tw_task);
		importance = twp->tw_importance;
		kfree_type(task_watch_t, twp);
		/* clear the thread's pidbind BG state */
		set_thread_appbg(thread, 0, importance);
	}
}

void
task_removewatchers(task_t task)
{
	queue_head_t queue;
	task_watch_t *twp;

	task_watch_lock();
	queue_new_head(&task->task_watchers, &queue, task_watch_t *, tw_links);
	queue_init(&task->task_watchers);

	queue_iterate(&queue, twp, task_watch_t *, tw_links) {
		/*
		 * Since the linkage is removed and thread state cleanup is already set up,
		 * remove the reference from the thread.
		 */
		twp->tw_thread->taskwatch = NULL;       /* linkage removed; clear the thread's pointer to the watch */
	}

	task->num_taskwatchers = 0;
	task_watch_unlock();

	while (!queue_empty(&queue)) {
		queue_remove_first(&queue, twp, task_watch_t *, tw_links);
		/* clear the thread's pidbind/network BG state */
		set_thread_appbg(twp->tw_thread, 0, twp->tw_importance);
		thread_deallocate(twp->tw_thread);
		task_deallocate(twp->tw_task);
		kfree_type(task_watch_t, twp);
	}
}
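
/*
 * Teardown above uses a splice-then-drain shape: queue_new_head() moves
 * the entire watcher list onto a stack-local queue head while the lock is
 * held, so the blocking cleanup (policy reset, reference drops, frees)
 * runs with no locks held. Condensed sketch, assuming a hypothetical
 * elem_t element type and cleanup() helper:
 *
 *	queue_head_t local;
 *	elem_t *e;
 *
 *	lock();
 *	queue_new_head(&shared, &local, elem_t *, link);  // steal the list
 *	queue_init(&shared);                              // leave it empty
 *	unlock();
 *	while (!queue_empty(&local)) {
 *		queue_remove_first(&local, e, elem_t *, link);
 *		cleanup(e);                               // may block or free
 *	}
 */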
#endif /* CONFIG_TASKWATCH */

/*
 * Routines for importance donation/inheritance/boosting
 */

static void
task_importance_update_live_donor(task_t target_task)
{
#if IMPORTANCE_INHERITANCE

	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(target_task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_update_live_donor(task_imp);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_donor(task_t task, boolean_t donating)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_donor(task_imp, donating);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_live_donor(task_t task, boolean_t live_donating)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_live_donor(task_imp, live_donating);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_receiver(task_t task, boolean_t receiving)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_receiver(task_imp, receiving);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_denap_receiver(task_t task, boolean_t denap)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_denap_receiver(task_imp, denap);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_reset(__imp_only task_t task)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	/* TODO: Lower importance downstream before disconnect */
	task_imp = task->task_imp_base;
	ipc_importance_reset(task_imp, FALSE);
	task_importance_update_live_donor(task);
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t parent_task)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t new_task_imp = IIT_NULL;

	new_task->task_imp_base = NULL;
	if (!parent_task) {
		return;
	}

	if (task_is_marked_importance_donor(parent_task)) {
		new_task_imp = ipc_importance_for_task(new_task, FALSE);
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_donor(new_task_imp, TRUE);
	}
	if (task_is_marked_live_importance_donor(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_live_donor(new_task_imp, TRUE);
	}
	/* Do not inherit 'receiver' on fork, vfexec or true spawn */
	if (task_is_exec_copy(new_task) &&
	    task_is_marked_importance_receiver(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_receiver(new_task_imp, TRUE);
	}
	if (task_is_marked_importance_denap_receiver(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
	}
	if (IIT_NULL != new_task_imp) {
		assert(new_task->task_imp_base == new_task_imp);
		ipc_importance_task_release(new_task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}
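
/*
 * Summary of the inheritance rules implemented above (derived from the
 * code): the donor, live-donor and de-nap receiver marks are copied from
 * the parent unconditionally; the receiver mark is copied only when
 * new_task is an exec copy, so fork, vfexec and true spawn never inherit
 * it. The final ipc_importance_task_release() balances the reference
 * returned by the first ipc_importance_for_task() call; the importance
 * structure stays referenced from new_task->task_imp_base.
 */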

#if IMPORTANCE_INHERITANCE
/*
 * Sets the task boost bit to the provided value.  Does NOT run the update function.
 *
 * Task lock must be held.
 */
static void
task_set_boost_locked(task_t task, boolean_t boost_active)
{
#if IMPORTANCE_TRACE
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
	    proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0);
#endif /* IMPORTANCE_TRACE */

	task->requested_policy.trp_boosted = boost_active;

#if IMPORTANCE_TRACE
	if (boost_active == TRUE) {
		DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
	} else {
		DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
	}
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
	    proc_selfpid(), task_pid(task),
	    trequested_0(task), trequested_1(task), 0);
#endif /* IMPORTANCE_TRACE */
}

/*
 * Sets the task boost bit to the provided value and applies the update.
 *
 * Task lock must be held.  Must call update complete after unlocking the task.
 */
void
task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
{
	task_set_boost_locked(task, boost_active);

	task_policy_update_locked(task, pend_token);
}
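
/*
 * Typical calling pattern (a sketch following this file's pend_token
 * convention): changes are staged under the task lock, then flushed with
 * task_policy_update_complete_unlocked() once the lock is dropped.
 *
 *	struct task_pend_token pend_token = {};
 *
 *	task_lock(task);
 *	task_update_boost_locked(task, TRUE, &pend_token);
 *	task_unlock(task);
 *	task_policy_update_complete_unlocked(task, &pend_token);
 */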

/*
 * Check if this task should donate importance.
 *
 * May be called without taking the task lock. In that case, donor status
 * can change underneath the caller, so sample it only once per donation event.
 */
boolean_t
task_is_importance_donor(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_donor(task->task_imp_base);
}

/*
 * Query the status of the task's donor mark.
 */
boolean_t
task_is_marked_importance_donor(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_marked_donor(task->task_imp_base);
}

/*
 * Query the status of the task's live donor and donor mark.
 */
boolean_t
task_is_marked_live_importance_donor(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
}

/*
 * This routine may be called without holding the task lock
 * since the value of imp_receiver can never be unset.
 */
boolean_t
task_is_importance_receiver(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
}

/*
 * Query the task's receiver mark.
 */
boolean_t
task_is_marked_importance_receiver(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
}

/*
 * This routine may be called without holding the task lock
 * since the value of the de-nap receiver mark can never be unset.
 */
boolean_t
task_is_importance_denap_receiver(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_denap_receiver(task->task_imp_base);
}

/*
 * Query the task's de-nap receiver mark.
 */
boolean_t
task_is_marked_importance_denap_receiver(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
}

/*
 * This routine may be called without holding the task lock
 * since the value of imp_receiver can never be unset.
 */
boolean_t
task_is_importance_receiver_type(task_t task)
{
	if (task->task_imp_base == IIT_NULL) {
		return FALSE;
	}
	return task_is_importance_receiver(task) ||
	       task_is_importance_denap_receiver(task);
}

/*
 * External importance assertions are managed by the process in userspace.
 * Internal importance assertions are the responsibility of the kernel.
 * Assertions are changed from internal to external via task_importance_externalize_assertion.
 */

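/*
 * Balanced hold/drop usage of the assertion interfaces below (editor's
 * sketch; error handling abridged). Note these return errno-style values
 * (0, EOVERFLOW, ENOTSUP) rather than kern_return_t:
 *
 *	if (task_importance_hold_file_lock_assertion(task, 1) == 0) {
 *		// ... boosted while the file lock is held ...
 *		(void)task_importance_drop_file_lock_assertion(task, 1);
 *	}
 */
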
int
task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* may be first time, so allow for possible importance setup */
	task_imp = ipc_importance_for_task(target_task, FALSE);
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_hold_internal_assertion(task_imp, count);
	ipc_importance_task_release(task_imp);

	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}

int
task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* may be first time, so allow for possible importance setup */
	task_imp = ipc_importance_for_task(target_task, FALSE);
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
	ipc_importance_task_release(task_imp);

	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}

int
task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}

int
task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_drop_file_lock_assertion(task_imp, count);
	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
}

int
task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
}

static void
task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
{
	int boost = 0;

	__imptrace_only int released_pid = 0;
	__imptrace_only int pid = task_pid(task);

	ipc_importance_task_t release_imp_task = IIT_NULL;

	if (IP_VALID(port) != 0) {
		ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);

		ip_mq_lock(port);

		/*
		 * The port must have been marked tempowner already.
		 * This also filters out ports whose receive rights
		 * are already enqueued in a message, as you can't
		 * change the right's destination once it's already
		 * on its way.
		 */
		if (port->ip_tempowner != 0) {
			assert(port->ip_impdonation != 0);

			boost = port->ip_impcount;
			if (IIT_NULL != ip_get_imp_task(port)) {
				/*
				 * if this port is already bound to a task,
				 * release the task reference and drop any
				 * watchport-forwarded boosts
				 */
				release_imp_task = ip_get_imp_task(port);
				port->ip_imp_task = IIT_NULL;
			}

			/* mark the port as watching another task (reference held in port->ip_imp_task) */
			if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
				port->ip_imp_task = new_imp_task;
				new_imp_task = IIT_NULL;
			}
		}
		ip_mq_unlock(port);

		if (IIT_NULL != new_imp_task) {
			ipc_importance_task_release(new_imp_task);
		}

		if (IIT_NULL != release_imp_task) {
			if (boost > 0) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, boost);
			}

			// released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
			ipc_importance_task_release(release_imp_task);
		}
#if IMPORTANCE_TRACE
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
		    proc_selfpid(), pid, boost, released_pid, 0);
#endif /* IMPORTANCE_TRACE */
	}

	*boostp = boost;
	return;
}


#endif /* IMPORTANCE_INHERITANCE */

/*
 * Routines for VM to query task importance
 */


/*
 * Order to be considered while estimating importance
 * for low memory notification and purging purgeable memory.
 */
#define TASK_IMPORTANCE_FOREGROUND     4
#define TASK_IMPORTANCE_NOTDARWINBG    1


/*
 * (Un)Mark the task as a privileged listener for memory notifications.
 * If marked, this task will be among the first to be notified, ahead of
 * the bulk of all other tasks, when the system enters a pressure level
 * of interest to this task.
 */
int
task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
{
	if (old_value != NULL) {
		*old_value = (boolean_t)task->low_mem_privileged_listener;
	} else {
		task_lock(task);
		task->low_mem_privileged_listener = (uint32_t)new_value;
		task_unlock(task);
	}

	return 0;
}
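
/*
 * Note the dual get/set behavior above: passing a non-NULL old_value makes
 * the call a pure query (new_value is ignored), while a NULL old_value
 * updates the flag under the task lock. Usage sketch (editor's addition):
 *
 *	boolean_t was_privileged = FALSE;
 *
 *	task_low_mem_privileged_listener(task, FALSE, &was_privileged);  // query only
 *	task_low_mem_privileged_listener(task, TRUE, NULL);              // set
 */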

/*
 * Checks if the task has already been notified.
 *
 * Condition: the task lock should be held while calling this function.
 */
boolean_t
task_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return FALSE;
	}

	if (pressurelevel == kVMPressureWarning) {
		return task->low_mem_notified_warn ? TRUE : FALSE;
	} else if (pressurelevel == kVMPressureCritical) {
		return task->low_mem_notified_critical ? TRUE : FALSE;
	} else {
		return TRUE;
	}
}


/*
 * Checks if the task has been used for purging.
 *
 * Condition: the task lock should be held while calling this function.
 */
boolean_t
task_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return FALSE;
	}

	if (pressurelevel == kVMPressureWarning) {
		return task->purged_memory_warn ? TRUE : FALSE;
	} else if (pressurelevel == kVMPressureCritical) {
		return task->purged_memory_critical ? TRUE : FALSE;
	} else {
		return TRUE;
	}
}


/*
 * Mark the task as notified with a memory notification.
 *
 * Condition: the task lock should be held while calling this function.
 */
void
task_mark_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning) {
		task->low_mem_notified_warn = 1;
	} else if (pressurelevel == kVMPressureCritical) {
		task->low_mem_notified_critical = 1;
	}
}


/*
 * Mark the task as purged.
 *
 * Condition: the task lock should be held while calling this function.
 */
void
task_mark_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning) {
		task->purged_memory_warn = 1;
	} else if (pressurelevel == kVMPressureCritical) {
		task->purged_memory_critical = 1;
	}
}


/*
 * Mark the task eligible for low memory notification.
 *
 * Condition: the task lock should be held while calling this function.
 */
void
task_clear_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning) {
		task->low_mem_notified_warn = 0;
	} else if (pressurelevel == kVMPressureCritical) {
		task->low_mem_notified_critical = 0;
	}
}


/*
 * Mark the task eligible for purging its purgeable memory.
 *
 * Condition: the task lock should be held while calling this function.
 */
void
task_clear_used_for_purging(task_t task)
{
	if (task == NULL) {
		return;
	}

	task->purged_memory_warn = 0;
	task->purged_memory_critical = 0;
}


/*
 * Estimate task importance for purging its purgeable memory
 * and low memory notification.
 *
 * Importance is calculated in the following order of criteria:
 * - Task role: background vs. foreground
 * - Boost status: not boosted vs. boosted
 * - Darwin BG status
 *
 * Returns: Estimated task importance. A less important task will have a
 *          lower estimated importance.
 */
int
task_importance_estimate(task_t task)
{
	int task_importance = 0;

	if (task == NULL) {
		return 0;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
		task_importance += TASK_IMPORTANCE_FOREGROUND;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) {
		task_importance += TASK_IMPORTANCE_NOTDARWINBG;
	}

	return task_importance;
}
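
/*
 * Worked example: a foreground, non-Darwin-BG task scores
 * TASK_IMPORTANCE_FOREGROUND + TASK_IMPORTANCE_NOTDARWINBG = 4 + 1 = 5,
 * while a Darwin-BG background task adds neither term and scores 0, so
 * it is purged first and notified with lower priority.
 */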

boolean_t
task_has_assertions(task_t task)
{
	return task->task_imp_base->iit_assertcnt ? TRUE : FALSE;
}


kern_return_t
send_resource_violation(typeof(send_cpu_usage_violation) sendfunc,
    task_t violator,
    struct ledger_entry_info *linfo,
    resource_notify_flags_t flags)
{
#ifndef MACH_BSD
	return KERN_NOT_SUPPORTED;
#else
	kern_return_t   kr = KERN_SUCCESS;
	proc_t          proc = NULL;
	posix_path_t    proc_path = "";
	proc_name_t     procname = "<unknown>";
	int             pid = -1;
	clock_sec_t     secs;
	clock_nsec_t    nsecs;
	mach_timespec_t timestamp;
	thread_t        curthread = current_thread();
	ipc_port_t      dstport = MACH_PORT_NULL;

	if (!violator) {
		kr = KERN_INVALID_ARGUMENT; goto finish;
	}

	/* extract violator information */
	task_lock(violator);
	if (!(proc = get_bsdtask_info(violator))) {
		task_unlock(violator);
		kr = KERN_INVALID_ARGUMENT; goto finish;
	}
	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
	pid = task_pid(violator);
	if (flags & kRNFatalLimitFlag) {
		kr = proc_pidpathinfo_internal(proc, 0, proc_path,
		    sizeof(proc_path), NULL);
	}
	task_unlock(violator);
	if (kr) {
		goto finish;
	}

	/* violation time ~ now */
	clock_get_calendar_nanotime(&secs, &nsecs);
	timestamp.tv_sec = (int32_t)secs;
	timestamp.tv_nsec = (int32_t)nsecs;
	/* 25567702 tracks widening mach_timespec_t */

	/* send message */
	kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
	    HOST_RESOURCE_NOTIFY_PORT, &dstport);
	if (kr) {
		goto finish;
	}

	thread_set_honor_qlimit(curthread);
	kr = sendfunc(dstport,
	    procname, pid, proc_path, timestamp,
	    linfo->lei_balance, linfo->lei_last_refill,
	    linfo->lei_limit, linfo->lei_refill_period,
	    flags);
	thread_clear_honor_qlimit(curthread);

	ipc_port_release_send(dstport);

finish:
	return kr;
#endif      /* MACH_BSD */
}
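
/*
 * Typical caller shape (editor's sketch; the CPU-usage limit paths are
 * the canonical call sites, and kRNFlagsNone stands in for whatever flags
 * the caller needs). A ledger snapshot supplies the balance/limit/refill
 * numbers carried in the notification:
 *
 *	struct ledger_entry_info lei;
 *
 *	ledger_get_entry_info(task->ledger, task_ledgers.cpu_time, &lei);
 *	(void)send_resource_violation(send_cpu_usage_violation, task,
 *	    &lei, kRNFlagsNone);
 */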

kern_return_t
send_resource_violation_with_fatal_port(typeof(send_port_space_violation) sendfunc,
    task_t violator,
    int64_t current_size,
    int64_t limit,
    mach_port_t fatal_port,
    resource_notify_flags_t flags)
{
	kern_return_t   kr = KERN_SUCCESS;      /* declared outside the #ifndef so both branches compile */
#ifndef MACH_BSD
	kr = KERN_NOT_SUPPORTED; goto finish;
#else
	proc_t          proc = NULL;
	proc_name_t     procname = "<unknown>";
	int             pid = -1;
	clock_sec_t     secs;
	clock_nsec_t    nsecs;
	mach_timespec_t timestamp;
	thread_t        curthread = current_thread();
	ipc_port_t      dstport = MACH_PORT_NULL;

	if (!violator) {
		kr = KERN_INVALID_ARGUMENT; goto finish;
	}

	/* extract violator information; no need to acquire the task lock */
	assert(violator == current_task());
	if (!(proc = get_bsdtask_info(violator))) {
		kr = KERN_INVALID_ARGUMENT; goto finish;
	}
	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
	pid = task_pid(violator);

	/* violation time ~ now */
	clock_get_calendar_nanotime(&secs, &nsecs);
	timestamp.tv_sec = (int32_t)secs;
	timestamp.tv_nsec = (int32_t)nsecs;
	/* 25567702 tracks widening mach_timespec_t */

	/* send message */
	kr = task_get_special_port(current_task(), TASK_RESOURCE_NOTIFY_PORT, &dstport);
	if (dstport == MACH_PORT_NULL) {
		kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
		    HOST_RESOURCE_NOTIFY_PORT, &dstport);
		if (kr) {
			goto finish;
		}
	}

	thread_set_honor_qlimit(curthread);
	kr = sendfunc(dstport,
	    procname, pid, timestamp,
	    current_size, limit, fatal_port,
	    flags);
	thread_clear_honor_qlimit(curthread);

	ipc_port_release_send(dstport);

#endif /* MACH_BSD */
finish:
	return kr;
}

4088 
4089 /*
4090  * Resource violations trace four 64-bit integers.  For K32, two additional
4091  * codes are allocated, the first with the low nibble doubled.  So if the K64
4092  * code is 0x042, the K32 codes would be 0x044 and 0x45.
4093  */
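
/*
 * Worked example of the K32 split below (editor's addition): for K64 code
 * 0x042, lownibble = (0x042 & 0x3) * 2 = 4, codeA = (0x040 | 0x4) = 0x044
 * and codeB = 0x045; codeA traces balance/last_refill and codeB traces
 * limit/refill_period, each 64-bit value split into 32-bit halves.
 */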
#ifdef __LP64__
void
trace_resource_violation(uint16_t code,
    struct ledger_entry_info *linfo)
{
	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code),
	    linfo->lei_balance, linfo->lei_last_refill,
	    linfo->lei_limit, linfo->lei_refill_period);
}
#else /* K32 */
/* TODO: create/find a trace_two_LLs() for K32 systems */
#define MASK32 0xffffffff
void
trace_resource_violation(uint16_t code,
    struct ledger_entry_info *linfo)
{
	int8_t lownibble = (code & 0x3) * 2;
	int16_t codeA = (code & 0xffc) | lownibble;
	int16_t codeB = codeA + 1;

	int32_t balance_high = (linfo->lei_balance >> 32) & MASK32;
	int32_t balance_low = linfo->lei_balance & MASK32;
	int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32;
	int32_t last_refill_low = linfo->lei_last_refill & MASK32;

	int32_t limit_high = (linfo->lei_limit >> 32) & MASK32;
	int32_t limit_low = linfo->lei_limit & MASK32;
	int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32;
	int32_t refill_period_low = linfo->lei_refill_period & MASK32;

	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA),
	    balance_high, balance_low,
	    last_refill_high, last_refill_low);
	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB),
	    limit_high, limit_low,
	    refill_period_high, refill_period_low);
}
#endif /* K64/K32 */