xref: /xnu-8019.80.24/osfmk/kern/task_policy.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/policy_internal.h>
#include <mach/task_policy.h>
#include <mach/task.h>
#include <mach/mach_types.h>
#include <mach/task_server.h>
#include <kern/host.h>                  /* host_priv_self()        */
#include <mach/host_priv.h>             /* host_get_special_port() */
#include <mach/host_special_ports.h>    /* RESOURCE_NOTIFY_PORT    */
#include <kern/sched.h>
#include <kern/task.h>
#include <mach/thread_policy.h>
#include <sys/errno.h>
#include <sys/resource.h>
#include <machine/limits.h>
#include <kern/ledger.h>
#include <kern/thread_call.h>
#include <kern/sfi.h>
#include <kern/coalition.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#if !defined(XNU_TARGET_OS_OSX)
#include <kern/kalloc.h>
#include <sys/errno.h>
#endif /* !defined(XNU_TARGET_OS_OSX) */

#if IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#if IMPORTANCE_TRACE
#include <mach/machine/sdt.h>
#endif /* IMPORTANCE_TRACE */
#endif /* IMPORTANCE_INHERITANCE */

#include <sys/kdebug.h>
/*
 *  Task Policy
 *
 *  This subsystem manages task and thread IO priority and backgrounding,
 *  as well as importance inheritance, process suppression, task QoS, and apptype.
 *  These properties have a surprising number of complex interactions, so they are
 *  centralized here in one state machine to simplify the implementation of those interactions.
 *
 *  Architecture:
 *  Threads and tasks have two policy fields: requested, effective.
 *  Requested represents the wishes of each interface that influences task policy.
 *  Effective represents the distillation of that policy into a set of behaviors.
 *
 *  Each thread making a modification in the policy system passes a 'pending' struct,
 *  which tracks updates that will be applied after dropping the policy engine lock.
 *
 *  Each interface that has an input into the task policy state machine controls a field in requested.
 *  If the interface has a getter, it returns what is in the field in requested, but that is
 *  not necessarily what is actually in effect.
 *
 *  All kernel subsystems that behave differently based on task policy call into
 *  the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine
 *  for that subsystem by querying only the 'effective' field.
 *
 *  Policy change operations:
 *  Here are the steps to change a policy on a task or thread:
 *  1) Lock task
 *  2) Change requested field for the relevant policy
 *  3) Run a task policy update, which recalculates effective based on requested,
 *     then takes a diff between the old and new versions of requested and calls the relevant
 *     other subsystems to apply these changes, and updates the pending field.
 *  4) Unlock task
 *  5) Run task policy update complete, which looks at the pending field to update
 *     subsystems which cannot be touched while holding the task lock.
 *
 *  To add a new requested policy, add the field in the requested struct, the flavor in task.h,
 *  the setter and getter in proc_(set|get)_task_policy*,
 *  then set up the effects of that behavior in task_policy_update*. If the policy manifests
 *  itself as a distinct effective policy, add it to the effective struct and add it to the
 *  proc_get_effective_task_policy accessor.
 *
 *  Most policies are set via proc_set_task_policy, but policies that don't fit that interface
 *  roll their own lock/set/update/unlock/complete code inside this file.
 *
 *
 *  Suppression policy
 *
 *  These are a set of behaviors that can be requested for a task.  They currently have specific
 *  implied actions when they're enabled, but they may be made customizable in the future.
 *
 *  When the affected task is boosted, we temporarily disable the suppression behaviors
 *  so that the affected process has a chance to run so it can call the API to permanently
 *  disable the suppression behaviors.
 *
 *  Locking
 *
 *  Changing task policy on a task takes the task lock.
 *  Changing task policy on a thread takes the thread mutex.
 *  Task policy changes that affect threads will take each thread's mutex to update it if necessary.
 *
 *  Querying the effective policy does not take a lock, because callers
 *  may run in interrupt context or other places where locks are not OK.
 *
 *  This means that any notification of state change needs to be externally synchronized.
 *  We do this by idempotent callouts after the state has changed to ask
 *  other subsystems to update their view of the world.
 *
 * TODO: Move all cpu/wakes/io monitor code into a separate file
 * TODO: Move all importance code over to importance subsystem
 * TODO: Move all taskwatch code into a separate file
 * TODO: Move all VM importance code into a separate file
 */

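/*
 * A minimal sketch of the five-step sequence above, modeled on the
 * proc_set_task_policy() implementation later in this file (the pend_token
 * collects work that must run only after the task lock is dropped):
 *
 *	struct task_pend_token pend_token = {};
 *
 *	task_lock(task);                                         // 1) Lock task
 *	proc_set_task_policy_locked(task, category, flavor,      // 2) Change requested
 *	    value, value2);
 *	task_policy_update_locked(task, &pend_token);            // 3) Recalculate effective
 *	task_unlock(task);                                       // 4) Unlock task
 *	task_policy_update_complete_unlocked(task, &pend_token); // 5) Apply pended updates
 */
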
/* Task policy related helper functions */
static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2);

static void task_policy_update_locked(task_t task, task_pend_token_t pend_token);
static void task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token);

/* For attributes that have two scalars as input/output */
static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2);
static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2);

static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role, task_pend_token_t pend_token);

static uint64_t task_requested_bitfield(task_t task);
static uint64_t task_effective_bitfield(task_t task);

/* Convenience functions for munging a policy bitfield into a tracepoint */
static uintptr_t trequested_0(task_t task);
static uintptr_t trequested_1(task_t task);
static uintptr_t teffective_0(task_t task);
static uintptr_t teffective_1(task_t task);

/* CPU limits helper functions */
static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
static int task_enable_cpumon_locked(task_t task);
static int task_disable_cpumon(task_t task);
static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
static int task_apply_resource_actions(task_t task, int type);
static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);

#ifdef MACH_BSD
typedef struct proc *   proc_t;
int                     proc_pid(struct proc *proc);
extern int              proc_selfpid(void);
extern char *           proc_name_address(void *p);
extern char *           proc_best_name(proc_t proc);

extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg,
    char *buffer, uint32_t buffersize,
    int32_t *retval);
#endif /* MACH_BSD */


#if CONFIG_TASKWATCH
/* Taskwatch related helper functions */
static void set_thread_appbg(thread_t thread, int setbg, int importance);
static void add_taskwatch_locked(task_t task, task_watch_t * twp);
static void remove_taskwatch_locked(task_t task, task_watch_t * twp);
static void task_watch_lock(void);
static void task_watch_unlock(void);
static void apply_appstate_watchers(task_t task);

typedef struct task_watcher {
	queue_chain_t   tw_links;       /* queueing of threads */
	task_t          tw_task;        /* task that is being watched */
	thread_t        tw_thread;      /* thread that is watching the watch_task */
	int             tw_state;       /* the current app state of the thread */
	int             tw_importance;  /* importance prior to backgrounding */
} task_watch_t;

typedef struct thread_watchlist {
	thread_t        thread;         /* thread being worked on for taskwatch action */
	int             importance;     /* importance to be restored if thread is being made active */
} thread_watchlist_t;

#endif /* CONFIG_TASKWATCH */

extern int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap);

/* Importance Inheritance related helper functions */

#if IMPORTANCE_INHERITANCE

static void task_importance_mark_live_donor(task_t task, boolean_t donating);
static void task_importance_mark_receiver(task_t task, boolean_t receiving);
static void task_importance_mark_denap_receiver(task_t task, boolean_t denap);

static boolean_t task_is_marked_live_importance_donor(task_t task);
static boolean_t task_is_importance_receiver(task_t task);
static boolean_t task_is_importance_denap_receiver(task_t task);

static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count);

static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
static void task_importance_update_live_donor(task_t target_task);

static void task_set_boost_locked(task_t task, boolean_t boost_active);

#endif /* IMPORTANCE_INHERITANCE */

#if IMPORTANCE_TRACE
#define __imptrace_only
#else /* IMPORTANCE_TRACE */
#define __imptrace_only __unused
#endif /* !IMPORTANCE_TRACE */

#if IMPORTANCE_INHERITANCE
#define __imp_only
#else
#define __imp_only __unused
#endif

/*
 * Default parameters for certain policies
 */

int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_tal_disk_tier        = THROTTLE_LEVEL_TIER1;

int proc_graphics_timer_qos   = (LATENCY_QOS_TIER_0 & 0xFF);

const int proc_default_bg_iotier  = THROTTLE_LEVEL_TIER2;

/* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
const struct task_requested_policy default_task_requested_policy = {
	.trp_bg_iotier = proc_default_bg_iotier
};
const struct task_effective_policy default_task_effective_policy = {};

/*
 * Default parameters for CPU usage monitor.
 *
 * Default setting is 50% over 3 minutes.
 */
#define         DEFAULT_CPUMON_PERCENTAGE 50
#define         DEFAULT_CPUMON_INTERVAL   (3 * 60)

uint8_t         proc_max_cpumon_percentage;
uint64_t        proc_max_cpumon_interval;

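/*
 * A worked reading of the defaults above (illustrative arithmetic, not extra
 * policy): 50% over a 180-second interval allows a task to accumulate at most
 * 0.50 * 180 = 90 seconds of CPU time within a monitoring window before the
 * CPU usage monitor takes its resource action.
 */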

kern_return_t
qos_latency_policy_validate(task_latency_qos_t ltier)
{
	if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
	    ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

kern_return_t
qos_throughput_policy_validate(task_throughput_qos_t ttier)
{
	if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
	    ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

static kern_return_t
task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count)
{
	if (count < TASK_QOS_POLICY_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
	task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;

	kern_return_t kr = qos_latency_policy_validate(ltier);

	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = qos_throughput_policy_validate(ttier);

	return kr;
}

uint32_t
qos_extract(uint32_t qv)
{
	return qv & 0xFF;
}

uint32_t
qos_latency_policy_package(uint32_t qv)
{
	return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
}

uint32_t
qos_throughput_policy_package(uint32_t qv)
{
	return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
}
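
/*
 * Packaging round-trip, as a sketch. The tier constant layout is assumed
 * from <mach/thread_policy.h>, where e.g. LATENCY_QOS_TIER_0 carries a 0xFF
 * magic in its upper bits and the tier index in its low byte:
 *
 *	uint32_t t = qos_extract(LATENCY_QOS_TIER_0);   // keeps the low byte only
 *	assert(qos_latency_policy_package(t) == LATENCY_QOS_TIER_0);
 *
 * Throughput tiers use 0xFE as the upper-bits magic, which keeps the two
 * tier namespaces distinguishable once packaged.
 */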

#define TASK_POLICY_SUPPRESSION_DISABLE  0x1
#define TASK_POLICY_SUPPRESSION_IOTIER2  0x2
#define TASK_POLICY_SUPPRESSION_NONDONOR 0x4
/* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 |
    TASK_POLICY_SUPPRESSION_NONDONOR;

kern_return_t
task_policy_set(
	task_t                  task,
	task_policy_flavor_t    flavor,
	task_policy_t           policy_info,
	mach_msg_type_number_t  count)
{
	kern_return_t           result = KERN_SUCCESS;

	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case TASK_CATEGORY_POLICY: {
		task_category_policy_t info = (task_category_policy_t)policy_info;

		if (count < TASK_CATEGORY_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

#if !defined(XNU_TARGET_OS_OSX)
		/* On embedded, you can't modify your own role. */
		if (current_task() == task) {
			return KERN_INVALID_ARGUMENT;
		}
#endif

		switch (info->role) {
		case TASK_FOREGROUND_APPLICATION:
		case TASK_BACKGROUND_APPLICATION:
		case TASK_DEFAULT_APPLICATION:
			proc_set_task_policy(task,
			    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
			    info->role);
			break;

		case TASK_CONTROL_APPLICATION:
			if (task != current_task() || !task_is_privileged(task)) {
				result = KERN_INVALID_ARGUMENT;
			} else {
				proc_set_task_policy(task,
				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
				    info->role);
			}
			break;

		case TASK_GRAPHICS_SERVER:
			/* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
			if (task != current_task() || !task_is_privileged(task)) {
				result = KERN_INVALID_ARGUMENT;
			} else {
				proc_set_task_policy(task,
				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
				    info->role);
			}
			break;
		default:
			result = KERN_INVALID_ARGUMENT;
			break;
		} /* switch (info->role) */

		break;
	}

/* Desired energy-efficiency/performance "quality-of-service" */
	case TASK_BASE_QOS_POLICY:
	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}


		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

		proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE,
		    flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS,
		    lqos, tqos);
	}
	break;

	case TASK_BASE_LATENCY_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);

		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos);
	}
	break;

	case TASK_BASE_THROUGHPUT_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos);
	}
	break;

	case TASK_SUPPRESSION_POLICY:
	{
#if !defined(XNU_TARGET_OS_OSX)
		/*
		 * Suppression policy is not enabled for embedded
		 * because apps aren't marked as denap receivers
		 */
		result = KERN_INVALID_ARGUMENT;
		break;
#else /* !defined(XNU_TARGET_OS_OSX) */

		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (count < TASK_SUPPRESSION_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		struct task_qos_policy qosinfo;

		qosinfo.task_latency_qos_tier = info->timer_throttle;
		qosinfo.task_throughput_qos_tier = info->throughput_qos;

		kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* TEMPORARY disablement of task suppression */
		if (info->active &&
		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) {
			return KERN_SUCCESS;
		}

		struct task_pend_token pend_token = {};

		task_lock(task);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
		    proc_selfpid(), task_pid(task), trequested_0(task),
		    trequested_1(task), 0);

		task->requested_policy.trp_sup_active      = (info->active)         ? 1 : 0;
		task->requested_policy.trp_sup_lowpri_cpu  = (info->lowpri_cpu)     ? 1 : 0;
		task->requested_policy.trp_sup_timer       = qos_extract(info->timer_throttle);
		task->requested_policy.trp_sup_disk        = (info->disk_throttle)  ? 1 : 0;
		task->requested_policy.trp_sup_throughput  = qos_extract(info->throughput_qos);
		task->requested_policy.trp_sup_cpu         = (info->suppressed_cpu) ? 1 : 0;
		task->requested_policy.trp_sup_bg_sockets  = (info->background_sockets) ? 1 : 0;

		task_policy_update_locked(task, &pend_token);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
		    proc_selfpid(), task_pid(task), trequested_0(task),
		    trequested_1(task), 0);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, &pend_token);

		break;

#endif /* !defined(XNU_TARGET_OS_OSX) */
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	return result;
}
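
/*
 * Hypothetical user-space caller, for illustration only (this follows the
 * standard MIG calling pattern for this routine; it is not code from this
 * file):
 *
 *	task_category_policy_data_t info = {
 *		.role = TASK_FOREGROUND_APPLICATION,
 *	};
 *	kern_return_t kr = task_policy_set(task_port, TASK_CATEGORY_POLICY,
 *	    (task_policy_t)&info, TASK_CATEGORY_POLICY_COUNT);
 *
 * Note that on non-macOS targets a task may not set its own role, and only
 * privileged tasks may claim TASK_CONTROL_APPLICATION or TASK_GRAPHICS_SERVER.
 */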

/* Sets BSD 'nice' value on the task */
kern_return_t
task_importance(
	task_t                  task,
	integer_t               importance)
{
	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_TERMINATED;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
		task_unlock(task);

		return KERN_INVALID_ARGUMENT;
	}

	task->importance = importance;

	struct task_pend_token pend_token = {};

	task_policy_update_locked(task, &pend_token);

	task_unlock(task);

	task_policy_update_complete_unlocked(task, &pend_token);

	return KERN_SUCCESS;
}

kern_return_t
task_policy_get(
	task_t                  task,
	task_policy_flavor_t    flavor,
	task_policy_t           policy_info,
	mach_msg_type_number_t  *count,
	boolean_t               *get_default)
{
	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case TASK_CATEGORY_POLICY:
	{
		task_category_policy_t          info = (task_category_policy_t)policy_info;

		if (*count < TASK_CATEGORY_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (*get_default) {
			info->role = TASK_UNSPECIFIED;
		} else {
			info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
		}
		break;
	}

	case TASK_BASE_QOS_POLICY: /* FALLTHRU */
	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t info = (task_qos_policy_t)policy_info;

		if (*count < TASK_QOS_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (*get_default) {
			info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
			info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
		} else if (flavor == TASK_BASE_QOS_POLICY) {
			int value1, value2;

			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

			info->task_latency_qos_tier = qos_latency_policy_package(value1);
			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
		} else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
			int value1, value2;

			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

			info->task_latency_qos_tier = qos_latency_policy_package(value1);
			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
		}

		break;
	}

	case TASK_POLICY_STATE:
	{
		task_policy_state_t info = (task_policy_state_t)policy_info;

		if (*count < TASK_POLICY_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Only root can get this info */
		if (!task_is_privileged(current_task())) {
			return KERN_PROTECTION_FAILURE;
		}

		if (*get_default) {
			info->requested = 0;
			info->effective = 0;
			info->pending = 0;
			info->imp_assertcnt = 0;
			info->imp_externcnt = 0;
			info->flags = 0;
			info->imp_transitions = 0;
		} else {
			task_lock(task);

			info->requested = task_requested_bitfield(task);
			info->effective = task_effective_bitfield(task);
			info->pending   = 0;

			info->tps_requested_policy = *(uint64_t*)(&task->requested_policy);
			info->tps_effective_policy = *(uint64_t*)(&task->effective_policy);

			info->flags = 0;
			if (task->task_imp_base != NULL) {
				info->imp_assertcnt = task->task_imp_base->iit_assertcnt;
				info->imp_externcnt = IIT_EXTERN(task->task_imp_base);
				info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0);
				info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0);
				info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0);
				info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0);
				info->flags |= (get_task_pidsuspended(task) ? TASK_IS_PIDSUSPENDED : 0);
				info->imp_transitions = task->task_imp_base->iit_transitions;
			} else {
				info->imp_assertcnt = 0;
				info->imp_externcnt = 0;
				info->imp_transitions = 0;
			}
			task_unlock(task);
		}

		break;
	}

	case TASK_SUPPRESSION_POLICY:
	{
		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (*count < TASK_SUPPRESSION_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		task_lock(task);

		if (*get_default) {
			info->active            = 0;
			info->lowpri_cpu        = 0;
			info->timer_throttle    = LATENCY_QOS_TIER_UNSPECIFIED;
			info->disk_throttle     = 0;
			info->cpu_limit         = 0;
			info->suspend           = 0;
			info->throughput_qos    = 0;
			info->suppressed_cpu    = 0;
		} else {
			info->active            = task->requested_policy.trp_sup_active;
			info->lowpri_cpu        = task->requested_policy.trp_sup_lowpri_cpu;
			info->timer_throttle    = qos_latency_policy_package(task->requested_policy.trp_sup_timer);
			info->disk_throttle     = task->requested_policy.trp_sup_disk;
			info->cpu_limit         = 0;
			info->suspend           = 0;
			info->throughput_qos    = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput);
			info->suppressed_cpu    = task->requested_policy.trp_sup_cpu;
			info->background_sockets = task->requested_policy.trp_sup_bg_sockets;
		}

		task_unlock(task);
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
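
/*
 * Illustrative query, again following the MIG calling convention rather than
 * code from this file. 'get_default' is in/out: pass FALSE to read the
 * current requested values, TRUE to read the defaults:
 *
 *	task_category_policy_data_t info;
 *	mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT;
 *	boolean_t get_default = FALSE;
 *	kern_return_t kr = task_policy_get(task_port, TASK_CATEGORY_POLICY,
 *	    (task_policy_t)&info, &count, &get_default);
 */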

/*
 * Called at task creation
 * We calculate the correct effective but don't apply it to anything yet.
 * The threads, etc will inherit from the task as they get created.
 */
void
task_policy_create(task_t task, task_t parent_task)
{
	task->requested_policy.trp_apptype          = parent_task->requested_policy.trp_apptype;

	task->requested_policy.trp_int_darwinbg     = parent_task->requested_policy.trp_int_darwinbg;
	task->requested_policy.trp_ext_darwinbg     = parent_task->requested_policy.trp_ext_darwinbg;
	task->requested_policy.trp_int_iotier       = parent_task->requested_policy.trp_int_iotier;
	task->requested_policy.trp_ext_iotier       = parent_task->requested_policy.trp_ext_iotier;
	task->requested_policy.trp_int_iopassive    = parent_task->requested_policy.trp_int_iopassive;
	task->requested_policy.trp_ext_iopassive    = parent_task->requested_policy.trp_ext_iopassive;
	task->requested_policy.trp_bg_iotier        = parent_task->requested_policy.trp_bg_iotier;
	task->requested_policy.trp_terminated       = parent_task->requested_policy.trp_terminated;
	task->requested_policy.trp_qos_clamp        = parent_task->requested_policy.trp_qos_clamp;

	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) {
		/* Do not update the apptype for exec copy task */
		if (parent_task->requested_policy.trp_boosted) {
			task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
			task_importance_mark_donor(task, TRUE);
		} else {
			task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
			task_importance_mark_receiver(task, FALSE);
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_policy_update_internal_locked(task, true, NULL);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_importance_update_live_donor(task);
}


static void
task_policy_update_locked(task_t task, task_pend_token_t pend_token)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START),
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_policy_update_internal_locked(task, false, pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);
}

/*
 * One state update function TO RULE THEM ALL
 *
 * This function updates the task or thread effective policy fields
 * and pushes the results to the relevant subsystems.
 *
 * Must call update_complete after unlocking the task,
 * as some subsystems cannot be updated while holding the task lock.
 *
 * Called with task locked, not thread
 */

static void
task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token)
{
	/*
	 * Step 1:
	 *  Gather requested policy
	 */

	struct task_requested_policy requested = task->requested_policy;

	/*
	 * Step 2:
	 *  Calculate new effective policies from requested policy and task state
	 *  Rules:
	 *      Don't change requested, it won't take effect
	 */

	struct task_effective_policy next = {};

	/* Update task role */
	next.tep_role = requested.trp_role;

	/* Set task qos clamp and ceiling */
	next.tep_qos_clamp = requested.trp_qos_clamp;

	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT) {
		switch (next.tep_role) {
		case TASK_FOREGROUND_APPLICATION:
			/* Foreground apps get urgent scheduler priority */
			next.tep_qos_ui_is_urgent = 1;
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_BACKGROUND_APPLICATION:
			/* This is really 'non-focal but on-screen' */
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_DEFAULT_APPLICATION:
			/* This is 'may render UI but we don't know if it's focal/nonfocal' */
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_NONUI_APPLICATION:
			/* i.e. 'off-screen' */
			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
			break;

		case TASK_CONTROL_APPLICATION:
		case TASK_GRAPHICS_SERVER:
			next.tep_qos_ui_is_urgent = 1;
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_THROTTLE_APPLICATION:
			/* i.e. 'TAL launch' */
			next.tep_qos_ceiling = THREAD_QOS_UTILITY;
			break;

		case TASK_DARWINBG_APPLICATION:
			/* i.e. 'DARWIN_BG throttled background application' */
			next.tep_qos_ceiling = THREAD_QOS_BACKGROUND;
			break;

		case TASK_UNSPECIFIED:
		default:
			/* Apps that don't have an application role get
			 * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
			break;
		}
	} else {
		/* Daemons and dext get USER_INTERACTIVE squashed to USER_INITIATED */
		next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
	}

	/* Calculate DARWIN_BG */
	bool wants_darwinbg        = false;
	bool wants_all_sockets_bg  = false; /* Do I want my existing sockets to be bg */
	bool wants_watchersbg      = false; /* Do I want my pidbound threads to be bg */
	bool adaptive_bg_only      = false; /* This task is BG only because it's adaptive unboosted */

	/* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */
	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
	    requested.trp_boosted == 0) {
		wants_darwinbg = true;
		adaptive_bg_only = true;
	}

	/*
	 * If DARWIN_BG has been requested at either level, it's engaged.
	 * Only true DARWIN_BG changes cause watchers to transition.
	 *
	 * Backgrounding due to apptype does.
	 */
	if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg ||
	    next.tep_role == TASK_DARWINBG_APPLICATION) {
		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	/* Application launching in special Transparent App Lifecycle throttle mode */
	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT &&
	    requested.trp_role == TASK_THROTTLE_APPLICATION) {
		next.tep_tal_engaged = 1;
	}

	/* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
		wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND ||
	    next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) {
		wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	/* Calculate side effects of DARWIN_BG */

	if (wants_darwinbg) {
		next.tep_darwinbg = 1;
		/* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */
		next.tep_new_sockets_bg = 1;
		next.tep_lowpri_cpu = 1;
	}

	if (wants_all_sockets_bg) {
		next.tep_all_sockets_bg = 1;
	}

	if (wants_watchersbg) {
		next.tep_watchers_bg = 1;
	}

	next.tep_adaptive_bg = adaptive_bg_only;

	/* Calculate low CPU priority */

	boolean_t wants_lowpri_cpu = false;

	if (wants_darwinbg) {
		wants_lowpri_cpu = true;
	}

	if (next.tep_tal_engaged) {
		wants_lowpri_cpu = true;
	}

	if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) {
		wants_lowpri_cpu = true;
	}

	if (wants_lowpri_cpu) {
		next.tep_lowpri_cpu = 1;
	}

	/* Calculate IO policy */

	/* Update BG IO policy (so we can see if it has changed) */
	next.tep_bg_iotier = requested.trp_bg_iotier;

	int iopol = THROTTLE_LEVEL_TIER0;

	if (wants_darwinbg) {
		iopol = MAX(iopol, requested.trp_bg_iotier);
	}

	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) {
		iopol = MAX(iopol, proc_standard_daemon_tier);
	}

	if (requested.trp_sup_disk && requested.trp_boosted == 0) {
		iopol = MAX(iopol, proc_suppressed_disk_tier);
	}

	if (next.tep_tal_engaged) {
		iopol = MAX(iopol, proc_tal_disk_tier);
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]);
	}

	iopol = MAX(iopol, requested.trp_int_iotier);
	iopol = MAX(iopol, requested.trp_ext_iotier);

	next.tep_io_tier = iopol;
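
	/*
	 * Worked example of the IO tier derivation above (illustrative, using
	 * the defaults declared earlier in this file): a
	 * TASK_APPTYPE_DAEMON_BACKGROUND task has wants_darwinbg set, so with
	 * the default trp_bg_iotier of THROTTLE_LEVEL_TIER2 its effective IO
	 * tier is MAX(TIER0, TIER2) = TIER2, unless a stronger requested or
	 * clamp-derived tier pushes it higher.
	 */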

	/* Calculate Passive IO policy */

	if (requested.trp_ext_iopassive || requested.trp_int_iopassive) {
		next.tep_io_passive = 1;
	}

	/* Calculate suppression-active flag */
	boolean_t appnap_transition = false;

	if (requested.trp_sup_active && requested.trp_boosted == 0) {
		next.tep_sup_active = 1;
	}

	if (task->effective_policy.tep_sup_active != next.tep_sup_active) {
		appnap_transition = true;
	}

	/* Calculate timer QOS */
	int latency_qos = requested.trp_base_latency_qos;

	if (requested.trp_sup_timer && requested.trp_boosted == 0) {
		latency_qos = requested.trp_sup_timer;
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]);
	}

	if (requested.trp_over_latency_qos != 0) {
		latency_qos = requested.trp_over_latency_qos;
	}

	/* Treat the windowserver special */
	if (requested.trp_role == TASK_GRAPHICS_SERVER) {
		latency_qos = proc_graphics_timer_qos;
	}

	next.tep_latency_qos = latency_qos;

	/* Calculate throughput QOS */
	int through_qos = requested.trp_base_through_qos;

	if (requested.trp_sup_throughput && requested.trp_boosted == 0) {
		through_qos = requested.trp_sup_throughput;
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]);
	}

	if (requested.trp_over_through_qos != 0) {
		through_qos = requested.trp_over_through_qos;
	}

	next.tep_through_qos = through_qos;

	/* Calculate suppressed CPU priority */
	if (requested.trp_sup_cpu && requested.trp_boosted == 0) {
		next.tep_suppressed_cpu = 1;
	}

	/*
	 * Calculate background sockets
	 * Don't take into account boosting to limit transition frequency.
	 */
	if (requested.trp_sup_bg_sockets) {
		next.tep_all_sockets_bg = 1;
		next.tep_new_sockets_bg = 1;
	}

	/* Apply SFI Managed class bit */
	next.tep_sfi_managed = requested.trp_sfi_managed;

	/* Calculate 'live donor' status for live importance */
	switch (requested.trp_apptype) {
	case TASK_APPTYPE_APP_TAL:
	case TASK_APPTYPE_APP_DEFAULT:
		if (requested.trp_ext_darwinbg == 1 ||
		    (next.tep_sup_active == 1 &&
		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) ||
		    next.tep_role == TASK_DARWINBG_APPLICATION) {
			next.tep_live_donor = 0;
		} else {
			next.tep_live_donor = 1;
		}
		break;

	case TASK_APPTYPE_DAEMON_INTERACTIVE:
	case TASK_APPTYPE_DAEMON_STANDARD:
	case TASK_APPTYPE_DAEMON_ADAPTIVE:
	case TASK_APPTYPE_DAEMON_BACKGROUND:
	case TASK_APPTYPE_DRIVER:
	default:
		next.tep_live_donor = 0;
		break;
	}

	if (requested.trp_terminated) {
		/*
		 * Shoot down the throttles that slow down exit or response to SIGTERM
		 * We don't need to shoot down:
		 * passive        (don't want to cause others to throttle)
		 * all_sockets_bg (don't need to iterate FDs on every exit)
		 * new_sockets_bg (doesn't matter for exiting process)
		 * pidsuspend     (jetsam-ed BG process shouldn't run again)
		 * watchers_bg    (watcher threads don't need to be unthrottled)
		 * latency_qos    (affects userspace timers only)
		 */

		next.tep_terminated     = 1;
		next.tep_darwinbg       = 0;
		next.tep_lowpri_cpu     = 0;
		next.tep_io_tier        = THROTTLE_LEVEL_TIER0;
		next.tep_tal_engaged    = 0;
		next.tep_role           = TASK_UNSPECIFIED;
		next.tep_suppressed_cpu = 0;
	}

	/*
	 * Step 3:
	 *  Swap out old policy for new policy
	 */

	struct task_effective_policy prev = task->effective_policy;

	/* This is the point where the new values become visible to other threads */
	task->effective_policy = next;

	/* Don't do anything further to a half-formed task */
	if (in_create) {
		return;
	}

	if (task == kernel_task) {
		panic("Attempting to set task policy on kernel_task");
	}

	/*
	 * Step 4:
	 *  Pend updates that can't be done while holding the task lock
	 */

	if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) {
		pend_token->tpt_update_sockets = 1;
	}

	/* Only re-scan the timer list if the qos level is getting less strong */
	if (prev.tep_latency_qos > next.tep_latency_qos) {
		pend_token->tpt_update_timers = 1;
	}

#if CONFIG_TASKWATCH
	if (prev.tep_watchers_bg != next.tep_watchers_bg) {
		pend_token->tpt_update_watchers = 1;
	}
#endif /* CONFIG_TASKWATCH */

	if (prev.tep_live_donor != next.tep_live_donor) {
		pend_token->tpt_update_live_donor = 1;
	}

	/*
	 * Step 5:
	 *  Update other subsystems as necessary if something has changed
	 */

	bool update_threads = false, update_sfi = false;

	/*
	 * Check for the attributes that thread_policy_update_internal_locked() consults,
	 *  and trigger thread policy re-evaluation.
	 */
	if (prev.tep_io_tier != next.tep_io_tier ||
	    prev.tep_bg_iotier != next.tep_bg_iotier ||
	    prev.tep_io_passive != next.tep_io_passive ||
	    prev.tep_darwinbg != next.tep_darwinbg ||
	    prev.tep_qos_clamp != next.tep_qos_clamp ||
	    prev.tep_qos_ceiling != next.tep_qos_ceiling ||
	    prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent ||
	    prev.tep_latency_qos != next.tep_latency_qos ||
	    prev.tep_through_qos != next.tep_through_qos ||
	    prev.tep_lowpri_cpu != next.tep_lowpri_cpu ||
	    prev.tep_new_sockets_bg != next.tep_new_sockets_bg ||
	    prev.tep_terminated != next.tep_terminated ||
	    prev.tep_adaptive_bg != next.tep_adaptive_bg) {
		update_threads = true;
	}

	/*
	 * Check for the attributes that sfi_thread_classify() consults,
	 *  and trigger SFI re-evaluation.
	 */
	if (prev.tep_latency_qos != next.tep_latency_qos ||
	    prev.tep_role != next.tep_role ||
	    prev.tep_sfi_managed != next.tep_sfi_managed) {
		update_sfi = true;
	}

	/* Reflect task role transitions into the coalition role counters */
	if (prev.tep_role != next.tep_role) {
		if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) {
			update_sfi = true;
		}
	}

	bool update_priority = false;

	int16_t priority     = BASEPRI_DEFAULT;
	int16_t max_priority = MAXPRI_USER;

	if (next.tep_lowpri_cpu) {
		priority = MAXPRI_THROTTLE;
		max_priority = MAXPRI_THROTTLE;
	} else if (next.tep_suppressed_cpu) {
		priority = MAXPRI_SUPPRESSED;
		max_priority = MAXPRI_SUPPRESSED;
	} else {
		switch (next.tep_role) {
		case TASK_CONTROL_APPLICATION:
			priority = BASEPRI_CONTROL;
			break;
		case TASK_GRAPHICS_SERVER:
			priority = BASEPRI_GRAPHICS;
			max_priority = MAXPRI_RESERVED;
			break;
		default:
			break;
		}

		/* factor in 'nice' value */
		priority += task->importance;

		if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
			int16_t qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp];

			priority        = MIN(priority, qos_clamp_priority);
			max_priority    = MIN(max_priority, qos_clamp_priority);
		}

		if (priority > max_priority) {
			priority = max_priority;
		} else if (priority < MINPRI) {
			priority = MINPRI;
		}
	}

	assert(priority <= max_priority);

	/* avoid extra work if priority isn't changing */
	if (priority != task->priority ||
	    max_priority != task->max_priority) {
		/* update the scheduling priority for the task */
		task->max_priority  = max_priority;
		task->priority      = priority;
		update_priority     = true;
	}
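
	/*
	 * Worked example of the computation above (assuming the usual values
	 * BASEPRI_DEFAULT == 31 and MAXPRI_USER == 63): a default-role task
	 * whose BSD nice maps to an importance of +4 computes priority
	 * 31 + 4 = 35, which falls within [MINPRI, MAXPRI_USER] and is used
	 * unmodified; a lowpri_cpu task is pinned to MAXPRI_THROTTLE instead.
	 */
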
	/* Loop over the threads in the task:
	 * only once
	 * only if necessary
	 * with one thread mutex hold per thread
	 */
	if (update_threads || update_priority || update_sfi) {
		thread_t thread;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			struct task_pend_token thread_pend_token = {};

			if (update_sfi) {
				thread_pend_token.tpt_update_thread_sfi = 1;
			}

			if (update_priority || update_threads) {
				thread_policy_update_tasklocked(thread,
				    task->priority, task->max_priority,
				    &thread_pend_token);
			}

			assert(!thread_pend_token.tpt_update_sockets);

			// Slightly risky, as we still hold the task lock...
			thread_policy_update_complete_unlocked(thread, &thread_pend_token);
		}
	}

	/*
	 * Use the app-nap transitions to influence the
	 * transition of the process within the jetsam band
	 * [and optionally its live-donor status]
	 * On macOS only.
	 */
	if (appnap_transition) {
		if (task->effective_policy.tep_sup_active == 1) {
			memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), TRUE);
		} else {
			memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), FALSE);
		}
	}
}


/*
 * Yet another layering violation. We reach out and bang on the coalition directly.
 */
static boolean_t
task_policy_update_coalition_focal_tasks(task_t            task,
    int               prev_role,
    int               next_role,
    task_pend_token_t pend_token)
{
	boolean_t sfi_transition = FALSE;
	uint32_t new_count = 0;

	/* task moving into/out-of the foreground */
	if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
		if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) {
			sfi_transition = TRUE;
			pend_token->tpt_update_tg_ui_flag = TRUE;
		}
	} else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
		if (task_coalition_adjust_focal_count(task, -1, &new_count) && (new_count == 0)) {
			sfi_transition = TRUE;
			pend_token->tpt_update_tg_ui_flag = TRUE;
		}
	}

	/* task moving into/out-of background */
	if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
		if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) {
			sfi_transition = TRUE;
		}
	} else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
		if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) {
			sfi_transition = TRUE;
		}
	}

	if (sfi_transition) {
		pend_token->tpt_update_coal_sfi = 1;
	}
	return sfi_transition;
}

#if CONFIG_SCHED_SFI

/* coalition object is locked */
static void
task_sfi_reevaluate_cb(coalition_t coal, void *ctx, task_t task)
{
	thread_t thread;

	/* unused for now */
	(void)coal;

	/* skip the task we're re-evaluating on behalf of: it's already updated */
	if (task == (task_t)ctx) {
		return;
	}

	task_lock(task);

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		sfi_reevaluate(thread);
	}

	task_unlock(task);
}
#endif /* CONFIG_SCHED_SFI */

/*
 * Called with task unlocked to do things that can't be done while holding the task lock
 */
void
task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token)
{
#ifdef MACH_BSD
	if (pend_token->tpt_update_sockets) {
		proc_apply_task_networkbg(task_pid(task), THREAD_NULL);
	}
#endif /* MACH_BSD */

	/* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
	if (pend_token->tpt_update_timers) {
		ml_timer_evaluate();
	}

#if CONFIG_TASKWATCH
	if (pend_token->tpt_update_watchers) {
		apply_appstate_watchers(task);
	}
#endif /* CONFIG_TASKWATCH */

	if (pend_token->tpt_update_live_donor) {
		task_importance_update_live_donor(task);
	}

#if CONFIG_SCHED_SFI
	/* use the resource coalition for SFI re-evaluation */
	if (pend_token->tpt_update_coal_sfi) {
		coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE],
		    (void *)task, task_sfi_reevaluate_cb);
	}
#endif /* CONFIG_SCHED_SFI */

#if CONFIG_THREAD_GROUPS
	if (pend_token->tpt_update_tg_ui_flag) {
		task_coalition_thread_group_focal_update(task);
	}
#endif /* CONFIG_THREAD_GROUPS */
}

/*
 * Initiate a task policy state transition
 *
 * Everything that modifies requested except functions that need to hold the task lock
 * should use this function
 *
 * Argument validation should be performed before reaching this point.
 *
 * TODO: Do we need to check task->active?
 */
void
proc_set_task_policy(task_t     task,
    int        category,
    int        flavor,
    int        value)
{
	struct task_pend_token pend_token = {};

	task_lock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
	    task_pid(task), trequested_0(task),
	    trequested_1(task), value, 0);

	proc_set_task_policy_locked(task, category, flavor, value, 0);

	task_policy_update_locked(task, &pend_token);


	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
	    task_pid(task), trequested_0(task),
	    trequested_1(task), tpending(&pend_token), 0);

	task_unlock(task);

	task_policy_update_complete_unlocked(task, &pend_token);
}
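
/*
 * Typical in-kernel usage, as a sketch. This mirrors how task_policy_set()
 * above applies the ROLE flavor; the specific role value here is chosen only
 * for illustration:
 *
 *	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
 *	    TASK_BACKGROUND_APPLICATION);
 */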
1434 
1435 /*
1436  * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
1437  * Same locking rules apply.
1438  */
1439 void
proc_set_task_policy2(task_t task,int category,int flavor,int value,int value2)1440 proc_set_task_policy2(task_t    task,
1441     int       category,
1442     int       flavor,
1443     int       value,
1444     int       value2)
1445 {
1446 	struct task_pend_token pend_token = {};
1447 
1448 	task_lock(task);
1449 
1450 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1451 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1452 	    task_pid(task), trequested_0(task),
1453 	    trequested_1(task), value, 0);
1454 
1455 	proc_set_task_policy_locked(task, category, flavor, value, value2);
1456 
1457 	task_policy_update_locked(task, &pend_token);
1458 
1459 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1460 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1461 	    task_pid(task), trequested_0(task),
1462 	    trequested_1(task), tpending(&pend_token), 0);
1463 
1464 	task_unlock(task);
1465 
1466 	task_policy_update_complete_unlocked(task, &pend_token);
1467 }
1468 
1469 /*
1470  * Set the requested state for a specific flavor to a specific value.
1471  *
1472  *  TODO:
1473  *  Verify that arguments to non iopol things are 1 or 0
1474  */
1475 static void
proc_set_task_policy_locked(task_t task,int category,int flavor,int value,int value2)1476 proc_set_task_policy_locked(task_t      task,
1477     int         category,
1478     int         flavor,
1479     int         value,
1480     int         value2)
1481 {
1482 	int tier, passive;
1483 
1484 	struct task_requested_policy requested = task->requested_policy;
1485 
1486 	switch (flavor) {
1487 	/* Category: EXTERNAL and INTERNAL */
1488 
1489 	case TASK_POLICY_DARWIN_BG:
1490 		if (category == TASK_POLICY_EXTERNAL) {
1491 			requested.trp_ext_darwinbg = value;
1492 		} else {
1493 			requested.trp_int_darwinbg = value;
1494 		}
1495 		break;
1496 
1497 	case TASK_POLICY_IOPOL:
1498 		proc_iopol_to_tier(value, &tier, &passive);
1499 		if (category == TASK_POLICY_EXTERNAL) {
1500 			requested.trp_ext_iotier  = tier;
1501 			requested.trp_ext_iopassive = passive;
1502 		} else {
1503 			requested.trp_int_iotier  = tier;
1504 			requested.trp_int_iopassive = passive;
1505 		}
1506 		break;
1507 
1508 	case TASK_POLICY_IO:
1509 		if (category == TASK_POLICY_EXTERNAL) {
1510 			requested.trp_ext_iotier = value;
1511 		} else {
1512 			requested.trp_int_iotier = value;
1513 		}
1514 		break;
1515 
1516 	case TASK_POLICY_PASSIVE_IO:
1517 		if (category == TASK_POLICY_EXTERNAL) {
1518 			requested.trp_ext_iopassive = value;
1519 		} else {
1520 			requested.trp_int_iopassive = value;
1521 		}
1522 		break;
1523 
1524 	/* Category: INTERNAL */
1525 
1526 	case TASK_POLICY_DARWIN_BG_IOPOL:
1527 		assert(category == TASK_POLICY_INTERNAL);
1528 		proc_iopol_to_tier(value, &tier, &passive);
1529 		requested.trp_bg_iotier = tier;
1530 		break;
1531 
1532 	/* Category: ATTRIBUTE */
1533 
1534 	case TASK_POLICY_BOOST:
1535 		assert(category == TASK_POLICY_ATTRIBUTE);
1536 		requested.trp_boosted = value;
1537 		break;
1538 
1539 	case TASK_POLICY_ROLE:
1540 		assert(category == TASK_POLICY_ATTRIBUTE);
1541 		requested.trp_role = value;
1542 		break;
1543 
1544 	case TASK_POLICY_TERMINATED:
1545 		assert(category == TASK_POLICY_ATTRIBUTE);
1546 		requested.trp_terminated = value;
1547 		break;
1548 
1549 	case TASK_BASE_LATENCY_QOS_POLICY:
1550 		assert(category == TASK_POLICY_ATTRIBUTE);
1551 		requested.trp_base_latency_qos = value;
1552 		break;
1553 
1554 	case TASK_BASE_THROUGHPUT_QOS_POLICY:
1555 		assert(category == TASK_POLICY_ATTRIBUTE);
1556 		requested.trp_base_through_qos = value;
1557 		break;
1558 
1559 	case TASK_POLICY_SFI_MANAGED:
1560 		assert(category == TASK_POLICY_ATTRIBUTE);
1561 		requested.trp_sfi_managed = value;
1562 		break;
1563 
1564 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1565 		assert(category == TASK_POLICY_ATTRIBUTE);
1566 		requested.trp_base_latency_qos = value;
1567 		requested.trp_base_through_qos = value2;
1568 		break;
1569 
1570 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1571 		assert(category == TASK_POLICY_ATTRIBUTE);
1572 		requested.trp_over_latency_qos = value;
1573 		requested.trp_over_through_qos = value2;
1574 		break;
1575 
1576 	default:
1577 		panic("unknown task policy: %d %d %d %d", category, flavor, value, value2);
1578 		break;
1579 	}
1580 
1581 	task->requested_policy = requested;
1582 }
1583 
1584 /*
1585  * Gets what you set. Effective values may be different.
1586  */
1587 int
proc_get_task_policy(task_t task,int category,int flavor)1588 proc_get_task_policy(task_t     task,
1589     int        category,
1590     int        flavor)
1591 {
1592 	int value = 0;
1593 
1594 	task_lock(task);
1595 
1596 	struct task_requested_policy requested = task->requested_policy;
1597 
1598 	switch (flavor) {
1599 	case TASK_POLICY_DARWIN_BG:
1600 		if (category == TASK_POLICY_EXTERNAL) {
1601 			value = requested.trp_ext_darwinbg;
1602 		} else {
1603 			value = requested.trp_int_darwinbg;
1604 		}
1605 		break;
1606 	case TASK_POLICY_IOPOL:
1607 		if (category == TASK_POLICY_EXTERNAL) {
1608 			value = proc_tier_to_iopol(requested.trp_ext_iotier,
1609 			    requested.trp_ext_iopassive);
1610 		} else {
1611 			value = proc_tier_to_iopol(requested.trp_int_iotier,
1612 			    requested.trp_int_iopassive);
1613 		}
1614 		break;
1615 	case TASK_POLICY_IO:
1616 		if (category == TASK_POLICY_EXTERNAL) {
1617 			value = requested.trp_ext_iotier;
1618 		} else {
1619 			value = requested.trp_int_iotier;
1620 		}
1621 		break;
1622 	case TASK_POLICY_PASSIVE_IO:
1623 		if (category == TASK_POLICY_EXTERNAL) {
1624 			value = requested.trp_ext_iopassive;
1625 		} else {
1626 			value = requested.trp_int_iopassive;
1627 		}
1628 		break;
1629 	case TASK_POLICY_DARWIN_BG_IOPOL:
1630 		assert(category == TASK_POLICY_INTERNAL);
1631 		value = proc_tier_to_iopol(requested.trp_bg_iotier, 0);
1632 		break;
1633 	case TASK_POLICY_ROLE:
1634 		assert(category == TASK_POLICY_ATTRIBUTE);
1635 		value = requested.trp_role;
1636 		break;
1637 	case TASK_POLICY_SFI_MANAGED:
1638 		assert(category == TASK_POLICY_ATTRIBUTE);
1639 		value = requested.trp_sfi_managed;
1640 		break;
1641 	default:
1642 		panic("unknown policy_flavor %d", flavor);
1643 		break;
1644 	}
1645 
1646 	task_unlock(task);
1647 
1648 	return value;
1649 }
1650 
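/*
 * Editorial sketch (not part of the original source): what the set/get
 * round-trip above looks like from a hypothetical caller.  The getter
 * reports the *requested* value even when the effective value differs.
 */
#if 0 /* illustrative only */
static void
example_darwinbg_roundtrip(task_t task)
{
	/* Request external backgrounding for this task */
	proc_set_task_policy(task, TASK_POLICY_EXTERNAL,
	    TASK_POLICY_DARWIN_BG, TASK_POLICY_ENABLE);

	/* "Gets what you set": this returns TASK_POLICY_ENABLE ... */
	int requested = proc_get_task_policy(task, TASK_POLICY_EXTERNAL,
	    TASK_POLICY_DARWIN_BG);
	assert(requested == TASK_POLICY_ENABLE);

	/* ... independently of what the policy engine made effective. */
}
#endif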
1651 /*
1652  * Variant of proc_get_task_policy() that returns two scalar outputs.
1653  */
1654 void
1655 proc_get_task_policy2(task_t task,
1656     __assert_only int category,
1657     int flavor,
1658     int *value1,
1659     int *value2)
1660 {
1661 	task_lock(task);
1662 
1663 	struct task_requested_policy requested = task->requested_policy;
1664 
1665 	switch (flavor) {
1666 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1667 		assert(category == TASK_POLICY_ATTRIBUTE);
1668 		*value1 = requested.trp_base_latency_qos;
1669 		*value2 = requested.trp_base_through_qos;
1670 		break;
1671 
1672 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1673 		assert(category == TASK_POLICY_ATTRIBUTE);
1674 		*value1 = requested.trp_over_latency_qos;
1675 		*value2 = requested.trp_over_through_qos;
1676 		break;
1677 
1678 	default:
1679 		panic("unknown policy_flavor %d", flavor);
1680 		break;
1681 	}
1682 
1683 	task_unlock(task);
1684 }
1685 
1686 /*
1687  * Function for querying effective state for relevant subsystems
1688  * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
1689  *
1690  * ONLY the relevant subsystem should query this.
1691  * NEVER take a value from the 'effective' function and stuff it into a setter.
1692  *
1693  * NOTE: This accessor does not take the task lock.
1694  * Notifications of state updates need to be externally synchronized with state queries.
1695  * This routine *MUST* remain interrupt safe, as it is potentially invoked
1696  * within the context of a timer interrupt.  It is also called in KDP context for stackshot.
1697  */
1698 int
1699 proc_get_effective_task_policy(task_t   task,
1700     int      flavor)
1701 {
1702 	int value = 0;
1703 
1704 	switch (flavor) {
1705 	case TASK_POLICY_DARWIN_BG:
1706 		/*
1707 		 * This backs the KPI call proc_pidbackgrounded to find
1708 		 * out if a pid is backgrounded.
1709 		 * It is used to communicate state to the VM system, as well as
1710 		 * prioritizing requests to the graphics system.
1711 		 * Returns 1 for background mode, 0 for normal mode
1712 		 */
1713 		value = task->effective_policy.tep_darwinbg;
1714 		break;
1715 	case TASK_POLICY_ALL_SOCKETS_BG:
1716 		/*
1717 		 * do_background_socket() calls this to determine what it should do to the proc's sockets
1718 		 * Returns 1 for background mode, 0 for normal mode
1719 		 *
1720 		 * This consults both thread and task so un-DBGing a thread while the task is BG
1721 		 * doesn't get you out of the network throttle.
1722 		 */
1723 		value = task->effective_policy.tep_all_sockets_bg;
1724 		break;
1725 	case TASK_POLICY_SUP_ACTIVE:
1726 		/*
1727 		 * Is the task in AppNap? This is used to determine the urgency
1728 		 * that's passed to the performance management subsystem for threads
1729 		 * that are running at a priority <= MAXPRI_THROTTLE.
1730 		 */
1731 		value = task->effective_policy.tep_sup_active;
1732 		break;
1733 	case TASK_POLICY_LATENCY_QOS:
1734 		/*
1735 		 * timer arming calls into here to find out the timer coalescing level
1736 		 * Returns a QoS tier (0-6)
1737 		 */
1738 		value = task->effective_policy.tep_latency_qos;
1739 		break;
1740 	case TASK_POLICY_THROUGH_QOS:
1741 		/*
1742 		 * This value is passed into the urgency callout from the scheduler
1743 		 * to the performance management subsystem.
1744 		 * Returns a QoS tier (0-6)
1745 		 */
1746 		value = task->effective_policy.tep_through_qos;
1747 		break;
1748 	case TASK_POLICY_ROLE:
1749 		/*
1750 		 * This controls various things that ask whether a process is foreground,
1751 		 * like SFI, VM, access to GPU, etc
1752 		 */
1753 		value = task->effective_policy.tep_role;
1754 		break;
1755 	case TASK_POLICY_WATCHERS_BG:
1756 		/*
1757 		 * This controls whether or not a thread watching this process should be BG.
1758 		 */
1759 		value = task->effective_policy.tep_watchers_bg;
1760 		break;
1761 	case TASK_POLICY_SFI_MANAGED:
1762 		/*
1763 		 * This controls whether or not a process is targeted for specific control by thermald.
1764 		 */
1765 		value = task->effective_policy.tep_sfi_managed;
1766 		break;
1767 	default:
1768 		panic("unknown policy_flavor %d", flavor);
1769 		break;
1770 	}
1771 
1772 	return value;
1773 }
1774 
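/*
 * Editorial sketch: a subsystem that pulls effective policy, per the
 * rules above (lock-free read, never fed back into a setter).  The
 * wrapper below is hypothetical.
 */
#if 0 /* illustrative only */
static boolean_t
example_task_is_backgrounded(task_t task)
{
	/* Safe from timer-interrupt and KDP/stackshot context */
	return proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) ? TRUE : FALSE;
}
#endif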
1775 /*
1776  * Convert from IOPOL_* values to throttle tiers.
1777  *
1778  * TODO: Can this be made more compact, like an array lookup
1779  * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
1780  */
1781 
1782 void
1783 proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
1784 {
1785 	*passive = 0;
1786 	*tier = 0;
1787 	switch (iopolicy) {
1788 	case IOPOL_IMPORTANT:
1789 		*tier = THROTTLE_LEVEL_TIER0;
1790 		break;
1791 	case IOPOL_PASSIVE:
1792 		*tier = THROTTLE_LEVEL_TIER0;
1793 		*passive = 1;
1794 		break;
1795 	case IOPOL_STANDARD:
1796 		*tier = THROTTLE_LEVEL_TIER1;
1797 		break;
1798 	case IOPOL_UTILITY:
1799 		*tier = THROTTLE_LEVEL_TIER2;
1800 		break;
1801 	case IOPOL_THROTTLE:
1802 		*tier = THROTTLE_LEVEL_TIER3;
1803 		break;
1804 	default:
1805 		panic("unknown I/O policy %d", iopolicy);
1806 		break;
1807 	}
1808 }
1809 
1810 int
1811 proc_tier_to_iopol(int tier, int passive)
1812 {
1813 	if (passive == 1) {
1814 		switch (tier) {
1815 		case THROTTLE_LEVEL_TIER0:
1816 			return IOPOL_PASSIVE;
1817 		default:
1818 			panic("unknown passive tier %d", tier);
1819 			return IOPOL_DEFAULT;
1820 		}
1821 	} else {
1822 		switch (tier) {
1823 		case THROTTLE_LEVEL_NONE:
1824 		case THROTTLE_LEVEL_TIER0:
1825 			return IOPOL_DEFAULT;
1826 		case THROTTLE_LEVEL_TIER1:
1827 			return IOPOL_STANDARD;
1828 		case THROTTLE_LEVEL_TIER2:
1829 			return IOPOL_UTILITY;
1830 		case THROTTLE_LEVEL_TIER3:
1831 			return IOPOL_THROTTLE;
1832 		default:
1833 			panic("unknown tier %d", tier);
1834 			return IOPOL_DEFAULT;
1835 		}
1836 	}
1837 }
1838 
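/*
 * Editorial sketch of the array-lookup shape suggested by the TODO
 * above, assuming the IOPOL_* constants remain small integers (true in
 * today's sys/resource.h, but worth re-verifying before adopting).
 */
#if 0 /* illustrative only */
static const struct iopol_entry { int tier; int passive; } iopol_map[] = {
	[IOPOL_IMPORTANT] = { THROTTLE_LEVEL_TIER0, 0 },
	[IOPOL_PASSIVE]   = { THROTTLE_LEVEL_TIER0, 1 },
	[IOPOL_STANDARD]  = { THROTTLE_LEVEL_TIER1, 0 },
	[IOPOL_UTILITY]   = { THROTTLE_LEVEL_TIER2, 0 },
	[IOPOL_THROTTLE]  = { THROTTLE_LEVEL_TIER3, 0 },
	/* e.g. a future IOPOL_PASSIVE_STANDARD could become { TIER1, 1 } */
};
#endif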
1839 int
1840 proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role)
1841 {
1842 	integer_t role = TASK_UNSPECIFIED;
1843 
1844 	switch (darwin_role) {
1845 	case PRIO_DARWIN_ROLE_DEFAULT:
1846 		role = TASK_UNSPECIFIED;
1847 		break;
1848 	case PRIO_DARWIN_ROLE_UI_FOCAL:
1849 		role = TASK_FOREGROUND_APPLICATION;
1850 		break;
1851 	case PRIO_DARWIN_ROLE_UI:
1852 		role = TASK_DEFAULT_APPLICATION;
1853 		break;
1854 	case PRIO_DARWIN_ROLE_NON_UI:
1855 		role = TASK_NONUI_APPLICATION;
1856 		break;
1857 	case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
1858 		role = TASK_BACKGROUND_APPLICATION;
1859 		break;
1860 	case PRIO_DARWIN_ROLE_TAL_LAUNCH:
1861 		role = TASK_THROTTLE_APPLICATION;
1862 		break;
1863 	case PRIO_DARWIN_ROLE_DARWIN_BG:
1864 		role = TASK_DARWINBG_APPLICATION;
1865 		break;
1866 	default:
1867 		return EINVAL;
1868 	}
1869 
1870 	*task_role = role;
1871 
1872 	return 0;
1873 }
1874 
1875 int
1876 proc_task_role_to_darwin_role(task_role_t task_role)
1877 {
1878 	switch (task_role) {
1879 	case TASK_FOREGROUND_APPLICATION:
1880 		return PRIO_DARWIN_ROLE_UI_FOCAL;
1881 	case TASK_BACKGROUND_APPLICATION:
1882 		return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
1883 	case TASK_NONUI_APPLICATION:
1884 		return PRIO_DARWIN_ROLE_NON_UI;
1885 	case TASK_DEFAULT_APPLICATION:
1886 		return PRIO_DARWIN_ROLE_UI;
1887 	case TASK_THROTTLE_APPLICATION:
1888 		return PRIO_DARWIN_ROLE_TAL_LAUNCH;
1889 	case TASK_DARWINBG_APPLICATION:
1890 		return PRIO_DARWIN_ROLE_DARWIN_BG;
1891 	case TASK_UNSPECIFIED:
1892 	default:
1893 		return PRIO_DARWIN_ROLE_DEFAULT;
1894 	}
1895 }
1896 
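/*
 * Editorial note: the two conversions above are inverses for the roles
 * both understand, e.g.
 *
 *     task_role_t role;
 *     proc_darwin_role_to_task_role(PRIO_DARWIN_ROLE_UI_FOCAL, &role);
 *     assert(role == TASK_FOREGROUND_APPLICATION);
 *     assert(proc_task_role_to_darwin_role(role) == PRIO_DARWIN_ROLE_UI_FOCAL);
 *
 * while unrecognized inputs map to EINVAL and PRIO_DARWIN_ROLE_DEFAULT
 * respectively, so they do not round-trip.
 */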
1897 
1898 /* TODO: remove this variable when interactive daemon audit period is over */
1899 static TUNABLE(bool, ipc_importance_interactive_receiver,
1900     "imp_interactive_receiver", false);
1901 
1902 /*
1903  * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
1904  *
1905  * TODO: Make this function more table-driven instead of ad-hoc
1906  */
1907 void
1908 proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role,
1909     ipc_port_t * portwatch_ports, uint32_t portwatch_count)
1910 {
1911 	struct task_pend_token pend_token = {};
1912 
1913 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1914 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
1915 	    task_pid(task), trequested_0(task), trequested_1(task),
1916 	    apptype, 0);
1917 
1918 	switch (apptype) {
1919 	case TASK_APPTYPE_APP_DEFAULT:
1920 		/* Apps become donors via the 'live-donor' flag instead of the static donor flag */
1921 		task_importance_mark_donor(task, FALSE);
1922 		task_importance_mark_live_donor(task, TRUE);
1923 		task_importance_mark_receiver(task, FALSE);
1924 #if !defined(XNU_TARGET_OS_OSX)
1925 		task_importance_mark_denap_receiver(task, FALSE);
1926 #else
1927 		/* Apps are de-nap receivers on macOS for suppression behaviors */
1928 		task_importance_mark_denap_receiver(task, TRUE);
1929 #endif /* !defined(XNU_TARGET_OS_OSX) */
1930 		break;
1931 
1932 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
1933 		task_importance_mark_donor(task, TRUE);
1934 		task_importance_mark_live_donor(task, FALSE);
1935 
1936 		/*
1937 		 * A boot arg controls whether interactive daemons are importance receivers.
1938 		 * Normally, they are not.  But for testing their behavior as an adaptive
1939 		 * daemon, the boot-arg can be set.
1940 		 *
1941 		 * TODO: remove this when the interactive daemon audit period is over.
1942 		 */
1943 		task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
1944 		task_importance_mark_denap_receiver(task, FALSE);
1945 		break;
1946 
1947 	case TASK_APPTYPE_DAEMON_STANDARD:
1948 		task_importance_mark_donor(task, TRUE);
1949 		task_importance_mark_live_donor(task, FALSE);
1950 		task_importance_mark_receiver(task, FALSE);
1951 		task_importance_mark_denap_receiver(task, FALSE);
1952 		break;
1953 
1954 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
1955 		task_importance_mark_donor(task, FALSE);
1956 		task_importance_mark_live_donor(task, FALSE);
1957 		task_importance_mark_receiver(task, TRUE);
1958 		task_importance_mark_denap_receiver(task, FALSE);
1959 		break;
1960 
1961 	case TASK_APPTYPE_DAEMON_BACKGROUND:
1962 		task_importance_mark_donor(task, FALSE);
1963 		task_importance_mark_live_donor(task, FALSE);
1964 		task_importance_mark_receiver(task, FALSE);
1965 		task_importance_mark_denap_receiver(task, FALSE);
1966 		break;
1967 
1968 	case TASK_APPTYPE_DRIVER:
1969 		task_importance_mark_donor(task, FALSE);
1970 		task_importance_mark_live_donor(task, FALSE);
1971 		task_importance_mark_receiver(task, FALSE);
1972 		task_importance_mark_denap_receiver(task, FALSE);
1973 		break;
1974 
1975 	case TASK_APPTYPE_NONE:
1976 		break;
1977 	}
1978 
1979 	if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
1980 		int portwatch_boosts = 0;
1981 
1982 		for (uint32_t i = 0; i < portwatch_count; i++) {
1983 			ipc_port_t port = NULL;
1984 
1985 			if (IP_VALID(port = portwatch_ports[i])) {
1986 				int boost = 0;
1987 				task_add_importance_watchport(task, port, &boost);
1988 				portwatch_boosts += boost;
1989 			}
1990 		}
1991 
1992 		if (portwatch_boosts > 0) {
1993 			task_importance_hold_internal_assertion(task, portwatch_boosts);
1994 		}
1995 	}
1996 
1997 	/* Redirect the turnstile push of watchports to task */
1998 	if (portwatch_count && portwatch_ports != NULL) {
1999 		task_add_turnstile_watchports(task, thread, portwatch_ports, portwatch_count);
2000 	}
2001 
2002 	task_lock(task);
2003 
2004 	if (apptype != TASK_APPTYPE_NONE) {
2005 		task->requested_policy.trp_apptype = apptype;
2006 	}
2007 
2008 #if !defined(XNU_TARGET_OS_OSX)
2009 	/* Remove this after launchd starts setting it properly */
2010 	if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) {
2011 		task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION;
2012 	} else
2013 #endif
2014 	if (role != TASK_UNSPECIFIED) {
2015 		task->requested_policy.trp_role = (uint32_t)role;
2016 	}
2017 
2018 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2019 		task->requested_policy.trp_qos_clamp = qos_clamp;
2020 	}
2021 
2022 	task_policy_update_locked(task, &pend_token);
2023 
2024 	task_unlock(task);
2025 
2026 	/* Ensure the donor bit is updated to be in sync with the new live donor status */
2027 	pend_token.tpt_update_live_donor = 1;
2028 
2029 	task_policy_update_complete_unlocked(task, &pend_token);
2030 
2031 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2032 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
2033 	    task_pid(task), trequested_0(task), trequested_1(task),
2034 	    task_is_importance_receiver(task), 0);
2035 }
2036 
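/*
 * Editorial summary of the importance marks applied by the switch above:
 *
 *   apptype             donor   live-donor   receiver    denap-receiver
 *   APP_DEFAULT         no      yes          no          macOS only
 *   DAEMON_INTERACTIVE  yes     no           boot-arg    no
 *   DAEMON_STANDARD     yes     no           no          no
 *   DAEMON_ADAPTIVE     no      no           yes         no
 *   DAEMON_BACKGROUND   no      no           no          no
 *   DRIVER              no      no           no          no
 *   NONE                (marks left untouched)
 */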
2037 /*
2038  * Inherit task role across exec
2039  */
2040 void
2041 proc_inherit_task_role(task_t new_task,
2042     task_t old_task)
2043 {
2044 	int role;
2045 
2046 	/* inherit the role from old task to new task */
2047 	role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
2048 	proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
2049 }
2050 
2051 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
2052 
2053 /*
2054  * Compute the default main thread qos for a task
2055  */
2056 thread_qos_t
2057 task_compute_main_thread_qos(task_t task)
2058 {
2059 	thread_qos_t primordial_qos = THREAD_QOS_UNSPECIFIED;
2060 
2061 	thread_qos_t qos_clamp = task->requested_policy.trp_qos_clamp;
2062 
2063 	switch (task->requested_policy.trp_apptype) {
2064 	case TASK_APPTYPE_APP_TAL:
2065 	case TASK_APPTYPE_APP_DEFAULT:
2066 		primordial_qos = THREAD_QOS_USER_INTERACTIVE;
2067 		break;
2068 
2069 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2070 	case TASK_APPTYPE_DAEMON_STANDARD:
2071 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2072 	case TASK_APPTYPE_DRIVER:
2073 		primordial_qos = THREAD_QOS_LEGACY;
2074 		break;
2075 
2076 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2077 		primordial_qos = THREAD_QOS_BACKGROUND;
2078 		break;
2079 	}
2080 
2081 	if (task->bsd_info == initproc) {
2082 		/* PID 1 gets a special case */
2083 		primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED);
2084 	}
2085 
2086 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2087 		if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
2088 			primordial_qos = MIN(qos_clamp, primordial_qos);
2089 		} else {
2090 			primordial_qos = qos_clamp;
2091 		}
2092 	}
2093 
2094 	return primordial_qos;
2095 }
2096 
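/*
 * Editorial worked example of the clamp logic above: a default app
 * (primordial QoS USER_INTERACTIVE) spawned with a UTILITY clamp gets
 * MIN(THREAD_QOS_UTILITY, THREAD_QOS_USER_INTERACTIVE) == UTILITY,
 * while a standard daemon with no clamp keeps THREAD_QOS_LEGACY.
 */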
2097 
2098 /* for process_policy to check before attempting to set */
2099 boolean_t
2100 proc_task_is_tal(task_t task)
2101 {
2102 	return (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE;
2103 }
2104 
2105 int
2106 task_get_apptype(task_t task)
2107 {
2108 	return task->requested_policy.trp_apptype;
2109 }
2110 
2111 boolean_t
2112 task_is_daemon(task_t task)
2113 {
2114 	switch (task->requested_policy.trp_apptype) {
2115 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2116 	case TASK_APPTYPE_DAEMON_STANDARD:
2117 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2118 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2119 		return TRUE;
2120 	default:
2121 		return FALSE;
2122 	}
2123 }
2124 
2125 bool
2126 task_is_driver(task_t task)
2127 {
2128 	if (!task) {
2129 		return FALSE;
2130 	}
2131 	return task->requested_policy.trp_apptype == TASK_APPTYPE_DRIVER;
2132 }
2133 
2134 boolean_t
2135 task_is_app(task_t task)
2136 {
2137 	switch (task->requested_policy.trp_apptype) {
2138 	case TASK_APPTYPE_APP_DEFAULT:
2139 	case TASK_APPTYPE_APP_TAL:
2140 		return TRUE;
2141 	default:
2142 		return FALSE;
2143 	}
2144 }
2145 
2146 /* for telemetry */
2147 integer_t
2148 task_grab_latency_qos(task_t task)
2149 {
2150 	return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
2151 }
2152 
2153 /* update the darwin background action state in the flags field for libproc */
2154 int
2155 proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
2156 {
2157 	if (task->requested_policy.trp_ext_darwinbg) {
2158 		*flagsp |= PROC_FLAG_EXT_DARWINBG;
2159 	}
2160 
2161 	if (task->requested_policy.trp_int_darwinbg) {
2162 		*flagsp |= PROC_FLAG_DARWINBG;
2163 	}
2164 
2165 #if !defined(XNU_TARGET_OS_OSX)
2166 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
2167 		*flagsp |= PROC_FLAG_IOS_APPLEDAEMON;
2168 	}
2169 
2170 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2171 		*flagsp |= PROC_FLAG_IOS_IMPPROMOTION;
2172 	}
2173 #endif /* !defined(XNU_TARGET_OS_OSX) */
2174 
2175 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
2176 	    task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) {
2177 		*flagsp |= PROC_FLAG_APPLICATION;
2178 	}
2179 
2180 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2181 		*flagsp |= PROC_FLAG_ADAPTIVE;
2182 	}
2183 
2184 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
2185 	    task->requested_policy.trp_boosted == 1) {
2186 		*flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;
2187 	}
2188 
2189 	if (task_is_importance_donor(task)) {
2190 		*flagsp |= PROC_FLAG_IMPORTANCE_DONOR;
2191 	}
2192 
2193 	if (task->effective_policy.tep_sup_active) {
2194 		*flagsp |= PROC_FLAG_SUPPRESSED;
2195 	}
2196 
2197 	return 0;
2198 }
2199 
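/*
 * Editorial sketch of a hypothetical libproc-facing caller; the flags
 * word must be initialized by the caller because the routine only ORs
 * bits in:
 */
#if 0 /* illustrative only */
	uint32_t flags = 0;
	proc_get_darwinbgstate(task, &flags);
	if (flags & (PROC_FLAG_EXT_DARWINBG | PROC_FLAG_DARWINBG)) {
		/* some interface has requested backgrounding */
	}
#endif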
2200 /*
2201  * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
2202  * The current scheme packs as much data into a single tracepoint as it can.
2203  *
2204  * Each task/thread requested/effective structure is 64 bits in size. Any
2205  * given tracepoint will emit either requested or effective data, but not both.
2206  *
2207  * A tracepoint may emit any of task, thread, or task & thread data.
2208  *
2209  * The type of data emitted varies with pointer size. Where possible, both
2210  * task and thread data are emitted. In LP32 systems, the first and second
2211  * halves of either the task or thread data is emitted.
2212  *
2213  * The code uses uintptr_t array indexes instead of high/low to avoid
2214  * confusion WRT big vs little endian.
2215  *
2216  * The truth table for the tracepoint data functions is below, and has the
2217  * following invariants:
2218  *
2219  * 1) task and thread are uintptr_t*
2220  * 2) task may never be NULL
2221  *
2222  *
2223  *                                     LP32            LP64
2224  * trequested_0(task, NULL)            task[0]         task[0]
2225  * trequested_1(task, NULL)            task[1]         NULL
2226  * trequested_0(task, thread)          thread[0]       task[0]
2227  * trequested_1(task, thread)          thread[1]       thread[0]
2228  *
2229  * Basically, you get a full task or thread on LP32, and both on LP64.
2230  *
2231  * The uintptr_t munging here is squicky enough to deserve a comment.
2232  *
2233  * The variables we are accessing are laid out in memory like this:
2234  *
2235  * [            LP64 uintptr_t  0          ]
2236  * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
2237  *
2238  *      1   2   3   4     5   6   7   8
2239  *
2240  */
2241 
2242 static uintptr_t
2243 trequested_0(task_t task)
2244 {
2245 	static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
2246 
2247 	uintptr_t* raw = (uintptr_t*)&task->requested_policy;
2248 
2249 	return raw[0];
2250 }
2251 
2252 static uintptr_t
2253 trequested_1(task_t task)
2254 {
2255 #if defined __LP64__
2256 	(void)task;
2257 	return 0;
2258 #else
2259 	uintptr_t* raw = (uintptr_t*)(&task->requested_policy);
2260 	return raw[1];
2261 #endif
2262 }
2263 
2264 static uintptr_t
2265 teffective_0(task_t task)
2266 {
2267 	uintptr_t* raw = (uintptr_t*)&task->effective_policy;
2268 
2269 	return raw[0];
2270 }
2271 
2272 static uintptr_t
2273 teffective_1(task_t task)
2274 {
2275 #if defined __LP64__
2276 	(void)task;
2277 	return 0;
2278 #else
2279 	uintptr_t* raw = (uintptr_t*)(&task->effective_policy);
2280 	return raw[1];
2281 #endif
2282 }
2283 
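/*
 * Editorial sketch: a typical emission site passes both halves, e.g.
 * (mirroring the real call sites in this file):
 *
 *     KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code | DBG_FUNC_NONE,
 *         task_pid(task), trequested_0(task), trequested_1(task), 0, 0);
 *
 * On LP64 the second value is always 0; on LP32 it carries the second
 * uintptr_t-sized word of the 64-bit policy struct.
 */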
2284 /* dump pending for tracepoint */
2285 uint32_t
2286 tpending(task_pend_token_t pend_token)
2287 {
2288 	return *(uint32_t*)(void*)(pend_token);
2289 }
2290 
2291 uint64_t
2292 task_requested_bitfield(task_t task)
2293 {
2294 	uint64_t bits = 0;
2295 	struct task_requested_policy requested = task->requested_policy;
2296 
2297 	bits |= (requested.trp_int_darwinbg     ? POLICY_REQ_INT_DARWIN_BG  : 0);
2298 	bits |= (requested.trp_ext_darwinbg     ? POLICY_REQ_EXT_DARWIN_BG  : 0);
2299 	bits |= (requested.trp_int_iotier       ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
2300 	bits |= (requested.trp_ext_iotier       ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
2301 	bits |= (requested.trp_int_iopassive    ? POLICY_REQ_INT_PASSIVE_IO : 0);
2302 	bits |= (requested.trp_ext_iopassive    ? POLICY_REQ_EXT_PASSIVE_IO : 0);
2303 	bits |= (requested.trp_bg_iotier        ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT)   : 0);
2304 	bits |= (requested.trp_terminated       ? POLICY_REQ_TERMINATED     : 0);
2305 
2306 	bits |= (requested.trp_boosted          ? POLICY_REQ_BOOSTED        : 0);
2307 	bits |= (requested.trp_tal_enabled      ? POLICY_REQ_TAL_ENABLED    : 0);
2308 	bits |= (requested.trp_apptype          ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT)  : 0);
2309 	bits |= (requested.trp_role             ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT)     : 0);
2310 
2311 	bits |= (requested.trp_sup_active       ? POLICY_REQ_SUP_ACTIVE         : 0);
2312 	bits |= (requested.trp_sup_lowpri_cpu   ? POLICY_REQ_SUP_LOWPRI_CPU     : 0);
2313 	bits |= (requested.trp_sup_cpu          ? POLICY_REQ_SUP_CPU            : 0);
2314 	bits |= (requested.trp_sup_timer        ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
2315 	bits |= (requested.trp_sup_throughput   ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT)     : 0);
2316 	bits |= (requested.trp_sup_disk         ? POLICY_REQ_SUP_DISK_THROTTLE  : 0);
2317 	bits |= (requested.trp_sup_bg_sockets   ? POLICY_REQ_SUP_BG_SOCKETS     : 0);
2318 
2319 	bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
2320 	bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
2321 	bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
2322 	bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
2323 	bits |= (requested.trp_sfi_managed      ? POLICY_REQ_SFI_MANAGED        : 0);
2324 	bits |= (requested.trp_qos_clamp        ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT)        : 0);
2325 
2326 	return bits;
2327 }
2328 
2329 uint64_t
2330 task_effective_bitfield(task_t task)
2331 {
2332 	uint64_t bits = 0;
2333 	struct task_effective_policy effective = task->effective_policy;
2334 
2335 	bits |= (effective.tep_io_tier          ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
2336 	bits |= (effective.tep_io_passive       ? POLICY_EFF_IO_PASSIVE     : 0);
2337 	bits |= (effective.tep_darwinbg         ? POLICY_EFF_DARWIN_BG      : 0);
2338 	bits |= (effective.tep_lowpri_cpu       ? POLICY_EFF_LOWPRI_CPU     : 0);
2339 	bits |= (effective.tep_terminated       ? POLICY_EFF_TERMINATED     : 0);
2340 	bits |= (effective.tep_all_sockets_bg   ? POLICY_EFF_ALL_SOCKETS_BG : 0);
2341 	bits |= (effective.tep_new_sockets_bg   ? POLICY_EFF_NEW_SOCKETS_BG : 0);
2342 	bits |= (effective.tep_bg_iotier        ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
2343 	bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0);
2344 
2345 	bits |= (effective.tep_tal_engaged      ? POLICY_EFF_TAL_ENGAGED    : 0);
2346 	bits |= (effective.tep_watchers_bg      ? POLICY_EFF_WATCHERS_BG    : 0);
2347 	bits |= (effective.tep_sup_active       ? POLICY_EFF_SUP_ACTIVE     : 0);
2348 	bits |= (effective.tep_suppressed_cpu   ? POLICY_EFF_SUP_CPU        : 0);
2349 	bits |= (effective.tep_role             ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT)        : 0);
2350 	bits |= (effective.tep_latency_qos      ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
2351 	bits |= (effective.tep_through_qos      ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
2352 	bits |= (effective.tep_sfi_managed      ? POLICY_EFF_SFI_MANAGED    : 0);
2353 	bits |= (effective.tep_qos_ceiling      ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);
2354 
2355 	return bits;
2356 }
2357 
2358 
2359 /*
2360  * Resource usage and CPU related routines
2361  */
2362 
2363 int
2364 proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
2365 {
2366 	int error = 0;
2367 	int scope;
2368 
2369 	task_lock(task);
2370 
2371 
2372 	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
2373 	task_unlock(task);
2374 
2375 	/*
2376 	 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
2377 	 */
2378 	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2379 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
2380 	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2381 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
2382 	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
2383 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2384 	}
2385 
2386 	return error;
2387 }
2388 
2389 /*
2390  * Configure the default CPU usage monitor parameters.
2391  *
2392  * For tasks which have this mechanism activated: if any thread in the
2393  * process consumes more CPU than this, an EXC_RESOURCE exception will be generated.
2394  */
2395 void
2396 proc_init_cpumon_params(void)
2397 {
2398 	/*
2399 	 * The max CPU percentage can be configured via the boot-args and
2400 	 * a key in the device tree. The boot-args are honored first, then the
2401 	 * device tree.
2402 	 */
2403 	if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
2404 	    sizeof(proc_max_cpumon_percentage))) {
2405 		uint64_t max_percentage = 0ULL;
2406 
2407 		if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
2408 		    sizeof(max_percentage))) {
2409 			max_percentage = DEFAULT_CPUMON_PERCENTAGE;
2410 		}
2411 
2412 		assert(max_percentage <= UINT8_MAX);
2413 		proc_max_cpumon_percentage = (uint8_t) max_percentage;
2414 	}
2415 
2416 	if (proc_max_cpumon_percentage > 100) {
2417 		proc_max_cpumon_percentage = 100;
2418 	}
2419 
2420 	/*
2421 	 * The interval should be specified in seconds.
2422 	 *
2423 	 * Like the max CPU percentage, the max CPU interval can be configured
2424 	 * via boot-args and the device tree.
2425 	 */
2426 	if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
2427 	    sizeof(proc_max_cpumon_interval))) {
2428 		if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
2429 		    sizeof(proc_max_cpumon_interval))) {
2430 			proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
2431 		}
2432 	}
2433 
2434 	proc_max_cpumon_interval *= NSEC_PER_SEC;
2435 
2436 	/* TEMPORARY boot arg to control App suppression */
2437 	PE_parse_boot_argn("task_policy_suppression_flags",
2438 	    &task_policy_suppression_flags,
2439 	    sizeof(task_policy_suppression_flags));
2440 
2441 	/* adjust suppression disk policy if called for in boot arg */
2442 	if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) {
2443 		proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER2;
2444 	}
2445 }
2446 
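/*
 * Editorial example (values illustrative): the defaults above can be
 * overridden at boot, e.g.
 *
 *     nvram boot-args="max_cpumon_percentage=50 max_cpumon_interval=180"
 *
 * The percentage is clamped to 100, and the interval is given in
 * seconds (scaled to nanoseconds by the NSEC_PER_SEC multiply above).
 */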
2447 /*
2448  * Currently supported configurations for CPU limits.
2449  *
2450  * Policy				| Deadline-based CPU limit | Percentage-based CPU limit
2451  * -------------------------------------+--------------------------+------------------------------
2452  * PROC_POLICY_RSRCACT_THROTTLE		| ENOTSUP		   | Task-wide scope only
2453  * PROC_POLICY_RSRCACT_SUSPEND		| Task-wide scope only	   | ENOTSUP
2454  * PROC_POLICY_RSRCACT_TERMINATE	| Task-wide scope only	   | ENOTSUP
2455  * PROC_POLICY_RSRCACT_NOTIFY_KQ	| Task-wide scope only	   | ENOTSUP
2456  * PROC_POLICY_RSRCACT_NOTIFY_EXC	| ENOTSUP		   | Per-thread scope only
2457  *
2458  * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
2459  * after the specified amount of wallclock time has elapsed.
2460  *
2461  * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
2462  * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
2463  * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
2464  * in the task are added together), or by any one thread in the task (so-called "per-thread" scope).
2465  *
2466  * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
2467  * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
2468  * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
2469  * but the potential consumer of the API at the time was insisting on wallclock time instead.
2470  *
2471  * Currently, requesting notification via an exception is the only way to get per-thread scope for a
2472  * CPU limit. All other types of notifications force task-wide scope for the limit.
2473  */
2474 int
2475 proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
2476     int cpumon_entitled)
2477 {
2478 	int error = 0;
2479 	int scope;
2480 
2481 	/*
2482 	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
2483 	 */
2484 	switch (policy) {
2485 	// If no policy is explicitly given, the default is to throttle.
2486 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
2487 	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
2488 		if (deadline != 0) {
2489 			return ENOTSUP;
2490 		}
2491 		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2492 		break;
2493 	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
2494 	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
2495 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
2496 		if (percentage != 0) {
2497 			return ENOTSUP;
2498 		}
2499 		scope = TASK_RUSECPU_FLAGS_DEADLINE;
2500 		break;
2501 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
2502 		if (deadline != 0) {
2503 			return ENOTSUP;
2504 		}
2505 		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2506 #ifdef CONFIG_NOMONITORS
2507 		return error;
2508 #endif /* CONFIG_NOMONITORS */
2509 		break;
2510 	default:
2511 		return EINVAL;
2512 	}
2513 
2514 	task_lock(task);
2515 	if (task != current_task()) {
2516 		task->policy_ru_cpu_ext = policy;
2517 	} else {
2518 		task->policy_ru_cpu = policy;
2519 	}
2520 	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
2521 	task_unlock(task);
2522 	return error;
2523 }
2524 
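/*
 * Editorial sketch of two configurations permitted by the matrix above
 * (hypothetical call sites, cpumon_entitled == 0;
 * PROC_POLICY_CPUMON_DEFAULTS is defined later in this file):
 */
#if 0 /* illustrative only */
	/* Task-wide throttle: block after 50% CPU per one-second refill */
	proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
	    50 /* percent */, NSEC_PER_SEC /* interval */, 0 /* deadline */, 0);

	/* Per-thread EXC_RESOURCE using the system default percentage/interval */
	proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
	    PROC_POLICY_CPUMON_DEFAULTS, -1ULL /* keep or default interval */, 0, 0);
#endif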
2525 /* TODO: get rid of these */
2526 #define TASK_POLICY_CPU_RESOURCE_USAGE          0
2527 #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE     1
2528 #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE   2
2529 #define TASK_POLICY_DISK_RESOURCE_USAGE         3
2530 #define TASK_POLICY_NETWORK_RESOURCE_USAGE      4
2531 #define TASK_POLICY_POWER_RESOURCE_USAGE        5
2532 
2533 #define TASK_POLICY_RESOURCE_USAGE_COUNT        6
2534 
2535 int
2536 proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
2537 {
2538 	int error = 0;
2539 	int action;
2540 	void * bsdinfo = NULL;
2541 
2542 	task_lock(task);
2543 	if (task != current_task()) {
2544 		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2545 	} else {
2546 		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2547 	}
2548 
2549 	error = task_clear_cpuusage_locked(task, cpumon_entitled);
2550 	if (error != 0) {
2551 		goto out;
2552 	}
2553 
2554 	action = task->applied_ru_cpu;
2555 	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2556 		/* reset action */
2557 		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2558 	}
2559 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2560 		bsdinfo = task->bsd_info;
2561 		task_unlock(task);
2562 		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2563 		goto out1;
2564 	}
2565 
2566 out:
2567 	task_unlock(task);
2568 out1:
2569 	return error;
2570 }
2571 
2572 /* used to apply resource limit related actions */
2573 static int
2574 task_apply_resource_actions(task_t task, int type)
2575 {
2576 	int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2577 	void * bsdinfo = NULL;
2578 
2579 	switch (type) {
2580 	case TASK_POLICY_CPU_RESOURCE_USAGE:
2581 		break;
2582 	case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
2583 	case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
2584 	case TASK_POLICY_DISK_RESOURCE_USAGE:
2585 	case TASK_POLICY_NETWORK_RESOURCE_USAGE:
2586 	case TASK_POLICY_POWER_RESOURCE_USAGE:
2587 		return 0;
2588 
2589 	default:
2590 		return 1;
2591 	}
2592 
2593 
2594 	/* only cpu actions for now */
2595 	task_lock(task);
2596 
2597 	if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2598 		/* apply action */
2599 		task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
2600 		action = task->applied_ru_cpu_ext;
2601 	} else {
2602 		action = task->applied_ru_cpu_ext;
2603 	}
2604 
2605 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2606 		bsdinfo = task->bsd_info;
2607 		task_unlock(task);
2608 		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2609 	} else {
2610 		task_unlock(task);
2611 	}
2612 
2613 	return 0;
2614 }
2615 
2616 /*
2617  * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
2618  * only allows for one at a time. This means that if there is a per-thread limit active, the other
2619  * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
2620  * to the caller, and prefer that, but there's no need for that at the moment.
2621  */
2622 static int
2623 task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
2624 {
2625 	*percentagep = 0;
2626 	*intervalp = 0;
2627 	*deadlinep = 0;
2628 
2629 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
2630 		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2631 		*percentagep = task->rusage_cpu_perthr_percentage;
2632 		*intervalp = task->rusage_cpu_perthr_interval;
2633 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
2634 		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2635 		*percentagep = task->rusage_cpu_percentage;
2636 		*intervalp = task->rusage_cpu_interval;
2637 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
2638 		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
2639 		*deadlinep = task->rusage_cpu_deadline;
2640 	} else {
2641 		*scope = 0;
2642 	}
2643 
2644 	return 0;
2645 }
2646 
2647 /*
2648  * Suspend the CPU usage monitor for the task.  Return value indicates
2649  * if the mechanism was actually enabled.
2650  */
2651 int
2652 task_suspend_cpumon(task_t task)
2653 {
2654 	thread_t thread;
2655 
2656 	task_lock_assert_owned(task);
2657 
2658 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
2659 		return KERN_INVALID_ARGUMENT;
2660 	}
2661 
2662 #if CONFIG_TELEMETRY
2663 	/*
2664 	 * Disable task-wide telemetry if it was ever enabled by the CPU usage
2665 	 * monitor's warning zone.
2666 	 */
2667 	telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0);
2668 #endif
2669 
2670 	/*
2671 	 * Suspend monitoring for the task, and propagate that change to each thread.
2672 	 */
2673 	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
2674 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2675 		act_set_astledger(thread);
2676 	}
2677 
2678 	return KERN_SUCCESS;
2679 }
2680 
2681 /*
2682  * Remove all traces of the CPU monitor.
2683  */
2684 int
2685 task_disable_cpumon(task_t task)
2686 {
2687 	int kret;
2688 
2689 	task_lock_assert_owned(task);
2690 
2691 	kret = task_suspend_cpumon(task);
2692 	if (kret) {
2693 		return kret;
2694 	}
2695 
2696 	/* Once we clear these values, the monitor can't be resumed */
2697 	task->rusage_cpu_perthr_percentage = 0;
2698 	task->rusage_cpu_perthr_interval = 0;
2699 
2700 	return KERN_SUCCESS;
2701 }
2702 
2703 
2704 static int
2705 task_enable_cpumon_locked(task_t task)
2706 {
2707 	thread_t thread;
2708 	task_lock_assert_owned(task);
2709 
2710 	if (task->rusage_cpu_perthr_percentage == 0 ||
2711 	    task->rusage_cpu_perthr_interval == 0) {
2712 		return KERN_INVALID_ARGUMENT;
2713 	}
2714 
2715 	task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2716 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2717 		act_set_astledger(thread);
2718 	}
2719 
2720 	return KERN_SUCCESS;
2721 }
2722 
2723 int
2724 task_resume_cpumon(task_t task)
2725 {
2726 	kern_return_t kret;
2727 
2728 	if (!task) {
2729 		return EINVAL;
2730 	}
2731 
2732 	task_lock(task);
2733 	kret = task_enable_cpumon_locked(task);
2734 	task_unlock(task);
2735 
2736 	return kret;
2737 }
2738 
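/*
 * Editorial sketch: suspend vs. resume of the per-thread CPU monitor.
 * task_suspend_cpumon() expects the task lock held, while
 * task_resume_cpumon() takes it internally.  Hypothetical call site:
 */
#if 0 /* illustrative only */
	task_lock(task);
	int kret = task_suspend_cpumon(task);   /* limit values stay configured */
	task_unlock(task);

	if (kret == KERN_SUCCESS) {
		/* ... later: re-arm with the previously configured limits */
		task_resume_cpumon(task);
	}
#endif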
2739 
2740 /* duplicate values from bsd/sys/process_policy.h */
2741 #define PROC_POLICY_CPUMON_DISABLE      0xFF
2742 #define PROC_POLICY_CPUMON_DEFAULTS     0xFE
2743 
2744 static int
2745 task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
2746 {
2747 	uint64_t abstime = 0;
2748 	uint64_t limittime = 0;
2749 
2750 	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
2751 
2752 	/* By default, refill once per second */
2753 	if (interval == 0) {
2754 		interval = NSEC_PER_SEC;
2755 	}
2756 
2757 	if (percentage != 0) {
2758 		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2759 			boolean_t warn = FALSE;
2760 
2761 			/*
2762 			 * A per-thread CPU limit on a task generates an exception
2763 			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
2764 			 * exceeds the limit.
2765 			 */
2766 
2767 			if (percentage == PROC_POLICY_CPUMON_DISABLE) {
2768 				if (cpumon_entitled) {
2769 					/* 25095698 - task_disable_cpumon() should be reliable */
2770 					task_disable_cpumon(task);
2771 					return 0;
2772 				}
2773 
2774 				/*
2775 				 * This task wishes to disable the CPU usage monitor, but it's
2776 				 * missing the required entitlement:
2777 				 *     com.apple.private.kernel.override-cpumon
2778 				 *
2779 				 * Instead, treat this as a request to reset its params
2780 				 * back to the defaults.
2781 				 */
2782 				warn = TRUE;
2783 				percentage = PROC_POLICY_CPUMON_DEFAULTS;
2784 			}
2785 
2786 			if (percentage == PROC_POLICY_CPUMON_DEFAULTS) {
2787 				percentage = proc_max_cpumon_percentage;
2788 				interval   = proc_max_cpumon_interval;
2789 			}
2790 
2791 			if (percentage > 100) {
2792 				percentage = 100;
2793 			}
2794 
2795 			/*
2796 			 * Passing in an interval of -1 means either:
2797 			 * - Leave the interval as-is, if there's already a per-thread
2798 			 *   limit configured
2799 			 * - Use the system default.
2800 			 */
2801 			if (interval == -1ULL) {
2802 				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2803 					interval = task->rusage_cpu_perthr_interval;
2804 				} else {
2805 					interval = proc_max_cpumon_interval;
2806 				}
2807 			}
2808 
2809 			/*
2810 			 * Enforce global caps on CPU usage monitor here if the process is not
2811 			 * entitled to escape the global caps.
2812 			 */
2813 			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
2814 				warn = TRUE;
2815 				percentage = proc_max_cpumon_percentage;
2816 			}
2817 
2818 			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
2819 				warn = TRUE;
2820 				interval = proc_max_cpumon_interval;
2821 			}
2822 
2823 			if (warn) {
2824 				int       pid = 0;
2825 				const char *procname = "unknown";
2826 
2827 #ifdef MACH_BSD
2828 				pid = proc_selfpid();
2829 				if (current_task()->bsd_info != NULL) {
2830 					procname = proc_name_address(current_task()->bsd_info);
2831 				}
2832 #endif
2833 
2834 				printf("process %s[%d] denied attempt to escape CPU monitor"
2835 				    " (missing required entitlement).\n", procname, pid);
2836 			}
2837 
2838 			/* configure the limit values */
2839 			task->rusage_cpu_perthr_percentage = percentage;
2840 			task->rusage_cpu_perthr_interval = interval;
2841 
2842 			/* and enable the CPU monitor */
2843 			(void)task_enable_cpumon_locked(task);
2844 		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2845 			/*
2846 			 * Currently, a proc-wide CPU limit always blocks if the limit is
2847 			 * exceeded (LEDGER_ACTION_BLOCK).
2848 			 */
2849 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
2850 			task->rusage_cpu_percentage = percentage;
2851 			task->rusage_cpu_interval = interval;
2852 
2853 			limittime = (interval * percentage) / 100;
2854 			nanoseconds_to_absolutetime(limittime, &abstime);
2855 
2856 			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
2857 			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
2858 			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2859 		}
2860 	}
2861 
2862 	if (deadline != 0) {
2863 		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
2864 
2865 		/* if already in use, cancel and wait for it to clean out */
2866 		if (task->rusage_cpu_callt != NULL) {
2867 			task_unlock(task);
2868 			thread_call_cancel_wait(task->rusage_cpu_callt);
2869 			task_lock(task);
2870 		}
2871 		if (task->rusage_cpu_callt == NULL) {
2872 			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
2873 		}
2874 		/* setup callout */
2875 		if (task->rusage_cpu_callt != 0) {
2876 			uint64_t save_abstime = 0;
2877 
2878 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
2879 			task->rusage_cpu_deadline = deadline;
2880 
2881 			nanoseconds_to_absolutetime(deadline, &abstime);
2882 			save_abstime = abstime;
2883 			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
2884 			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
2885 		}
2886 	}
2887 
2888 	return 0;
2889 }
2890 
2891 int
2892 task_clear_cpuusage(task_t task, int cpumon_entitled)
2893 {
2894 	int retval = 0;
2895 
2896 	task_lock(task);
2897 	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
2898 	task_unlock(task);
2899 
2900 	return retval;
2901 }
2902 
2903 static int
2904 task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
2905 {
2906 	thread_call_t savecallt;
2907 
2908 	/* cancel percentage handling if set */
2909 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2910 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
2911 		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2912 		task->rusage_cpu_percentage = 0;
2913 		task->rusage_cpu_interval = 0;
2914 	}
2915 
2916 	/*
2917 	 * Disable the CPU usage monitor.
2918 	 */
2919 	if (cpumon_entitled) {
2920 		task_disable_cpumon(task);
2921 	}
2922 
2923 	/* cancel deadline handling if set */
2924 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
2925 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
2926 		if (task->rusage_cpu_callt != 0) {
2927 			savecallt = task->rusage_cpu_callt;
2928 			task->rusage_cpu_callt = NULL;
2929 			task->rusage_cpu_deadline = 0;
2930 			task_unlock(task);
2931 			thread_call_cancel_wait(savecallt);
2932 			thread_call_free(savecallt);
2933 			task_lock(task);
2934 		}
2935 	}
2936 	return 0;
2937 }
2938 
2939 /* called by ledger unit to enforce action due to resource usage criteria being met */
2940 static void
2941 task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
2942 {
2943 	task_t task = (task_t)param0;
2944 	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
2945 	return;
2946 }
2947 
2948 
2949 /*
2950  * Routines for taskwatch and pidbind
2951  */
2952 
2953 #if CONFIG_TASKWATCH
2954 
2955 LCK_MTX_DECLARE_ATTR(task_watch_mtx, &task_lck_grp, &task_lck_attr);
2956 
2957 static void
2958 task_watch_lock(void)
2959 {
2960 	lck_mtx_lock(&task_watch_mtx);
2961 }
2962 
2963 static void
2964 task_watch_unlock(void)
2965 {
2966 	lck_mtx_unlock(&task_watch_mtx);
2967 }
2968 
2969 static void
2970 add_taskwatch_locked(task_t task, task_watch_t * twp)
2971 {
2972 	queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links);
2973 	task->num_taskwatchers++;
2974 }
2975 
2976 static void
2977 remove_taskwatch_locked(task_t task, task_watch_t * twp)
2978 {
2979 	queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links);
2980 	task->num_taskwatchers--;
2981 }
2982 
2983 
2984 int
2985 proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind)
2986 {
2987 	thread_t target_thread = NULL;
2988 	int ret = 0, setbg = 0;
2989 	task_watch_t *twp = NULL;
2990 	task_t task = TASK_NULL;
2991 
2992 	target_thread = task_findtid(curtask, tid);
2993 	if (target_thread == NULL) {
2994 		return ESRCH;
2995 	}
2996 	/* holds thread reference */
2997 
2998 	if (bind != 0) {
2999 		/* task is still active ? */
3000 		task_lock(target_task);
3001 		if (target_task->active == 0) {
3002 			task_unlock(target_task);
3003 			ret = ESRCH;
3004 			goto out;
3005 		}
3006 		task_unlock(target_task);
3007 
3008 		twp = kalloc_type(task_watch_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3009 
3010 		task_watch_lock();
3011 
3012 		if (target_thread->taskwatch != NULL) {
3013 			/* already bound to another task */
3014 			task_watch_unlock();
3015 
3016 			kfree_type(task_watch_t, twp);
3017 			ret = EBUSY;
3018 			goto out;
3019 		}
3020 
3021 		task_reference(target_task);
3022 
3023 		setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG);
3024 
3025 		twp->tw_task = target_task;             /* holds the task reference */
3026 		twp->tw_thread = target_thread;         /* holds the thread reference */
3027 		twp->tw_state = setbg;
3028 		twp->tw_importance = target_thread->importance;
3029 
3030 		add_taskwatch_locked(target_task, twp);
3031 
3032 		target_thread->taskwatch = twp;
3033 
3034 		task_watch_unlock();
3035 
3036 		if (setbg) {
3037 			set_thread_appbg(target_thread, setbg, INT_MIN);
3038 		}
3039 
3040 		/* retain the thread reference as it is in twp */
3041 		target_thread = NULL;
3042 	} else {
3043 		/* unbind */
3044 		task_watch_lock();
3045 		if ((twp = target_thread->taskwatch) != NULL) {
3046 			task = twp->tw_task;
3047 			target_thread->taskwatch = NULL;
3048 			remove_taskwatch_locked(task, twp);
3049 
3050 			task_watch_unlock();
3051 
3052 			task_deallocate(task);                  /* drop task ref in twp */
3053 			set_thread_appbg(target_thread, 0, twp->tw_importance);
3054 			thread_deallocate(target_thread);       /* drop thread ref in twp */
3055 			kfree_type(task_watch_t, twp);
3056 		} else {
3057 			task_watch_unlock();
3058 			ret = 0;                /* return success if it is not already bound */
3059 			goto out;
3060 		}
3061 	}
3062 out:
3063 	thread_deallocate(target_thread);       /* drop thread ref acquired in this routine */
3064 	return ret;
3065 }
3066 
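/*
 * Editorial sketch: binding a thread of the current task to watch
 * target_task, then unbinding it (hypothetical call site; tid is the
 * watcher thread's unique id, as passed to task_findtid()):
 */
#if 0 /* illustrative only */
	int err = proc_lf_pidbind(current_task(), tid, target_task, 1 /* bind */);
	if (err == 0) {
		/* the thread now tracks target_task's WATCHERS_BG state ... */
		(void)proc_lf_pidbind(current_task(), tid, target_task, 0 /* unbind */);
	}
#endif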
3067 static void
3068 set_thread_appbg(thread_t thread, int setbg, __unused int importance)
3069 {
3070 	int enable = (setbg ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE);
3071 
3072 	proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_PIDBIND_BG, enable);
3073 }
3074 
3075 static void
3076 apply_appstate_watchers(task_t task)
3077 {
3078 	int numwatchers = 0, i, j, setbg;
3079 	thread_watchlist_t * threadlist;
3080 	task_watch_t * twp;
3081 
3082 retry:
3083 	/* if there are no watchers on the list, return */
3084 	if ((numwatchers = task->num_taskwatchers) == 0) {
3085 		return;
3086 	}
3087 
3088 	threadlist = kalloc_type(thread_watchlist_t, numwatchers, Z_WAITOK | Z_ZERO);
3089 	if (threadlist == NULL) {
3090 		return;
3091 	}
3092 
3093 	task_watch_lock();
3094 	/* serialize application of app state changes */
3095 
3096 	if (task->watchapplying != 0) {
3097 		lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT);
3098 		task_watch_unlock();
3099 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3100 		goto retry;
3101 	}
3102 
3103 	if (numwatchers != task->num_taskwatchers) {
3104 		task_watch_unlock();
3105 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3106 		goto retry;
3107 	}
3108 
3109 	setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG);
3110 
3111 	task->watchapplying = 1;
3112 	i = 0;
3113 	queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) {
3114 		threadlist[i].thread = twp->tw_thread;
3115 		thread_reference(threadlist[i].thread);
3116 		if (setbg != 0) {
3117 			twp->tw_importance = twp->tw_thread->importance;
3118 			threadlist[i].importance = INT_MIN;
3119 		} else {
3120 			threadlist[i].importance = twp->tw_importance;
3121 		}
3122 		i++;
3123 		if (i > numwatchers) {
3124 			break;
3125 		}
3126 	}
3127 
3128 	task_watch_unlock();
3129 
3130 	for (j = 0; j < i; j++) {
3131 		set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance);
3132 		thread_deallocate(threadlist[j].thread);
3133 	}
3134 	kfree_type(thread_watchlist_t, numwatchers, threadlist);
3135 
3136 
3137 	task_watch_lock();
3138 	task->watchapplying = 0;
3139 	thread_wakeup_one(&task->watchapplying);
3140 	task_watch_unlock();
3141 }
3142 
3143 void
3144 thead_remove_taskwatch(thread_t thread)
3145 {
3146 	task_watch_t * twp;
3147 	int importance = 0;
3148 
3149 	task_watch_lock();
3150 	if ((twp = thread->taskwatch) != NULL) {
3151 		thread->taskwatch = NULL;
3152 		remove_taskwatch_locked(twp->tw_task, twp);
3153 	}
3154 	task_watch_unlock();
3155 	if (twp != NULL) {
3156 		thread_deallocate(twp->tw_thread);
3157 		task_deallocate(twp->tw_task);
3158 		importance = twp->tw_importance;
3159 		kfree_type(task_watch_t, twp);
3160 		/* remove the thread and networkbg */
3161 		set_thread_appbg(thread, 0, importance);
3162 	}
3163 }
3164 
3165 void
3166 task_removewatchers(task_t task)
3167 {
3168 	queue_head_t queue;
3169 	task_watch_t *twp;
3170 
3171 	task_watch_lock();
3172 	queue_new_head(&task->task_watchers, &queue, task_watch_t *, tw_links);
3173 	queue_init(&task->task_watchers);
3174 
3175 	queue_iterate(&queue, twp, task_watch_t *, tw_links) {
3176 		/*
3177 		 * Since the linkage is removed and thread state cleanup is already set up,
3178 		 * remove the reference from the thread.
3179 		 */
3180 		twp->tw_thread->taskwatch = NULL;       /* removed linkage, clear thread holding ref */
3181 	}
3182 
3183 	task->num_taskwatchers = 0;
3184 	task_watch_unlock();
3185 
3186 	while (!queue_empty(&queue)) {
3187 		queue_remove_first(&queue, twp, task_watch_t *, tw_links);
3188 		/* remove thread and network bg */
3189 		set_thread_appbg(twp->tw_thread, 0, twp->tw_importance);
3190 		thread_deallocate(twp->tw_thread);
3191 		task_deallocate(twp->tw_task);
3192 		kfree_type(task_watch_t, twp);
3193 	}
3194 }
3195 #endif /* CONFIG_TASKWATCH */
3196 
3197 /*
3198  * Routines for importance donation/inheritance/boosting
3199  */
3200 
3201 static void
3202 task_importance_update_live_donor(task_t target_task)
3203 {
3204 #if IMPORTANCE_INHERITANCE
3205 
3206 	ipc_importance_task_t task_imp;
3207 
3208 	task_imp = ipc_importance_for_task(target_task, FALSE);
3209 	if (IIT_NULL != task_imp) {
3210 		ipc_importance_task_update_live_donor(task_imp);
3211 		ipc_importance_task_release(task_imp);
3212 	}
3213 #endif /* IMPORTANCE_INHERITANCE */
3214 }
3215 
3216 void
3217 task_importance_mark_donor(task_t task, boolean_t donating)
3218 {
3219 #if IMPORTANCE_INHERITANCE
3220 	ipc_importance_task_t task_imp;
3221 
3222 	task_imp = ipc_importance_for_task(task, FALSE);
3223 	if (IIT_NULL != task_imp) {
3224 		ipc_importance_task_mark_donor(task_imp, donating);
3225 		ipc_importance_task_release(task_imp);
3226 	}
3227 #endif /* IMPORTANCE_INHERITANCE */
3228 }
3229 
3230 void
3231 task_importance_mark_live_donor(task_t task, boolean_t live_donating)
3232 {
3233 #if IMPORTANCE_INHERITANCE
3234 	ipc_importance_task_t task_imp;
3235 
3236 	task_imp = ipc_importance_for_task(task, FALSE);
3237 	if (IIT_NULL != task_imp) {
3238 		ipc_importance_task_mark_live_donor(task_imp, live_donating);
3239 		ipc_importance_task_release(task_imp);
3240 	}
3241 #endif /* IMPORTANCE_INHERITANCE */
3242 }
3243 
3244 void
3245 task_importance_mark_receiver(task_t task, boolean_t receiving)
3246 {
3247 #if IMPORTANCE_INHERITANCE
3248 	ipc_importance_task_t task_imp;
3249 
3250 	task_imp = ipc_importance_for_task(task, FALSE);
3251 	if (IIT_NULL != task_imp) {
3252 		ipc_importance_task_mark_receiver(task_imp, receiving);
3253 		ipc_importance_task_release(task_imp);
3254 	}
3255 #endif /* IMPORTANCE_INHERITANCE */
3256 }
3257 
3258 void
3259 task_importance_mark_denap_receiver(task_t task, boolean_t denap)
3260 {
3261 #if IMPORTANCE_INHERITANCE
3262 	ipc_importance_task_t task_imp;
3263 
3264 	task_imp = ipc_importance_for_task(task, FALSE);
3265 	if (IIT_NULL != task_imp) {
3266 		ipc_importance_task_mark_denap_receiver(task_imp, denap);
3267 		ipc_importance_task_release(task_imp);
3268 	}
3269 #endif /* IMPORTANCE_INHERITANCE */
3270 }
3271 
3272 void
3273 task_importance_reset(__imp_only task_t task)
3274 {
3275 #if IMPORTANCE_INHERITANCE
3276 	ipc_importance_task_t task_imp;
3277 
3278 	/* TODO: Lower importance downstream before disconnect */
3279 	task_imp = task->task_imp_base;
3280 	ipc_importance_reset(task_imp, FALSE);
3281 	task_importance_update_live_donor(task);
3282 #endif /* IMPORTANCE_INHERITANCE */
3283 }
3284 
3285 void
3286 task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t parent_task)
3287 {
3288 #if IMPORTANCE_INHERITANCE
3289 	ipc_importance_task_t new_task_imp = IIT_NULL;
3290 
3291 	new_task->task_imp_base = NULL;
3292 	if (!parent_task) {
3293 		return;
3294 	}
3295 
3296 	if (task_is_marked_importance_donor(parent_task)) {
3297 		new_task_imp = ipc_importance_for_task(new_task, FALSE);
3298 		assert(IIT_NULL != new_task_imp);
3299 		ipc_importance_task_mark_donor(new_task_imp, TRUE);
3300 	}
3301 	if (task_is_marked_live_importance_donor(parent_task)) {
3302 		if (IIT_NULL == new_task_imp) {
3303 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3304 		}
3305 		assert(IIT_NULL != new_task_imp);
3306 		ipc_importance_task_mark_live_donor(new_task_imp, TRUE);
3307 	}
3308 	/* Do not inherit 'receiver' on fork, vfexec or true spawn */
3309 	if (task_is_exec_copy(new_task) &&
3310 	    task_is_marked_importance_receiver(parent_task)) {
3311 		if (IIT_NULL == new_task_imp) {
3312 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3313 		}
3314 		assert(IIT_NULL != new_task_imp);
3315 		ipc_importance_task_mark_receiver(new_task_imp, TRUE);
3316 	}
3317 	if (task_is_marked_importance_denap_receiver(parent_task)) {
3318 		if (IIT_NULL == new_task_imp) {
3319 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3320 		}
3321 		assert(IIT_NULL != new_task_imp);
3322 		ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
3323 	}
3324 	if (IIT_NULL != new_task_imp) {
3325 		assert(new_task->task_imp_base == new_task_imp);
3326 		ipc_importance_task_release(new_task_imp);
3327 	}
3328 #endif /* IMPORTANCE_INHERITANCE */
3329 }
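/*
 * Editorial sketch (not part of xnu): summarizing the rules above for a
 * hypothetical child created from a marked parent -- donor and live-donor
 * marks are always inherited; the receiver mark is inherited only when the
 * child is an exec copy; the de-nap receiver mark is always inherited:
 */
#if 0   /* illustration only */
	/* parent marked donor + receiver; child made by true spawn */
	task_importance_init_from_parent(child_task, parent_task);
	assert(task_is_marked_importance_donor(child_task));     /* inherited */
	assert(!task_is_marked_importance_receiver(child_task)); /* not an exec copy */
#endif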
3330 
3331 #if IMPORTANCE_INHERITANCE
3332 /*
3333  * Sets the task boost bit to the provided value.  Does NOT run the update function.
3334  *
3335  * Task lock must be held.
3336  */
3337 static void
3338 task_set_boost_locked(task_t task, boolean_t boost_active)
3339 {
3340 #if IMPORTANCE_TRACE
3341 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
3342 	    proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0);
3343 #endif /* IMPORTANCE_TRACE */
3344 
3345 	task->requested_policy.trp_boosted = boost_active;
3346 
3347 #if IMPORTANCE_TRACE
3348 	if (boost_active == TRUE) {
3349 		DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
3350 	} else {
3351 		DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
3352 	}
3353 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
3354 	    proc_selfpid(), task_pid(task),
3355 	    trequested_0(task), trequested_1(task), 0);
3356 #endif /* IMPORTANCE_TRACE */
3357 }
3358 
3359 /*
3360  * Sets the task boost bit to the provided value and applies the update.
3361  *
3362  * Task lock must be held.  Must call update complete after unlocking the task.
3363  */
3364 void
3365 task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
3366 {
3367 	task_set_boost_locked(task, boost_active);
3368 
3369 	task_policy_update_locked(task, pend_token);
3370 }
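/*
 * Editorial sketch (not part of xnu): the comment above implies the usual
 * three-step pattern used throughout this file -- lock, update, then run
 * the completion call once the lock is dropped.  'example_set_boost' is a
 * hypothetical caller:
 */
#if 0   /* illustration only */
static void
example_set_boost(task_t task, boolean_t active)
{
	struct task_pend_token pend_token = {};

	task_lock(task);
	task_update_boost_locked(task, active, &pend_token);
	task_unlock(task);

	/* flush policy work that was deferred while the task lock was held */
	task_policy_update_complete_unlocked(task, &pend_token);
}
#endif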
3371 
3372 /*
3373  * Check if this task should donate importance.
3374  *
3375  * May be called without taking the task lock. In that case, donor status can change
3376  * so you must check only once for each donation event.
3377  */
3378 boolean_t
3379 task_is_importance_donor(task_t task)
3380 {
3381 	if (task->task_imp_base == IIT_NULL) {
3382 		return FALSE;
3383 	}
3384 	return ipc_importance_task_is_donor(task->task_imp_base);
3385 }
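/*
 * Editorial sketch (not part of xnu): per the comment above, donor status
 * can change while the task lock is not held, so a caller should sample it
 * once per donation event and act on that snapshot:
 */
#if 0   /* illustration only */
	boolean_t donating = task_is_importance_donor(task);    /* one sample */
	if (donating) {
		/* attach the boost for exactly this event; do not re-query */
	}
#endif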
3386 
3387 /*
3388  * Query the status of the task's donor mark.
3389  */
3390 boolean_t
3391 task_is_marked_importance_donor(task_t task)
3392 {
3393 	if (task->task_imp_base == IIT_NULL) {
3394 		return FALSE;
3395 	}
3396 	return ipc_importance_task_is_marked_donor(task->task_imp_base);
3397 }
3398 
3399 /*
3400  * Query the status of the task's live donor and donor mark.
3401  */
3402 boolean_t
3403 task_is_marked_live_importance_donor(task_t task)
3404 {
3405 	if (task->task_imp_base == IIT_NULL) {
3406 		return FALSE;
3407 	}
3408 	return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
3409 }
3410 
3411 
3412 /*
3413  * This routine may be called without holding task lock
3414  * since the value of imp_receiver can never be unset.
3415  */
3416 boolean_t
3417 task_is_importance_receiver(task_t task)
3418 {
3419 	if (task->task_imp_base == IIT_NULL) {
3420 		return FALSE;
3421 	}
3422 	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3423 }
3424 
3425 /*
3426  * Query the task's receiver mark.
3427  */
3428 boolean_t
3429 task_is_marked_importance_receiver(task_t task)
3430 {
3431 	if (task->task_imp_base == IIT_NULL) {
3432 		return FALSE;
3433 	}
3434 	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3435 }
3436 
3437 /*
3438  * This routine may be called without holding task lock
3439  * since the value of de-nap receiver can never be unset.
3440  */
3441 boolean_t
3442 task_is_importance_denap_receiver(task_t task)
3443 {
3444 	if (task->task_imp_base == IIT_NULL) {
3445 		return FALSE;
3446 	}
3447 	return ipc_importance_task_is_denap_receiver(task->task_imp_base);
3448 }
3449 
3450 /*
3451  * Query the task's de-nap receiver mark.
3452  */
3453 boolean_t
3454 task_is_marked_importance_denap_receiver(task_t task)
3455 {
3456 	if (task->task_imp_base == IIT_NULL) {
3457 		return FALSE;
3458 	}
3459 	return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
3460 }
3461 
3462 /*
3463  * This routine may be called without holding task lock
3464  * since the value of imp_receiver can never be unset.
3465  */
3466 boolean_t
3467 task_is_importance_receiver_type(task_t task)
3468 {
3469 	if (task->task_imp_base == IIT_NULL) {
3470 		return FALSE;
3471 	}
3472 	return task_is_importance_receiver(task) ||
3473 	       task_is_importance_denap_receiver(task);
3474 }
3475 
3476 /*
3477  * External importance assertions are managed by the process in userspace
3478  * Internal importance assertions are the responsibility of the kernel
3479  * Assertions are changed from internal to external via task_importance_externalize_assertion
3480  */
3481 
3482 int
3483 task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
3484 {
3485 	ipc_importance_task_t task_imp;
3486 	kern_return_t ret;
3487 
3488 	/* may be first time, so allow for possible importance setup */
3489 	task_imp = ipc_importance_for_task(target_task, FALSE);
3490 	if (IIT_NULL == task_imp) {
3491 		return EOVERFLOW;
3492 	}
3493 	ret = ipc_importance_task_hold_internal_assertion(task_imp, count);
3494 	ipc_importance_task_release(task_imp);
3495 
3496 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3497 }
3498 
3499 int
3500 task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
3501 {
3502 	ipc_importance_task_t task_imp;
3503 	kern_return_t ret;
3504 
3505 	/* may be first time, so allow for possible importance setup */
3506 	task_imp = ipc_importance_for_task(target_task, FALSE);
3507 	if (IIT_NULL == task_imp) {
3508 		return EOVERFLOW;
3509 	}
3510 	ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
3511 	ipc_importance_task_release(task_imp);
3512 
3513 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3514 }
3515 
3516 int
3517 task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
3518 {
3519 	ipc_importance_task_t task_imp;
3520 	kern_return_t ret;
3521 
3522 	/* must already have set up an importance */
3523 	task_imp = target_task->task_imp_base;
3524 	if (IIT_NULL == task_imp) {
3525 		return EOVERFLOW;
3526 	}
3527 	ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
3528 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3529 }
3530 
3531 int
3532 task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
3533 {
3534 	ipc_importance_task_t task_imp;
3535 	kern_return_t ret;
3536 
3537 	/* must already have set up an importance */
3538 	task_imp = target_task->task_imp_base;
3539 	if (IIT_NULL == task_imp) {
3540 		return EOVERFLOW;
3541 	}
3542 	ret = ipc_importance_task_drop_file_lock_assertion(task_imp, count);
3543 	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3544 }
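/*
 * Editorial sketch (not part of xnu): holds and drops are expected to pair,
 * and the wrappers above map kern_return_t onto BSD errno values --
 * EOVERFLOW when no importance struct exists (or a drop underflows),
 * ENOTSUP when a hold is refused.  A hypothetical paired use:
 */
#if 0   /* illustration only */
	int err = task_importance_hold_file_lock_assertion(task, 1);
	if (err == 0) {
		/* ... window during which the file-lock boost is held ... */
		err = task_importance_drop_file_lock_assertion(task, 1);
	}
#endif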
3545 
3546 int
3547 task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
3548 {
3549 	ipc_importance_task_t task_imp;
3550 	kern_return_t ret;
3551 
3552 	/* must already have set up an importance */
3553 	task_imp = target_task->task_imp_base;
3554 	if (IIT_NULL == task_imp) {
3555 		return EOVERFLOW;
3556 	}
3557 	ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
3558 	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3559 }
3560 
3561 static void
3562 task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
3563 {
3564 	int boost = 0;
3565 
3566 	__imptrace_only int released_pid = 0;
3567 	__imptrace_only int pid = task_pid(task);
3568 
3569 	ipc_importance_task_t release_imp_task = IIT_NULL;
3570 
3571 	if (IP_VALID(port) != 0) {
3572 		ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);
3573 
3574 		ip_mq_lock(port);
3575 
3576 		/*
3577 		 * The port must have been marked tempowner already.
3578 		 * This also filters out ports whose receive rights
3579 		 * are already enqueued in a message, as you can't
3580 		 * change the right's destination once it's already
3581 		 * on its way.
3582 		 */
3583 		if (port->ip_tempowner != 0) {
3584 			assert(port->ip_impdonation != 0);
3585 
3586 			boost = port->ip_impcount;
3587 			if (IIT_NULL != ip_get_imp_task(port)) {
3588 				/*
3589 				 * if this port is already bound to a task,
3590 				 * release the task reference and drop any
3591 				 * watchport-forwarded boosts
3592 				 */
3593 				release_imp_task = ip_get_imp_task(port);
3594 				port->ip_imp_task = IIT_NULL;
3595 			}
3596 
3597 		/* mark the port as watching another task (reference held in port->ip_imp_task) */
3598 			if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
3599 				port->ip_imp_task = new_imp_task;
3600 				new_imp_task = IIT_NULL;
3601 			}
3602 		}
3603 		ip_mq_unlock(port);
3604 
3605 		if (IIT_NULL != new_imp_task) {
3606 			ipc_importance_task_release(new_imp_task);
3607 		}
3608 
3609 		if (IIT_NULL != release_imp_task) {
3610 			if (boost > 0) {
3611 				ipc_importance_task_drop_internal_assertion(release_imp_task, boost);
3612 			}
3613 
3614 			// released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
3615 			ipc_importance_task_release(release_imp_task);
3616 		}
3617 #if IMPORTANCE_TRACE
3618 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
3619 		    proc_selfpid(), pid, boost, released_pid, 0);
3620 #endif /* IMPORTANCE_TRACE */
3621 	}
3622 
3623 	*boostp = boost;
3624 	return;
3625 }
3626 
3627 #endif /* IMPORTANCE_INHERITANCE */
3628 
3629 /*
3630  * Routines for VM to query task importance
3631  */
3632 
3633 
3634 /*
3635  * Order to be considered while estimating importance
3636  * for low memory notification and purging purgeable memory.
3637  */
3638 #define TASK_IMPORTANCE_FOREGROUND     4
3639 #define TASK_IMPORTANCE_NOTDARWINBG    1
3640 
3641 
3642 /*
3643  * (Un)Mark the task as a privileged listener for memory notifications.
3644  * If marked, this task will be among the first to be notified, ahead of
3645  * the bulk of all other tasks, when the system enters a pressure level
3646  * of interest to this task.
3647  */
3648 int
3649 task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
3650 {
3651 	if (old_value != NULL) {
3652 		*old_value = (boolean_t)task->low_mem_privileged_listener;
3653 	} else {
3654 		task_lock(task);
3655 		task->low_mem_privileged_listener = (uint32_t)new_value;
3656 		task_unlock(task);
3657 	}
3658 
3659 	return 0;
3660 }
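/*
 * Editorial sketch (not part of xnu): note the calling convention above --
 * a non-NULL old_value makes the call a read-only query (new_value is
 * ignored), while a NULL old_value applies new_value under the task lock:
 */
#if 0   /* illustration only */
	boolean_t was_listener;

	task_low_mem_privileged_listener(task, FALSE, &was_listener); /* query */
	task_low_mem_privileged_listener(task, TRUE, NULL);           /* set   */
#endif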
3661 
3662 /*
3663  * Checks if the task is already notified.
3664  *
3665  * Condition: task lock should be held while calling this function.
3666  */
3667 boolean_t
3668 task_has_been_notified(task_t task, int pressurelevel)
3669 {
3670 	if (task == NULL) {
3671 		return FALSE;
3672 	}
3673 
3674 	if (pressurelevel == kVMPressureWarning) {
3675 		return task->low_mem_notified_warn ? TRUE : FALSE;
3676 	} else if (pressurelevel == kVMPressureCritical) {
3677 		return task->low_mem_notified_critical ? TRUE : FALSE;
3678 	} else {
3679 		return TRUE;
3680 	}
3681 }
3682 
3683 
3684 /*
3685  * Checks if the task is used for purging.
3686  *
3687  * Condition: task lock should be held while calling this function.
3688  */
3689 boolean_t
3690 task_used_for_purging(task_t task, int pressurelevel)
3691 {
3692 	if (task == NULL) {
3693 		return FALSE;
3694 	}
3695 
3696 	if (pressurelevel == kVMPressureWarning) {
3697 		return task->purged_memory_warn ? TRUE : FALSE;
3698 	} else if (pressurelevel == kVMPressureCritical) {
3699 		return task->purged_memory_critical ? TRUE : FALSE;
3700 	} else {
3701 		return TRUE;
3702 	}
3703 }
3704 
3705 
3706 /*
3707  * Mark the task as notified with memory notification.
3708  *
3709  * Condition: task lock should be held while calling this function.
3710  */
3711 void
3712 task_mark_has_been_notified(task_t task, int pressurelevel)
3713 {
3714 	if (task == NULL) {
3715 		return;
3716 	}
3717 
3718 	if (pressurelevel == kVMPressureWarning) {
3719 		task->low_mem_notified_warn = 1;
3720 	} else if (pressurelevel == kVMPressureCritical) {
3721 		task->low_mem_notified_critical = 1;
3722 	}
3723 }
3724 
3725 
3726 /*
3727  * Mark the task as purged.
3728  *
3729  * Condition: task lock should be held while calling this function.
3730  */
3731 void
3732 task_mark_used_for_purging(task_t task, int pressurelevel)
3733 {
3734 	if (task == NULL) {
3735 		return;
3736 	}
3737 
3738 	if (pressurelevel == kVMPressureWarning) {
3739 		task->purged_memory_warn = 1;
3740 	} else if (pressurelevel == kVMPressureCritical) {
3741 		task->purged_memory_critical = 1;
3742 	}
3743 }
3744 
3745 
3746 /*
3747  * Mark the task eligible for low memory notification.
3748  *
3749  * Condition: task lock should be held while calling this function.
3750  */
3751 void
3752 task_clear_has_been_notified(task_t task, int pressurelevel)
3753 {
3754 	if (task == NULL) {
3755 		return;
3756 	}
3757 
3758 	if (pressurelevel == kVMPressureWarning) {
3759 		task->low_mem_notified_warn = 0;
3760 	} else if (pressurelevel == kVMPressureCritical) {
3761 		task->low_mem_notified_critical = 0;
3762 	}
3763 }
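/*
 * Editorial sketch (not part of xnu): the notified/mark/clear trio above
 * supports a send-once-per-pressure-transition flow.  With the task lock
 * held, a hypothetical VM pressure path might do:
 */
#if 0   /* illustration only */
	if (!task_has_been_notified(task, kVMPressureWarning)) {
		/* ... queue the warning-level notification to the task ... */
		task_mark_has_been_notified(task, kVMPressureWarning);
	}
	/* once pressure subsides, re-arm the task for the next episode */
	task_clear_has_been_notified(task, kVMPressureWarning);
#endif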
3764 
3765 
3766 /*
3767  * Mark the task eligible for purging its purgeable memory.
3768  *
3769  * Condition: task lock should be held while calling this function.
3770  */
3771 void
3772 task_clear_used_for_purging(task_t task)
3773 {
3774 	if (task == NULL) {
3775 		return;
3776 	}
3777 
3778 	task->purged_memory_warn = 0;
3779 	task->purged_memory_critical = 0;
3780 }
3781 
3782 
3783 /*
3784  * Estimate task importance for purging its purgeable memory
3785  * and low memory notification.
3786  *
3787  * Importance is calculated in the following order of criteria:
3788  * -Task role : Background vs Foreground
3789  * -Boost status: Not boosted vs Boosted (not consulted in the current calculation)
3790  * -Darwin BG status.
3791  *
3792  * Returns: Estimated task importance. Less important task will have lower
3793  *          estimated importance.
3794  */
3795 int
3796 task_importance_estimate(task_t task)
3797 {
3798 	int task_importance = 0;
3799 
3800 	if (task == NULL) {
3801 		return 0;
3802 	}
3803 
3804 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
3805 		task_importance += TASK_IMPORTANCE_FOREGROUND;
3806 	}
3807 
3808 	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) {
3809 		task_importance += TASK_IMPORTANCE_NOTDARWINBG;
3810 	}
3811 
3812 	return task_importance;
3813 }
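/*
 * Editorial worked example (not part of xnu): with the weights defined
 * above, the four possible return values are:
 *   foreground, not Darwin-BG : 4 + 1 = 5   (most important)
 *   foreground, Darwin-BG     : 4
 *   background, not Darwin-BG : 1
 *   background, Darwin-BG     : 0           (least important)
 * so the task role always dominates the Darwin-BG bit.
 */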
3814 
3815 boolean_t
3816 task_has_assertions(task_t task)
3817 {
3818 	return task->task_imp_base->iit_assertcnt ? TRUE : FALSE;
3819 }
3820 
3821 
3822 kern_return_t
3823 send_resource_violation(typeof(send_cpu_usage_violation) sendfunc,
3824     task_t violator,
3825     struct ledger_entry_info *linfo,
3826     resource_notify_flags_t flags)
3827 {
3828 #ifndef MACH_BSD
3829 	return KERN_NOT_SUPPORTED;
3830 #else
3831 	kern_return_t   kr = KERN_SUCCESS;
3832 	proc_t          proc = NULL;
3833 	posix_path_t    proc_path = "";
3834 	proc_name_t     procname = "<unknown>";
3835 	int             pid = -1;
3836 	clock_sec_t     secs;
3837 	clock_nsec_t    nsecs;
3838 	mach_timespec_t timestamp;
3839 	thread_t        curthread = current_thread();
3840 	ipc_port_t      dstport = MACH_PORT_NULL;
3841 
3842 	if (!violator) {
3843 		kr = KERN_INVALID_ARGUMENT; goto finish;
3844 	}
3845 
3846 	/* extract violator information */
3847 	task_lock(violator);
3848 	if (!(proc = get_bsdtask_info(violator))) {
3849 		task_unlock(violator);
3850 		kr = KERN_INVALID_ARGUMENT; goto finish;
3851 	}
3852 	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
3853 	pid = task_pid(violator);
3854 	if (flags & kRNFatalLimitFlag) {
3855 		kr = proc_pidpathinfo_internal(proc, 0, proc_path,
3856 		    sizeof(proc_path), NULL);
3857 	}
3858 	task_unlock(violator);
3859 	if (kr) {
3860 		goto finish;
3861 	}
3862 
3863 	/* violation time ~ now */
3864 	clock_get_calendar_nanotime(&secs, &nsecs);
3865 	timestamp.tv_sec = (int32_t)secs;
3866 	timestamp.tv_nsec = (int32_t)nsecs;
3867 	/* 25567702 tracks widening mach_timespec_t */
3868 
3869 	/* send message */
3870 	kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
3871 	    HOST_RESOURCE_NOTIFY_PORT, &dstport);
3872 	if (kr) {
3873 		goto finish;
3874 	}
3875 
3876 	thread_set_honor_qlimit(curthread);
3877 	kr = sendfunc(dstport,
3878 	    procname, pid, proc_path, timestamp,
3879 	    linfo->lei_balance, linfo->lei_last_refill,
3880 	    linfo->lei_limit, linfo->lei_refill_period,
3881 	    flags);
3882 	thread_clear_honor_qlimit(curthread);
3883 
3884 	ipc_port_release_send(dstport);
3885 
3886 finish:
3887 	return kr;
3888 #endif      /* MACH_BSD */
3889 }
3890 
3891 kern_return_t
3892 send_resource_violation_with_fatal_port(typeof(send_port_space_violation) sendfunc,
3893     task_t violator,
3894     int64_t current_size,
3895     int64_t limit,
3896     mach_port_t fatal_port,
3897     resource_notify_flags_t flags)
3898 {
3899 #ifndef MACH_BSD
3900 	return KERN_NOT_SUPPORTED;
3901 #else
3902 	kern_return_t   kr = KERN_SUCCESS;
3903 	proc_t          proc = NULL;
3904 	proc_name_t     procname = "<unknown>";
3905 	int             pid = -1;
3906 	clock_sec_t     secs;
3907 	clock_nsec_t    nsecs;
3908 	mach_timespec_t timestamp;
3909 	thread_t        curthread = current_thread();
3910 	ipc_port_t      dstport = MACH_PORT_NULL;
3911 
3912 	if (!violator) {
3913 		kr = KERN_INVALID_ARGUMENT; goto finish;
3914 	}
3915 
3916 	/* extract violator information; no need to acquire task lock */
3917 	assert(violator == current_task());
3918 	if (!(proc = get_bsdtask_info(violator))) {
3919 		kr = KERN_INVALID_ARGUMENT; goto finish;
3920 	}
3921 	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
3922 	pid = task_pid(violator);
3923 
3924 	/* violation time ~ now */
3925 	clock_get_calendar_nanotime(&secs, &nsecs);
3926 	timestamp.tv_sec = (int32_t)secs;
3927 	timestamp.tv_nsec = (int32_t)nsecs;
3928 	/* 25567702 tracks widening mach_timespec_t */
3929 
3930 	/* send message */
3931 	kr = task_get_special_port(current_task(), TASK_RESOURCE_NOTIFY_PORT, &dstport);
3932 	if (dstport == MACH_PORT_NULL) {
3933 		kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
3934 		    HOST_RESOURCE_NOTIFY_PORT, &dstport);
3935 		if (kr) {
3936 			goto finish;
3937 		}
3938 	}
3939 
3940 	thread_set_honor_qlimit(curthread);
3941 	kr = sendfunc(dstport,
3942 	    procname, pid, timestamp,
3943 	    current_size, limit, fatal_port,
3944 	    flags);
3945 	thread_clear_honor_qlimit(curthread);
3946 
3947 	ipc_port_release_send(dstport);
3948 
3949 finish:
3950 	return kr;
3951 #endif /* MACH_BSD */
3952 }
3953 
3954 /*
3955  * Resource violations trace four 64-bit integers.  For K32, two additional
3956  * codes are allocated, the first with the low two bits doubled.  So if the K64
3957  * code is 0x042, the K32 codes would be 0x044 and 0x045.
3958  */
3959 #ifdef __LP64__
3960 void
3961 trace_resource_violation(uint16_t code,
3962     struct ledger_entry_info *linfo)
3963 {
3964 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code),
3965 	    linfo->lei_balance, linfo->lei_last_refill,
3966 	    linfo->lei_limit, linfo->lei_refill_period);
3967 }
3968 #else /* K32 */
3969 /* TODO: create/find a trace_two_LLs() for K32 systems */
3970 #define MASK32 0xffffffff
3971 void
3972 trace_resource_violation(uint16_t code,
3973     struct ledger_entry_info *linfo)
3974 {
3975 	int8_t lownibble = (code & 0x3) * 2;
3976 	int16_t codeA = (code & 0xffc) | lownibble;
3977 	int16_t codeB = codeA + 1;
3978 
3979 	int32_t balance_high = (linfo->lei_balance >> 32) & MASK32;
3980 	int32_t balance_low = linfo->lei_balance & MASK32;
3981 	int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32;
3982 	int32_t last_refill_low = linfo->lei_last_refill & MASK32;
3983 
3984 	int32_t limit_high = (linfo->lei_limit >> 32) & MASK32;
3985 	int32_t limit_low = linfo->lei_limit & MASK32;
3986 	int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32;
3987 	int32_t refill_period_low = linfo->lei_refill_period & MASK32;
3988 
3989 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA),
3990 	    balance_high, balance_low,
3991 	    last_refill_high, last_refill_low);
3992 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB),
3993 	    limit_high, limit_low,
3994 	    refill_period_high, refill_period_low);
3995 }
3996 #endif /* K64/K32 */
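/*
 * Editorial worked example (not part of xnu): splitting K64 code 0x042 per
 * the K32 routine above:
 *   lownibble = (0x042 & 0x3) * 2   = 4
 *   codeA     = (0x042 & 0xffc) | 4 = 0x044  (balance / last_refill)
 *   codeB     = codeA + 1           = 0x045  (limit / refill_period)
 * Each 64-bit ledger field is then emitted as a high/low pair of 32-bit
 * trace arguments.
 */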
3997