1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32
33 #include <kern/coalition.h>
34 #include <kern/exc_resource.h>
35 #include <kern/host.h>
36 #include <kern/ledger.h>
37 #include <kern/mach_param.h> /* for TASK_CHUNK */
38 #include <kern/monotonic.h>
39 #include <kern/policy_internal.h>
40 #include <kern/task.h>
41 #include <kern/smr_hash.h>
42 #include <kern/thread_group.h>
43 #include <kern/zalloc.h>
44 #include <vm/vm_pageout.h>
45
46 #include <libkern/OSAtomic.h>
47
48 #include <mach/coalition_notification_server.h>
49 #include <mach/host_priv.h>
50 #include <mach/host_special_ports.h>
51
52 #include <os/log.h>
53
54 #include <sys/errno.h>
55
56 /*
57 * BSD interface functions
58 */
59 size_t coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, size_t list_sz);
60 coalition_t task_get_coalition(task_t task, int type);
61 boolean_t coalition_is_leader(task_t task, coalition_t coal);
62 task_t coalition_get_leader(coalition_t coal);
63 int coalition_get_task_count(coalition_t coal);
64 uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
65 int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
66 int *pid_list, int list_sz);
67
68 /* defined in task.c */
69 extern ledger_template_t task_ledger_template;
70
71 /*
72 * Templates; task template is copied due to potential allocation limits on
73 * task ledgers.
74 */
75 ledger_template_t coalition_task_ledger_template = NULL;
76 ledger_template_t coalition_ledger_template = NULL;
77
78 extern int proc_selfpid(void);
79 /*
80 * Coalition zone needs limits. We expect there will be as many coalitions as
81 * tasks (same order of magnitude), so use the task zone's limits.
82 * */
83 #define CONFIG_COALITION_MAX CONFIG_TASK_MAX
84 #define COALITION_CHUNK TASK_CHUNK
85
86 #if DEBUG || DEVELOPMENT
87 TUNABLE_WRITEABLE(int, unrestrict_coalition_syscalls, "unrestrict_coalition_syscalls", 0);
88 #else
89 #define unrestrict_coalition_syscalls false
90 #endif
91
92 static LCK_GRP_DECLARE(coalitions_lck_grp, "coalition");
93
94 /* coalitions_list_lock protects coalition_hash, next_coalition_id. */
95 static LCK_MTX_DECLARE(coalitions_list_lock, &coalitions_lck_grp);
96 static uint64_t coalition_next_id = 1;
97 static struct smr_hash coalition_hash;
98
99 coalition_t init_coalition[COALITION_NUM_TYPES];
100 coalition_t corpse_coalition[COALITION_NUM_TYPES];
101
102 static const char *
coal_type_str(int type)103 coal_type_str(int type)
104 {
105 switch (type) {
106 case COALITION_TYPE_RESOURCE:
107 return "RESOURCE";
108 case COALITION_TYPE_JETSAM:
109 return "JETSAM";
110 default:
111 return "<unknown>";
112 }
113 }
114
/*
 * Per-type operations vtable for a coalition.  One instance exists per
 * COALITION_TYPE_* in s_coalition_types[]; dispatch goes through the
 * coal_call() macro.
 */
struct coalition_type {
	int type;               /* COALITION_TYPE_* this entry implements */
	int has_default;        /* non-zero if a default coalition of this type exists */
	/*
	 * init
	 * pre-condition: coalition just allocated (unlocked), unreferenced,
	 * type field set
	 */
	kern_return_t (*init)(coalition_t coal, boolean_t privileged, boolean_t efficient);

	/*
	 * dealloc
	 * pre-condition: coalition unlocked
	 * pre-condition: coalition refcount=0, active_count=0,
	 * termrequested=1, terminated=1, reaped=1
	 */
	void (*dealloc)(coalition_t coal);

	/*
	 * adopt_task
	 * pre-condition: coalition locked
	 * pre-condition: coalition !reaped and !terminated
	 */
	kern_return_t (*adopt_task)(coalition_t coal, task_t task);

	/*
	 * remove_task
	 * pre-condition: coalition locked
	 * pre-condition: task has been removed from coalition's task list
	 */
	kern_return_t (*remove_task)(coalition_t coal, task_t task);

	/*
	 * set_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 * active_count >= 1 (at least the given task is active)
	 */
	kern_return_t (*set_taskrole)(coalition_t coal, task_t task, int role);

	/*
	 * get_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 * active_count >= 1 (at least the given task is active)
	 */
	int (*get_taskrole)(coalition_t coal, task_t task);

	/*
	 * iterate_tasks
	 * pre-condition: coalition locked
	 */
	void (*iterate_tasks)(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t));
};
169
170 /*
171 * COALITION_TYPE_RESOURCE
172 */
173
174 static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
175 static void i_coal_resource_dealloc(coalition_t coal);
176 static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
177 static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
178 static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
179 task_t task, int role);
180 static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
181 static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
182 void (*callback)(coalition_t, void *, task_t));
183
184 /*
185 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
186 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
187 */
188 static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
189
/*
 * Private state of a COALITION_TYPE_RESOURCE coalition.  Most of the
 * counters below are "dead task" rollups: they are folded in by
 * i_coal_resource_remove_task() when a member exits, while live members'
 * usage is read from the tasks themselves at query time.
 */
struct i_resource_coalition {
	/*
	 * This keeps track of resource utilization of tasks that are no longer active
	 * in the coalition and is updated when a task is removed from the coalition.
	 */
	ledger_t ledger;
	uint64_t bytesread;             /* disk-read bytes of exited member tasks */
	uint64_t byteswritten;          /* total I/O minus disk reads of exited member tasks */
	uint64_t energy;
	uint64_t gpu_time;              /* rolled up on x86_64 only (see remove_task) */
	uint64_t logical_immediate_writes;
	uint64_t logical_deferred_writes;
	uint64_t logical_invalidated_writes;
	uint64_t logical_metadata_writes;
	uint64_t logical_immediate_writes_to_external;
	uint64_t logical_deferred_writes_to_external;
	uint64_t logical_invalidated_writes_to_external;
	uint64_t logical_metadata_writes_to_external;
	uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per effective QoS class */
	uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per requested QoS class */
	uint64_t cpu_instructions;
	uint64_t cpu_cycles;
	struct recount_coalition co_recount;

	uint64_t task_count;      /* tasks that have started in this coalition */
	uint64_t dead_task_count; /* tasks that have exited in this coalition;
	                           * subtract from task_count to get count
	                           * of "active" tasks */
	/*
	 * Count the length of time this coalition had at least one active task.
	 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
	 * */
	uint64_t last_became_nonempty_time;
	uint64_t time_nonempty;

	queue_head_t tasks;             /* List of active tasks in the coalition */
	/*
	 * This ledger is used for triggering resource exception. For the tracked resources, this is updated
	 * when the member tasks' resource usage changes.
	 */
	ledger_t resource_monitor_ledger;
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t fs_metadata_writes;    /* filesystem metadata writes of exited member tasks */
#endif /* CONFIG_PHYS_WRITE_ACCT */
};
235
236 /*
237 * COALITION_TYPE_JETSAM
238 */
239
240 static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
241 static void i_coal_jetsam_dealloc(coalition_t coal);
242 static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
243 static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
244 static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
245 task_t task, int role);
246 int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
247 static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
248 void (*callback)(coalition_t, void *, task_t));
249
/*
 * Private state of a COALITION_TYPE_JETSAM coalition.  Member tasks are
 * bucketed into the queues below by role; the thread group ties the
 * coalition to CLPC (see COALITION_FOCAL_TASKS_ACCOUNTING note below).
 */
struct i_jetsam_coalition {
	task_t leader;                     /* current leader task, if any */
	queue_head_t extensions;           /* NOTE(review): presumably extension-role members — confirm in set_taskrole */
	queue_head_t services;             /* NOTE(review): presumably XPC-service-role members — confirm in set_taskrole */
	queue_head_t other;                /* members with no specific role */
	struct thread_group *thread_group; /* associated thread group, passed down to CLPC */
	bool swap_enabled;                 /* NOTE(review): presumably jetsam swap opt-in for members — confirm */
};
258
259
260 /*
261 * main coalition structure
262 */
/*
 * main coalition structure
 *
 * Lifecycle flags progress monotonically: termrequested -> terminated ->
 * reaped; a reaped coalition lingers only until ref_count drops to zero.
 */
struct coalition {
	uint64_t id;                /* monotonically increasing */
	uint32_t type;              /* COALITION_TYPE_*; selects s_coalition_types[] ops and the r/j union arm */
	uint32_t role;              /* default task role (background, adaptive, interactive, etc) */
	os_ref_atomic_t ref_count;  /* Number of references to the memory containing this struct */
	uint32_t active_count;      /* Number of members of (tasks in) the
	                             * coalition, plus vouchers referring
	                             * to the coalition */
	uint32_t focal_task_count;   /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
	uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */
	uint32_t game_task_count;    /* Number of GAME_MODE tasks in the coalition */

	/* coalition flags */
	uint32_t privileged : 1;    /* Members of this coalition may create
	                             * and manage coalitions and may posix_spawn
	                             * processes into selected coalitions */
	/* ast? */
	/* voucher */
	uint32_t termrequested : 1; /* launchd has requested termination when coalition becomes empty */
	uint32_t terminated : 1;    /* coalition became empty and spawns are now forbidden */
	uint32_t reaped : 1;        /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
	uint32_t notified : 1;      /* no-more-processes notification was sent via special port */
	uint32_t efficient : 1;     /* launchd has marked the coalition as efficient */
#if DEVELOPMENT || DEBUG
	uint32_t should_notify : 1; /* should this coalition send notifications (default: yes) */
#endif

	struct smrq_slink link;     /* global list of coalitions (coalition_hash, SMR-protected) */

	/*
	 * The lock is only needed while the coalition is alive; once it is
	 * retired the same storage holds the SMR node used for deferred
	 * reclamation.
	 */
	union {
		lck_mtx_t lock;         /* Coalition lock. */
		struct smr_node smr_node;
	};

	/* put coalition type-specific structures here */
	union {
		struct i_resource_coalition r;
		struct i_jetsam_coalition j;
	};
};
303
304 os_refgrp_decl(static, coal_ref_grp, "coalitions", NULL);
305 #define coal_ref_init(coal, c) os_ref_init_count_raw(&(coal)->ref_count, &coal_ref_grp, c)
306 #define coal_ref_count(coal) os_ref_get_count_raw(&(coal)->ref_count)
307 #define coal_ref_try_retain(coal) os_ref_retain_try_raw(&(coal)->ref_count, &coal_ref_grp)
308 #define coal_ref_retain(coal) os_ref_retain_raw(&(coal)->ref_count, &coal_ref_grp)
309 #define coal_ref_release(coal) os_ref_release_raw(&(coal)->ref_count, &coal_ref_grp)
310 #define coal_ref_release_live(coal) os_ref_release_live_raw(&(coal)->ref_count, &coal_ref_grp)
311
312 #define COALITION_HASH_SIZE_MIN 16
313
/*
 * SMR hash "try get" hook: attempt to take a reference on a coalition
 * found in coalition_hash; fails if the refcount already reached zero
 * (i.e. the object is being torn down).
 */
static bool
coal_hash_obj_try_get(void *coal)
{
	return coal_ref_try_retain((struct coalition *)coal);
}
319
320 SMRH_TRAITS_DEFINE_SCALAR(coal_hash_traits, struct coalition, id, link,
321 .domain = &smr_proc_task,
322 .obj_try_get = coal_hash_obj_try_get);
323
324 /*
325 * register different coalition types:
326 * these must be kept in the order specified in coalition.h
327 */
328 static const struct coalition_type s_coalition_types[COALITION_NUM_TYPES] = {
329 {
330 COALITION_TYPE_RESOURCE,
331 1,
332 i_coal_resource_init,
333 i_coal_resource_dealloc,
334 i_coal_resource_adopt_task,
335 i_coal_resource_remove_task,
336 i_coal_resource_set_taskrole,
337 i_coal_resource_get_taskrole,
338 i_coal_resource_iterate_tasks,
339 }, {
340 COALITION_TYPE_JETSAM,
341 1,
342 i_coal_jetsam_init,
343 i_coal_jetsam_dealloc,
344 i_coal_jetsam_adopt_task,
345 i_coal_jetsam_remove_task,
346 i_coal_jetsam_set_taskrole,
347 i_coal_jetsam_get_taskrole,
348 i_coal_jetsam_iterate_tasks,
349 },
350 };
351
352 static KALLOC_TYPE_DEFINE(coalition_zone, struct coalition, KT_PRIV_ACCT);
353
354 #define coal_call(coal, func, ...) \
355 (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)
356
357
358 #define coalition_lock(c) lck_mtx_lock(&(c)->lock)
359 #define coalition_unlock(c) lck_mtx_unlock(&(c)->lock)
360
361 /*
362 * Define the coalition type to track focal tasks.
363 * On embedded, track them using jetsam coalitions since they have associated thread
364 * groups which reflect this property as a flag (and pass it down to CLPC).
365 * On non-embedded platforms, since not all coalitions have jetsam coalitions
366 * track focal counts on the resource coalition.
367 */
368 #if !XNU_TARGET_OS_OSX
369 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM
370 #else /* !XNU_TARGET_OS_OSX */
371 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE
372 #endif /* !XNU_TARGET_OS_OSX */
373
374
375 /*
376 *
377 * Coalition ledger implementation
378 *
379 */
380
381 struct coalition_ledger_indices coalition_ledgers =
382 {.logical_writes = -1, };
383 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor);
384
385 ledger_t
coalition_ledger_get_from_task(task_t task)386 coalition_ledger_get_from_task(task_t task)
387 {
388 ledger_t ledger = LEDGER_NULL;
389 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
390
391 if (coal != NULL && (!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]))) {
392 ledger = coal->r.resource_monitor_ledger;
393 ledger_reference(ledger);
394 }
395 return ledger;
396 }
397
398
399 enum {
400 COALITION_IO_LEDGER_ENABLE,
401 COALITION_IO_LEDGER_DISABLE
402 };
403
404 void
coalition_io_monitor_ctl(struct coalition * coalition,uint32_t flags,int64_t limit)405 coalition_io_monitor_ctl(struct coalition *coalition, uint32_t flags, int64_t limit)
406 {
407 ledger_t ledger = coalition->r.resource_monitor_ledger;
408
409 if (flags == COALITION_IO_LEDGER_ENABLE) {
410 /* Configure the logical I/O ledger */
411 ledger_set_limit(ledger, coalition_ledgers.logical_writes, (limit * 1024 * 1024), 0);
412 ledger_set_period(ledger, coalition_ledgers.logical_writes, (COALITION_LEDGER_MONITOR_INTERVAL_SECS * NSEC_PER_SEC));
413 } else if (flags == COALITION_IO_LEDGER_DISABLE) {
414 ledger_disable_refill(ledger, coalition_ledgers.logical_writes);
415 ledger_disable_callback(ledger, coalition_ledgers.logical_writes);
416 }
417 }
418
419 int
coalition_ledger_set_logical_writes_limit(struct coalition * coalition,int64_t limit)420 coalition_ledger_set_logical_writes_limit(struct coalition *coalition, int64_t limit)
421 {
422 int error = 0;
423
424 /* limit = -1 will be used to disable the limit and the callback */
425 if (limit > COALITION_MAX_LOGICAL_WRITES_LIMIT || limit == 0 || limit < -1) {
426 error = EINVAL;
427 goto out;
428 }
429
430 coalition_lock(coalition);
431 if (limit == -1) {
432 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, limit);
433 } else {
434 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, limit);
435 }
436 coalition_unlock(coalition);
437 out:
438 return error;
439 }
440
/*
 * Ledger-callback path for a coalition that blew through its logical-writes
 * limit: trace the violation, log it, notify the resource-violation
 * listener, then exponentially raise the limit (or disable monitoring once
 * it would exceed the maximum) so repeated violations don't spam userspace.
 * Named so the function shows up clearly in backtraces.
 */
void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
	int pid = proc_selfpid();
	ledger_amount_t new_limit;
	task_t task = current_task();
	struct ledger_entry_info lei;
	kern_return_t kr;
	ledger_t ledger;
	struct coalition *coalition = task->coalition[COALITION_TYPE_RESOURCE];

	assert(coalition != NULL);
	ledger = coalition->r.resource_monitor_ledger;

	switch (flavor) {
	case FLAVOR_IO_LOGICAL_WRITES:
		ledger_get_entry_info(ledger, coalition_ledgers.logical_writes, &lei);
		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
		break;
	default:
		/* Unknown flavor: nothing to report. */
		goto Exit;
	}

	os_log(OS_LOG_DEFAULT, "Coalition [%lld] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]. Triggered by process [%d]\n",
	    coalition->id, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)),
	    (lei.lei_refill_period / NSEC_PER_SEC), pid);

	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
	if (kr) {
		os_log(OS_LOG_DEFAULT, "ERROR %#x returned from send_resource_violation(disk_writes, ...)\n", kr);
	}

	/*
	 * Continue to monitor the coalition after it hits the initial limit, but
	 * increase the limit exponentially (4x, computed in MB) so that we don't
	 * spam the listener.
	 */
	new_limit = (lei.lei_limit / 1024 / 1024) * 4;
	coalition_lock(coalition);
	if (new_limit > COALITION_MAX_LOGICAL_WRITES_LIMIT) {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, -1);
	} else {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, new_limit);
	}
	coalition_unlock(coalition);

Exit:
	return;
}
489
490 void
coalition_io_rate_exceeded(int warning,const void * param0,__unused const void * param1)491 coalition_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
492 {
493 if (warning == 0) {
494 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO((int)param0);
495 }
496 }
497
/*
 * One-time startup initialization of the two coalition ledger templates:
 *  - coalition_ledger_template: resource-exception accounting (currently
 *    only "logical_writes", with coalition_io_rate_exceeded as callback);
 *  - coalition_task_ledger_template: a copy of the task ledger template,
 *    used to roll up member tasks' ledgers into the coalition.
 * Panics if any template or entry cannot be created.
 */
void
init_coalition_ledgers(void)
{
	ledger_template_t t;
	assert(coalition_ledger_template == NULL);

	if ((t = ledger_template_create("Per-coalition ledgers")) == NULL) {
		panic("couldn't create coalition ledger template");
	}

	coalition_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");

	if (coalition_ledgers.logical_writes < 0) {
		panic("couldn't create entries for coaliton ledger template");
	}

	ledger_set_callback(t, coalition_ledgers.logical_writes, coalition_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
	ledger_template_complete(t);

	coalition_task_ledger_template = ledger_template_copy(task_ledger_template, "Coalition task ledgers");

	if (coalition_task_ledger_template == NULL) {
		panic("couldn't create coalition task ledger template");
	}

	ledger_template_complete(coalition_task_ledger_template);

	/* Publish the template only after it is fully built. */
	coalition_ledger_template = t;
}
527
528 void
coalition_io_ledger_update(task_t task,int32_t flavor,boolean_t is_credit,uint32_t io_size)529 coalition_io_ledger_update(task_t task, int32_t flavor, boolean_t is_credit, uint32_t io_size)
530 {
531 ledger_t ledger;
532 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
533
534 assert(coal != NULL);
535 ledger = coal->r.resource_monitor_ledger;
536 if (LEDGER_VALID(ledger)) {
537 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
538 if (is_credit) {
539 ledger_credit(ledger, coalition_ledgers.logical_writes, io_size);
540 } else {
541 ledger_debit(ledger, coalition_ledgers.logical_writes, io_size);
542 }
543 }
544 }
545 }
546
/*
 * Send a coalition lifecycle notification (id + flags) to userspace via
 * the host's coalition special port.  Silently a no-op when no valid
 * port has been registered.
 */
static void
coalition_notify_user(uint64_t id, uint32_t flags)
{
	mach_port_t user_port;
	kern_return_t kr;

	kr = host_get_coalition_port(host_priv_self(), &user_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
		return;
	}

	coalition_notification(user_port, id, flags);
	/* host_get_coalition_port handed us a send right; drop it. */
	ipc_port_release_send(user_port);
}
561
562 /*
563 *
564 * COALITION_TYPE_RESOURCE
565 *
566 */
567 static kern_return_t
i_coal_resource_init(coalition_t coal,boolean_t privileged,boolean_t efficient)568 i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
569 {
570 #pragma unused(privileged, efficient)
571
572 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
573
574 recount_coalition_init(&coal->r.co_recount);
575 coal->r.ledger = ledger_instantiate(coalition_task_ledger_template,
576 LEDGER_CREATE_ACTIVE_ENTRIES);
577 if (coal->r.ledger == NULL) {
578 return KERN_RESOURCE_SHORTAGE;
579 }
580
581 coal->r.resource_monitor_ledger = ledger_instantiate(coalition_ledger_template,
582 LEDGER_CREATE_ACTIVE_ENTRIES);
583 if (coal->r.resource_monitor_ledger == NULL) {
584 return KERN_RESOURCE_SHORTAGE;
585 }
586
587 queue_init(&coal->r.tasks);
588
589 return KERN_SUCCESS;
590 }
591
/*
 * Tear down resource-coalition private state.  Per the coalition_type
 * dealloc contract, the coalition is unreferenced and fully terminated,
 * so both ledger references can be dropped safely.
 */
static void
i_coal_resource_dealloc(coalition_t coal)
{
	assert(coal && coal->type == COALITION_TYPE_RESOURCE);

	recount_coalition_deinit(&coal->r.co_recount);
	ledger_dereference(coal->r.ledger);
	ledger_dereference(coal->r.resource_monitor_ledger);
}
601
/*
 * Add a task to a resource coalition (coalition locked): bump the
 * started-task count, stamp the time the coalition became non-empty when
 * this is the first active member, and link the task onto the active list.
 */
static kern_return_t
i_coal_resource_adopt_task(coalition_t coal, task_t task)
{
	struct i_resource_coalition *cr;

	assert(coal && coal->type == COALITION_TYPE_RESOURCE);
	assert(queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

	cr = &coal->r;
	cr->task_count++;

	/* task_count can only grow, so dropping below dead_task_count
	 * means the accounting is corrupt — halt immediately. */
	if (cr->task_count < cr->dead_task_count) {
		panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
		    __func__, coal, coal->id, coal_type_str(coal->type),
		    cr->task_count, cr->dead_task_count);
	}

	/* If moving from 0->1 active tasks */
	if (cr->task_count - cr->dead_task_count == 1) {
		cr->last_became_nonempty_time = mach_absolute_time();
	}

	/* put the task on the coalition's list of tasks */
	enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]);

	coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
	    task_pid(task), coal->id, cr->task_count, cr->dead_task_count,
	    cr->last_became_nonempty_time);

	return KERN_SUCCESS;
}
633
/*
 * Remove a task from a resource coalition (coalition locked): account it
 * as dead, close out the non-empty interval when this was the last active
 * member, roll the task's resource usage into the coalition's dead-task
 * totals, and unlink the task from the active list.
 */
static kern_return_t
i_coal_resource_remove_task(coalition_t coal, task_t task)
{
	struct i_resource_coalition *cr;

	assert(coal && coal->type == COALITION_TYPE_RESOURCE);
	assert(task->coalition[COALITION_TYPE_RESOURCE] == coal);
	assert(!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

	/*
	 * handle resource coalition accounting rollup for dead tasks
	 */
	cr = &coal->r;

	cr->dead_task_count++;

	/* dead_task_count exceeding task_count means corrupt accounting. */
	if (cr->task_count < cr->dead_task_count) {
		panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
		    __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count);
	}

	/* If moving from 1->0 active tasks */
	if (cr->task_count - cr->dead_task_count == 0) {
		uint64_t last_time_nonempty = mach_absolute_time() - cr->last_became_nonempty_time;
		cr->last_became_nonempty_time = 0;
		cr->time_nonempty += last_time_nonempty;
	}

	/* Do not roll up for exec'd task or exec copy task */
	/* (their stats survive in the replacement task; rolling both up
	 *  would double-count) */
	if (!task_is_exec_copy(task) && !task_did_exec(task)) {
		ledger_rollup(cr->ledger, task->ledger);
		cr->bytesread += task->task_io_stats->disk_reads.size;
		cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
#if defined(__x86_64__)
		cr->gpu_time += task_gpu_utilisation(task);
#endif /* defined(__x86_64__) */

		cr->logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
		cr->logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
		cr->logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
		cr->logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
		cr->logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
		cr->logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
		cr->logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
		cr->logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
#if CONFIG_PHYS_WRITE_ACCT
		cr->fs_metadata_writes += task->task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
		task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos);
		recount_coalition_rollup_task(&cr->co_recount, &task->tk_recount);
	}

	/* remove the task from the coalition's list */
	remqueue(&task->task_coalition[COALITION_TYPE_RESOURCE]);
	queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);

	coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
	    task_pid(task), coal->id, cr->task_count, cr->dead_task_count);

	return KERN_SUCCESS;
}
695
/*
 * Resource coalitions do not track per-task roles; accept any role
 * assignment as a successful no-op.
 */
static kern_return_t
i_coal_resource_set_taskrole(__unused coalition_t coal,
    __unused task_t task, __unused int role)
{
	return KERN_SUCCESS;
}
702
703 static int
i_coal_resource_get_taskrole(__unused coalition_t coal,__unused task_t task)704 i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task)
705 {
706 task_t t;
707
708 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
709
710 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
711 if (t == task) {
712 return COALITION_TASKROLE_UNDEF;
713 }
714 }
715
716 return -1;
717 }
718
719 static void
i_coal_resource_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))720 i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
721 {
722 task_t t;
723 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
724
725 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE])
726 callback(coal, ctx, t);
727 }
728
729 #if CONFIG_PHYS_WRITE_ACCT
730 extern uint64_t kernel_pm_writes;
731 #endif /* CONFIG_PHYS_WRITE_ACCT */
732
733 kern_return_t
coalition_resource_usage_internal(coalition_t coal,struct coalition_resource_usage * cru_out)734 coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out)
735 {
736 kern_return_t kr;
737 ledger_amount_t credit, debit;
738 int i;
739
740 if (coal->type != COALITION_TYPE_RESOURCE) {
741 return KERN_INVALID_ARGUMENT;
742 }
743
744 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
745 for (i = 0; i < COALITION_NUM_TYPES; i++) {
746 if (coal == corpse_coalition[i]) {
747 return KERN_INVALID_ARGUMENT;
748 }
749 }
750
751 ledger_t sum_ledger = ledger_instantiate(coalition_task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
752 if (sum_ledger == LEDGER_NULL) {
753 return KERN_RESOURCE_SHORTAGE;
754 }
755
756 coalition_lock(coal);
757
758 /*
759 * Start with the coalition's ledger, which holds the totals from all
760 * the dead tasks.
761 */
762 ledger_rollup(sum_ledger, coal->r.ledger);
763 uint64_t bytesread = coal->r.bytesread;
764 uint64_t byteswritten = coal->r.byteswritten;
765 uint64_t gpu_time = coal->r.gpu_time;
766 uint64_t logical_immediate_writes = coal->r.logical_immediate_writes;
767 uint64_t logical_deferred_writes = coal->r.logical_deferred_writes;
768 uint64_t logical_invalidated_writes = coal->r.logical_invalidated_writes;
769 uint64_t logical_metadata_writes = coal->r.logical_metadata_writes;
770 uint64_t logical_immediate_writes_to_external = coal->r.logical_immediate_writes_to_external;
771 uint64_t logical_deferred_writes_to_external = coal->r.logical_deferred_writes_to_external;
772 uint64_t logical_invalidated_writes_to_external = coal->r.logical_invalidated_writes_to_external;
773 uint64_t logical_metadata_writes_to_external = coal->r.logical_metadata_writes_to_external;
774 #if CONFIG_PHYS_WRITE_ACCT
775 uint64_t fs_metadata_writes = coal->r.fs_metadata_writes;
776 #endif /* CONFIG_PHYS_WRITE_ACCT */
777 int64_t cpu_time_billed_to_me = 0;
778 int64_t cpu_time_billed_to_others = 0;
779 int64_t energy_billed_to_me = 0;
780 int64_t energy_billed_to_others = 0;
781 struct recount_usage stats_sum = { 0 };
782 struct recount_usage stats_perf_only = { 0 };
783 recount_coalition_usage_perf_only(&coal->r.co_recount, &stats_sum,
784 &stats_perf_only);
785 uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES] = { 0 };
786 uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES] = { 0 };
787 /*
788 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
789 * out from under us, since we hold the coalition lock.
790 */
791 task_t task;
792 qe_foreach_element(task, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
793 /*
794 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
795 * Cannot take task lock after taking coaliton lock
796 */
797 if (task_is_exec_copy(task) || task_did_exec(task)) {
798 continue;
799 }
800
801 ledger_rollup(sum_ledger, task->ledger);
802 bytesread += task->task_io_stats->disk_reads.size;
803 byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
804 #if defined(__x86_64__)
805 gpu_time += task_gpu_utilisation(task);
806 #endif /* defined(__x86_64__) */
807
808 logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
809 logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
810 logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
811 logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
812 logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
813 logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
814 logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
815 logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
816 #if CONFIG_PHYS_WRITE_ACCT
817 fs_metadata_writes += task->task_fs_metadata_writes;
818 #endif /* CONFIG_PHYS_WRITE_ACCT */
819
820 task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos);
821 recount_task_usage_perf_only(task, &stats_sum, &stats_perf_only);
822 }
823
824 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_me, (int64_t *)&cpu_time_billed_to_me);
825 if (kr != KERN_SUCCESS || cpu_time_billed_to_me < 0) {
826 cpu_time_billed_to_me = 0;
827 }
828
829 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_others, (int64_t *)&cpu_time_billed_to_others);
830 if (kr != KERN_SUCCESS || cpu_time_billed_to_others < 0) {
831 cpu_time_billed_to_others = 0;
832 }
833
834 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_me, (int64_t *)&energy_billed_to_me);
835 if (kr != KERN_SUCCESS || energy_billed_to_me < 0) {
836 energy_billed_to_me = 0;
837 }
838
839 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_others, (int64_t *)&energy_billed_to_others);
840 if (kr != KERN_SUCCESS || energy_billed_to_others < 0) {
841 energy_billed_to_others = 0;
842 }
843
844 /* collect information from the coalition itself */
845 cru_out->tasks_started = coal->r.task_count;
846 cru_out->tasks_exited = coal->r.dead_task_count;
847
848 uint64_t time_nonempty = coal->r.time_nonempty;
849 uint64_t last_became_nonempty_time = coal->r.last_became_nonempty_time;
850
851 coalition_unlock(coal);
852
853 /* Copy the totals out of sum_ledger */
854 kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time,
855 &credit, &debit);
856 if (kr != KERN_SUCCESS) {
857 credit = 0;
858 }
859 cru_out->cpu_time = credit;
860 cru_out->cpu_time_billed_to_me = (uint64_t)cpu_time_billed_to_me;
861 cru_out->cpu_time_billed_to_others = (uint64_t)cpu_time_billed_to_others;
862 cru_out->energy_billed_to_me = (uint64_t)energy_billed_to_me;
863 cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others;
864
865 kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups,
866 &credit, &debit);
867 if (kr != KERN_SUCCESS) {
868 credit = 0;
869 }
870 cru_out->interrupt_wakeups = credit;
871
872 kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups,
873 &credit, &debit);
874 if (kr != KERN_SUCCESS) {
875 credit = 0;
876 }
877 cru_out->platform_idle_wakeups = credit;
878
879 cru_out->bytesread = bytesread;
880 cru_out->byteswritten = byteswritten;
881 cru_out->gpu_time = gpu_time;
882 cru_out->logical_immediate_writes = logical_immediate_writes;
883 cru_out->logical_deferred_writes = logical_deferred_writes;
884 cru_out->logical_invalidated_writes = logical_invalidated_writes;
885 cru_out->logical_metadata_writes = logical_metadata_writes;
886 cru_out->logical_immediate_writes_to_external = logical_immediate_writes_to_external;
887 cru_out->logical_deferred_writes_to_external = logical_deferred_writes_to_external;
888 cru_out->logical_invalidated_writes_to_external = logical_invalidated_writes_to_external;
889 cru_out->logical_metadata_writes_to_external = logical_metadata_writes_to_external;
890 #if CONFIG_PHYS_WRITE_ACCT
891 cru_out->fs_metadata_writes = fs_metadata_writes;
892 #else
893 cru_out->fs_metadata_writes = 0;
894 #endif /* CONFIG_PHYS_WRITE_ACCT */
895 cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES;
896 memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos));
897
898 cru_out->cpu_ptime = recount_usage_time_mach(&stats_perf_only);
899 #if CONFIG_PERVASIVE_CPI
900 cru_out->cpu_instructions = recount_usage_instructions(&stats_sum);
901 cru_out->cpu_cycles = recount_usage_cycles(&stats_sum);
902 cru_out->cpu_pinstructions = recount_usage_instructions(&stats_perf_only);
903 cru_out->cpu_pcycles = recount_usage_cycles(&stats_perf_only);
904 #endif // CONFIG_PERVASIVE_CPI
905
906 ledger_dereference(sum_ledger);
907 sum_ledger = LEDGER_NULL;
908
909 #if CONFIG_PERVASIVE_ENERGY
910 cru_out->energy = stats_sum.ru_energy_nj;
911 #endif /* CONFIG_PERVASIVE_ENERGY */
912
913 #if CONFIG_PHYS_WRITE_ACCT
914 // kernel_pm_writes are only recorded under kernel_task coalition
915 if (coalition_id(coal) == COALITION_ID_KERNEL) {
916 cru_out->pm_writes = kernel_pm_writes;
917 } else {
918 cru_out->pm_writes = 0;
919 }
920 #else
921 cru_out->pm_writes = 0;
922 #endif /* CONFIG_PHYS_WRITE_ACCT */
923
924 if (last_became_nonempty_time) {
925 time_nonempty += mach_absolute_time() - last_became_nonempty_time;
926 }
927 absolutetime_to_nanoseconds(time_nonempty, &cru_out->time_nonempty);
928
929 return KERN_SUCCESS;
930 }
931
932 kern_return_t
coalition_debug_info_internal(coalition_t coal,struct coalinfo_debuginfo * c_debuginfo)933 coalition_debug_info_internal(coalition_t coal,
934 struct coalinfo_debuginfo *c_debuginfo)
935 {
936 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
937 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
938 if (coal == corpse_coalition[i]) {
939 return KERN_INVALID_ARGUMENT;
940 }
941 }
942
943 if (coal->type == COALITION_FOCAL_TASKS_ACCOUNTING) {
944 c_debuginfo->focal_task_count = coal->focal_task_count;
945 c_debuginfo->nonfocal_task_count = coal->nonfocal_task_count;
946 c_debuginfo->game_task_count = coal->game_task_count;
947 }
948
949 #if CONFIG_THREAD_GROUPS
950 struct thread_group * group = coalition_get_thread_group(coal);
951
952 if (group != NULL) {
953 c_debuginfo->thread_group_id = thread_group_id(group);
954 c_debuginfo->thread_group_flags = thread_group_get_flags(group);
955 c_debuginfo->thread_group_recommendation = thread_group_recommendation(group);
956 }
957 #endif /* CONFIG_THREAD_GROUPS */
958
959 return KERN_SUCCESS;
960 }
961
962 /*
963 *
964 * COALITION_TYPE_JETSAM
965 *
966 */
967 static kern_return_t
i_coal_jetsam_init(coalition_t coal,boolean_t privileged,boolean_t efficient)968 i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
969 {
970 assert(coal && coal->type == COALITION_TYPE_JETSAM);
971 (void)privileged;
972 (void)efficient;
973
974 coal->j.leader = TASK_NULL;
975 queue_head_init(coal->j.extensions);
976 queue_head_init(coal->j.services);
977 queue_head_init(coal->j.other);
978
979 #if CONFIG_THREAD_GROUPS
980 switch (coal->role) {
981 case COALITION_ROLE_SYSTEM:
982 coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM);
983 break;
984 case COALITION_ROLE_BACKGROUND:
985 coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND);
986 break;
987 default:
988 coal->j.thread_group = thread_group_create_and_retain(efficient ? THREAD_GROUP_FLAGS_EFFICIENT : THREAD_GROUP_FLAGS_DEFAULT);
989 }
990 assert(coal->j.thread_group != NULL);
991 #endif
992 return KERN_SUCCESS;
993 }
994
995 static void
i_coal_jetsam_dealloc(__unused coalition_t coal)996 i_coal_jetsam_dealloc(__unused coalition_t coal)
997 {
998 assert(coal && coal->type == COALITION_TYPE_JETSAM);
999
1000 /* the coalition should be completely clear at this point */
1001 assert(queue_empty(&coal->j.extensions));
1002 assert(queue_empty(&coal->j.services));
1003 assert(queue_empty(&coal->j.other));
1004 assert(coal->j.leader == TASK_NULL);
1005
1006 #if CONFIG_THREAD_GROUPS
1007 /* disassociate from the thread group */
1008 assert(coal->j.thread_group != NULL);
1009 thread_group_release(coal->j.thread_group);
1010 coal->j.thread_group = NULL;
1011 #endif
1012 }
1013
1014 static kern_return_t
i_coal_jetsam_adopt_task(coalition_t coal,task_t task)1015 i_coal_jetsam_adopt_task(coalition_t coal, task_t task)
1016 {
1017 struct i_jetsam_coalition *cj;
1018 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1019
1020 cj = &coal->j;
1021
1022 assert(queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
1023
1024 /* put each task initially in the "other" list */
1025 enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]);
1026 coal_dbg("coalition %lld adopted PID:%d as UNDEF",
1027 coal->id, task_pid(task));
1028
1029 return KERN_SUCCESS;
1030 }
1031
1032 static kern_return_t
i_coal_jetsam_remove_task(coalition_t coal,task_t task)1033 i_coal_jetsam_remove_task(coalition_t coal, task_t task)
1034 {
1035 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1036 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
1037
1038 coal_dbg("removing PID:%d from coalition id:%lld",
1039 task_pid(task), coal->id);
1040
1041 if (task == coal->j.leader) {
1042 coal->j.leader = NULL;
1043 coal_dbg(" PID:%d was the leader!", task_pid(task));
1044 } else {
1045 assert(!queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
1046 }
1047
1048 /* remove the task from the specific coalition role queue */
1049 remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
1050 queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);
1051
1052 return KERN_SUCCESS;
1053 }
1054
/*
 * i_coal_jetsam_set_taskrole
 * Move 'task' onto the jetsam-coalition role list matching 'role'
 * (leader slot, services, extensions, or other).
 * Condition: coalition must be LOCKED; task must already be a member
 * (it starts on the "other" list after adoption).
 */
static kern_return_t
i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role)
{
	struct i_jetsam_coalition *cj;
	queue_t q = NULL;
	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

	cj = &coal->j;

	switch (role) {
	case COALITION_TASKROLE_LEADER:
		coal_dbg("setting PID:%d as LEADER of %lld",
		    task_pid(task), coal->id);
		if (cj->leader != TASK_NULL) {
			/* re-queue the exiting leader onto the "other" list */
			coal_dbg(" re-queue existing leader (%d) as OTHER",
			    task_pid(cj->leader));
			re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]);
		}
		/*
		 * remove the task from the "other" list
		 * (where it was put by default)
		 */
		remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
		queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);

		/* set the coalition leader */
		cj->leader = task;
		break;
	case COALITION_TASKROLE_XPC:
		coal_dbg("setting PID:%d as XPC in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->services;
		break;
	case COALITION_TASKROLE_EXT:
		coal_dbg("setting PID:%d as EXT in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->extensions;
		break;
	case COALITION_TASKROLE_NONE:
		/*
		 * Tasks with a role of "none" should fall through to an
		 * undefined role so long as the task is currently a member
		 * of the coalition. This scenario can happen if a task is
		 * killed (usually via jetsam) during exec.
		 */
		if (task->coalition[COALITION_TYPE_JETSAM] != coal) {
			panic("%s: task %p attempting to set role %d "
			    "in coalition %p to which it does not belong!", __func__, task, role, coal);
		}
		OS_FALLTHROUGH;
	case COALITION_TASKROLE_UNDEF:
		coal_dbg("setting PID:%d as UNDEF in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->other;
		break;
	default:
		panic("%s: invalid role(%d) for task", __func__, role);
		return KERN_INVALID_ARGUMENT;
	}

	/* XPC/EXT/UNDEF all share the tail-requeue path via 'q' */
	if (q != NULL) {
		re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]);
	}

	return KERN_SUCCESS;
}
1123
1124 int
i_coal_jetsam_get_taskrole(coalition_t coal,task_t task)1125 i_coal_jetsam_get_taskrole(coalition_t coal, task_t task)
1126 {
1127 struct i_jetsam_coalition *cj;
1128 task_t t;
1129
1130 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1131 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
1132
1133 cj = &coal->j;
1134
1135 if (task == cj->leader) {
1136 return COALITION_TASKROLE_LEADER;
1137 }
1138
1139 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) {
1140 if (t == task) {
1141 return COALITION_TASKROLE_XPC;
1142 }
1143 }
1144
1145 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) {
1146 if (t == task) {
1147 return COALITION_TASKROLE_EXT;
1148 }
1149 }
1150
1151 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) {
1152 if (t == task) {
1153 return COALITION_TASKROLE_UNDEF;
1154 }
1155 }
1156
1157 /* task not in the coalition?! */
1158 return COALITION_TASKROLE_NONE;
1159 }
1160
1161 static void
i_coal_jetsam_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))1162 i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
1163 {
1164 struct i_jetsam_coalition *cj;
1165 task_t t;
1166
1167 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1168
1169 cj = &coal->j;
1170
1171 if (cj->leader) {
1172 callback(coal, ctx, cj->leader);
1173 }
1174
1175 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM])
1176 callback(coal, ctx, t);
1177
1178 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM])
1179 callback(coal, ctx, t);
1180
1181 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM])
1182 callback(coal, ctx, t);
1183 }
1184
1185
1186 /*
1187 *
1188 * Main Coalition implementation
1189 *
1190 */
1191
/*
 * coalition_create_internal
 * Allocate and initialize a new coalition of the given type/role and
 * publish it in the global coalition hash.
 * Returns: New coalition object, referenced for the caller and unlocked.
 * Condition: coalitions_list_lock must be UNLOCKED.
 */
kern_return_t
coalition_create_internal(int type, int role, boolean_t privileged, boolean_t efficient, coalition_t *out, uint64_t *coalition_id)
{
	kern_return_t kr;
	struct coalition *new_coal;
	uint64_t cid;

	if (type < 0 || type > COALITION_TYPE_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Z_ZERO: all fields start zeroed; Z_NOFAIL: allocation cannot fail */
	new_coal = zalloc_flags(coalition_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	new_coal->type = type;
	new_coal->role = role;

	/* initialize type-specific resources */
	kr = coal_call(new_coal, init, privileged, efficient);
	if (kr != KERN_SUCCESS) {
		zfree(coalition_zone, new_coal);
		return kr;
	}

	/* One for caller, one for coalitions list */
	coal_ref_init(new_coal, 2);

	new_coal->privileged = privileged ? TRUE : FALSE;
	new_coal->efficient = efficient ? TRUE : FALSE;
#if DEVELOPMENT || DEBUG
	new_coal->should_notify = 1;
#endif

	lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, LCK_ATTR_NULL);

	/* id assignment and hash insert are serialized by the list lock */
	lck_mtx_lock(&coalitions_list_lock);
	new_coal->id = cid = coalition_next_id++;

	smr_hash_serialized_insert(&coalition_hash, &new_coal->link,
	    &coal_hash_traits);

#if CONFIG_THREAD_GROUPS
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
	    new_coal->id, new_coal->type,
	    (new_coal->type == COALITION_TYPE_JETSAM && new_coal->j.thread_group) ?
	    thread_group_get_id(new_coal->j.thread_group) : 0);

#else
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
	    new_coal->id, new_coal->type);
#endif

	if (smr_hash_serialized_should_grow(&coalition_hash, 1, 4)) {
		/* grow if more than 4 elements per bucket (drops the lock) */
		smr_hash_grow_and_unlock(&coalition_hash,
		    &coalitions_list_lock, &coal_hash_traits);
	} else {
		lck_mtx_unlock(&coalitions_list_lock);
	}

	coal_dbg("id:%llu, type:%s", cid, coal_type_str(type));

	if (coalition_id != NULL) {
		*coalition_id = cid;
	}

	*out = new_coal;
	return KERN_SUCCESS;
}
1265
1266 static void
coalition_free(struct smr_node * node)1267 coalition_free(struct smr_node *node)
1268 {
1269 struct coalition *coal;
1270
1271 coal = __container_of(node, struct coalition, smr_node);
1272 zfree(coalition_zone, coal);
1273 }
1274
1275 static __attribute__((noinline)) void
coalition_retire(coalition_t coal)1276 coalition_retire(coalition_t coal)
1277 {
1278 coalition_lock(coal);
1279
1280 coal_dbg("id:%llu type:%s active_count:%u%s",
1281 coal->id, coal_type_str(coal->type), coal->active_count,
1282 rc <= 0 ? ", will deallocate now" : "");
1283
1284 assert(coal->termrequested);
1285 assert(coal->terminated);
1286 assert(coal->active_count == 0);
1287 assert(coal->reaped);
1288 assert(coal->focal_task_count == 0);
1289 assert(coal->nonfocal_task_count == 0);
1290 assert(coal->game_task_count == 0);
1291 #if CONFIG_THREAD_GROUPS
1292 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
1293 coal->id, coal->type,
1294 coal->type == COALITION_TYPE_JETSAM ?
1295 coal->j.thread_group : 0);
1296 #else
1297 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
1298 coal->id, coal->type);
1299 #endif
1300
1301 coal_call(coal, dealloc);
1302
1303 coalition_unlock(coal);
1304
1305 lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);
1306
1307 smr_proc_task_call(&coal->smr_node, sizeof(*coal), coalition_free);
1308 }
1309
1310 /*
1311 * coalition_release
1312 * Condition: coalition must be UNLOCKED.
1313 * */
1314 void
coalition_release(coalition_t coal)1315 coalition_release(coalition_t coal)
1316 {
1317 if (coal_ref_release(coal) > 0) {
1318 return;
1319 }
1320
1321 coalition_retire(coal);
1322 }
1323
1324 /*
1325 * coalition_find_by_id
1326 * Returns: Coalition object with specified id, referenced.
1327 */
1328 coalition_t
coalition_find_by_id(uint64_t cid)1329 coalition_find_by_id(uint64_t cid)
1330 {
1331 smrh_key_t key = SMRH_SCALAR_KEY(cid);
1332
1333 if (cid == 0) {
1334 return COALITION_NULL;
1335 }
1336
1337 return smr_hash_get(&coalition_hash, key, &coal_hash_traits);
1338 }
1339
/*
 * coalition_find_and_activate_by_id
 * Returns: Coalition object with specified id, referenced, and activated.
 * This is the function to use when putting a 'new' thing into a coalition,
 * like posix_spawn of an XPC service by launchd.
 * See also coalition_extend_active.
 */
coalition_t
coalition_find_and_activate_by_id(uint64_t cid)
{
	coalition_t coal = coalition_find_by_id(cid);

	if (coal == COALITION_NULL) {
		return COALITION_NULL;
	}

	coalition_lock(coal);

	if (coal->reaped || coal->terminated) {
		/* Too late to put something new into this coalition, it's
		 * already on its way out the door */
		coalition_unlock(coal);
		coalition_release(coal);
		return COALITION_NULL;
	}

	/* activation keeps the coalition from terminating while in use */
	coal->active_count++;

#if COALITION_DEBUG
	/* snapshot under the lock purely for the debug message below */
	uint32_t rc = coal_ref_count(coal);
	uint32_t ac = coal->active_count;
#endif
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
	    coal->id, coal_type_str(coal->type), rc, ac);

	return coal;
}
1379
/* Returns the coalition's unique 64-bit id; coal must be non-NULL. */
uint64_t
coalition_id(coalition_t coal)
{
	assert(coal != COALITION_NULL);
	return coal->id;
}
1386
1387 void
task_coalition_ids(task_t task,uint64_t ids[COALITION_NUM_TYPES])1388 task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES])
1389 {
1390 int i;
1391 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1392 if (task->coalition[i]) {
1393 ids[i] = task->coalition[i]->id;
1394 } else {
1395 ids[i] = 0;
1396 }
1397 }
1398 }
1399
1400 void
task_coalition_roles(task_t task,int roles[COALITION_NUM_TYPES])1401 task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES])
1402 {
1403 int i;
1404 memset(roles, 0, COALITION_NUM_TYPES * sizeof(roles[0]));
1405
1406 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1407 if (task->coalition[i]) {
1408 coalition_lock(task->coalition[i]);
1409 roles[i] = coal_call(task->coalition[i],
1410 get_taskrole, task);
1411 coalition_unlock(task->coalition[i]);
1412 } else {
1413 roles[i] = COALITION_TASKROLE_NONE;
1414 }
1415 }
1416 }
1417
1418 int
task_coalition_role_for_type(task_t task,int coalition_type)1419 task_coalition_role_for_type(task_t task, int coalition_type)
1420 {
1421 coalition_t coal;
1422 int role;
1423 if (coalition_type >= COALITION_NUM_TYPES) {
1424 panic("Attempt to call task_coalition_role_for_type with invalid coalition_type: %d\n", coalition_type);
1425 }
1426 coal = task->coalition[coalition_type];
1427 if (coal == NULL) {
1428 return COALITION_TASKROLE_NONE;
1429 }
1430 coalition_lock(coal);
1431 role = coal_call(coal, get_taskrole, task);
1432 coalition_unlock(coal);
1433 return role;
1434 }
1435
/* Returns the coalition's type (COALITION_TYPE_*); no locking required. */
int
coalition_type(coalition_t coal)
{
	return coal->type;
}
1441
/* TRUE once termination has been requested for this coalition. */
boolean_t
coalition_term_requested(coalition_t coal)
{
	return coal->termrequested;
}

/* TRUE once the coalition's active count dropped to zero after a
 * termination request (set in coalition_remove_active). */
boolean_t
coalition_is_terminated(coalition_t coal)
{
	return coal->terminated;
}

/* TRUE once the coalition has been reaped; no new members may join. */
boolean_t
coalition_is_reaped(coalition_t coal)
{
	return coal->reaped;
}
1459
1460 boolean_t
coalition_is_privileged(coalition_t coal)1461 coalition_is_privileged(coalition_t coal)
1462 {
1463 return coal->privileged || unrestrict_coalition_syscalls;
1464 }
1465
1466 boolean_t
task_is_in_privileged_coalition(task_t task,int type)1467 task_is_in_privileged_coalition(task_t task, int type)
1468 {
1469 if (type < 0 || type > COALITION_TYPE_MAX) {
1470 return FALSE;
1471 }
1472 if (unrestrict_coalition_syscalls) {
1473 return TRUE;
1474 }
1475 if (!task->coalition[type]) {
1476 return FALSE;
1477 }
1478 return task->coalition[type]->privileged;
1479 }
1480
1481 void
task_coalition_update_gpu_stats(task_t task,uint64_t gpu_ns_delta)1482 task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta)
1483 {
1484 coalition_t coal;
1485
1486 assert(task != TASK_NULL);
1487 if (gpu_ns_delta == 0) {
1488 return;
1489 }
1490
1491 coal = task->coalition[COALITION_TYPE_RESOURCE];
1492 assert(coal != COALITION_NULL);
1493
1494 coalition_lock(coal);
1495 coal->r.gpu_time += gpu_ns_delta;
1496 coalition_unlock(coal);
1497 }
1498
1499 boolean_t
task_coalition_adjust_focal_count(task_t task,int count,uint32_t * new_count)1500 task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count)
1501 {
1502 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1503 if (coal == COALITION_NULL) {
1504 return FALSE;
1505 }
1506
1507 *new_count = os_atomic_add(&coal->focal_task_count, count, relaxed);
1508 assert(*new_count != UINT32_MAX);
1509 return TRUE;
1510 }
1511
1512 uint32_t
task_coalition_focal_count(task_t task)1513 task_coalition_focal_count(task_t task)
1514 {
1515 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1516 if (coal == COALITION_NULL) {
1517 return 0;
1518 }
1519
1520 return coal->focal_task_count;
1521 }
1522
1523 uint32_t
task_coalition_game_mode_count(task_t task)1524 task_coalition_game_mode_count(task_t task)
1525 {
1526 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1527 if (coal == COALITION_NULL) {
1528 return 0;
1529 }
1530
1531 return coal->game_task_count;
1532 }
1533
1534
1535 boolean_t
task_coalition_adjust_nonfocal_count(task_t task,int count,uint32_t * new_count)1536 task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count)
1537 {
1538 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1539 if (coal == COALITION_NULL) {
1540 return FALSE;
1541 }
1542
1543 *new_count = os_atomic_add(&coal->nonfocal_task_count, count, relaxed);
1544 assert(*new_count != UINT32_MAX);
1545 return TRUE;
1546 }
1547
1548 uint32_t
task_coalition_nonfocal_count(task_t task)1549 task_coalition_nonfocal_count(task_t task)
1550 {
1551 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1552 if (coal == COALITION_NULL) {
1553 return 0;
1554 }
1555
1556 return coal->nonfocal_task_count;
1557 }
1558
1559 bool
task_coalition_adjust_game_mode_count(task_t task,int count,uint32_t * new_count)1560 task_coalition_adjust_game_mode_count(task_t task, int count, uint32_t *new_count)
1561 {
1562 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1563 if (coal == COALITION_NULL) {
1564 return false;
1565 }
1566
1567 *new_count = os_atomic_add(&coal->game_task_count, count, relaxed);
1568 assert(*new_count != UINT32_MAX);
1569 return true;
1570 }
1571
1572 #if CONFIG_THREAD_GROUPS
1573
1574 /* Thread group lives as long as the task is holding the coalition reference */
1575 struct thread_group *
task_coalition_get_thread_group(task_t task)1576 task_coalition_get_thread_group(task_t task)
1577 {
1578 coalition_t coal = task->coalition[COALITION_TYPE_JETSAM];
1579 /* return system thread group for non-jetsam coalitions */
1580 if (coal == COALITION_NULL) {
1581 return init_coalition[COALITION_TYPE_JETSAM]->j.thread_group;
1582 }
1583 return coal->j.thread_group;
1584 }
1585
1586
1587 struct thread_group *
kdp_coalition_get_thread_group(coalition_t coal)1588 kdp_coalition_get_thread_group(coalition_t coal)
1589 {
1590 if (coal->type != COALITION_TYPE_JETSAM) {
1591 return NULL;
1592 }
1593 assert(coal->j.thread_group != NULL);
1594 return coal->j.thread_group;
1595 }
1596
1597 /* Thread group lives as long as the coalition reference is held */
1598 struct thread_group *
coalition_get_thread_group(coalition_t coal)1599 coalition_get_thread_group(coalition_t coal)
1600 {
1601 if (coal->type != COALITION_TYPE_JETSAM) {
1602 return NULL;
1603 }
1604 assert(coal->j.thread_group != NULL);
1605 return coal->j.thread_group;
1606 }
1607
1608 /* Donates the thread group reference to the coalition */
1609 void
coalition_set_thread_group(coalition_t coal,struct thread_group * tg)1610 coalition_set_thread_group(coalition_t coal, struct thread_group *tg)
1611 {
1612 assert(coal != COALITION_NULL);
1613 assert(tg != NULL);
1614
1615 if (coal->type != COALITION_TYPE_JETSAM) {
1616 return;
1617 }
1618 struct thread_group *old_tg = coal->j.thread_group;
1619 assert(old_tg != NULL);
1620 coal->j.thread_group = tg;
1621
1622 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_THREAD_GROUP_SET),
1623 coal->id, coal->type, thread_group_get_id(tg));
1624
1625 thread_group_release(old_tg);
1626 }
1627
1628 void
task_coalition_thread_group_focal_update(task_t task)1629 task_coalition_thread_group_focal_update(task_t task)
1630 {
1631 assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL);
1632 thread_group_flags_update_lock();
1633 uint32_t focal_count = task_coalition_focal_count(task);
1634 if (focal_count) {
1635 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1636 } else {
1637 thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1638 }
1639 thread_group_flags_update_unlock();
1640 }
1641
1642 void
task_coalition_thread_group_game_mode_update(task_t task)1643 task_coalition_thread_group_game_mode_update(task_t task)
1644 {
1645 assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL);
1646 thread_group_flags_update_lock();
1647 if (task_coalition_game_mode_count(task)) {
1648 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_GAME_MODE);
1649 } else {
1650 thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_GAME_MODE);
1651 }
1652 thread_group_flags_update_unlock();
1653 }
1654
1655 void
task_coalition_thread_group_application_set(task_t task)1656 task_coalition_thread_group_application_set(task_t task)
1657 {
1658 /*
1659 * Setting the "Application" flag on the thread group is a one way transition.
1660 * Once a coalition has a single task with an application apptype, the
1661 * thread group associated with the coalition is tagged as Application.
1662 */
1663 thread_group_flags_update_lock();
1664 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_APPLICATION);
1665 thread_group_flags_update_unlock();
1666 }
1667
1668 #endif /* CONFIG_THREAD_GROUPS */
1669
/*
 * coalition_for_each_task
 * Invoke 'callback(coal, ctx, task)' for every task in the coalition,
 * via the type-specific iterate_tasks implementation.
 * Condition: coalition must be referenced and UNLOCKED; the callback
 * runs with the coalition lock HELD and must not re-take it.
 */
void
coalition_for_each_task(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t))
{
	assert(coal != COALITION_NULL);

	coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
	    coal, coal->id, coal_type_str(coal->type), coal->active_count);

	coalition_lock(coal);

	coal_call(coal, iterate_tasks, ctx, callback);

	coalition_unlock(coal);
}
1685
1686
/*
 * coalition_remove_active
 * Drop one activation on the coalition. When termination has been
 * requested and this was the last activation, mark the coalition
 * terminated and (exactly once) notify userspace.
 * Condition: coalition must be referenced and UNLOCKED.
 */
void
coalition_remove_active(coalition_t coal)
{
	coalition_lock(coal);

	assert(!coalition_is_reaped(coal));
	assert(coal->active_count > 0);

	coal->active_count--;

	boolean_t do_notify = FALSE;
	uint64_t notify_id = 0;
	uint32_t notify_flags = 0;
	if (coal->termrequested && coal->active_count == 0) {
		/* We only notify once, when active_count reaches zero.
		 * We just decremented, so if it reached zero, we mustn't have
		 * notified already.
		 */
		assert(!coal->terminated);
		coal->terminated = TRUE;

		assert(!coal->notified);

		coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
		/* notification can be suppressed on dev/debug kernels */
		do_notify = coal->should_notify;
#else
		do_notify = TRUE;
#endif
		notify_id = coal->id;
		notify_flags = 0;
	}

#if COALITION_DEBUG
	/* snapshot under the lock purely for the debug message below */
	uint64_t cid = coal->id;
	uint32_t rc = coal_ref_count(coal);
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
	    cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " ");

	/* send the notification outside the coalition lock */
	if (do_notify) {
		coalition_notify_user(notify_id, notify_flags);
	}
}
1735
1736 /* Used for kernel_task, launchd, launchd's early boot tasks... */
1737 kern_return_t
coalitions_adopt_init_task(task_t task)1738 coalitions_adopt_init_task(task_t task)
1739 {
1740 kern_return_t kr;
1741 kr = coalitions_adopt_task(init_coalition, task);
1742 if (kr != KERN_SUCCESS) {
1743 panic("failed to adopt task %p into default coalition: %d", task, kr);
1744 }
1745 return kr;
1746 }
1747
1748 /* Used for forked corpses. */
1749 kern_return_t
coalitions_adopt_corpse_task(task_t task)1750 coalitions_adopt_corpse_task(task_t task)
1751 {
1752 kern_return_t kr;
1753 kr = coalitions_adopt_task(corpse_coalition, task);
1754 if (kr != KERN_SUCCESS) {
1755 panic("failed to adopt task %p into corpse coalition: %d", task, kr);
1756 }
1757 return kr;
1758 }
1759
/*
 * coalition_adopt_task_internal
 * Add 'task' to 'coal', activating the coalition and taking a reference
 * on it which the task holds until task_release_coalitions().
 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
 * is already terminated.
 */
static kern_return_t
coalition_adopt_task_internal(coalition_t coal, task_t task)
{
	kern_return_t kr;

	/* a task may belong to at most one coalition of each type */
	if (task->coalition[coal->type]) {
		return KERN_ALREADY_IN_SET;
	}

	coalition_lock(coal);

	if (coal->reaped || coal->terminated) {
		coalition_unlock(coal);
		return KERN_TERMINATED;
	}

	/* type-specific adoption (e.g. enqueue on a jetsam role list) */
	kr = coal_call(coal, adopt_task, task);
	if (kr != KERN_SUCCESS) {
		goto out_unlock;
	}

	/* adoption activates the coalition and takes a ref on it */
	coal->active_count++;

	coal_ref_retain(coal);

	task->coalition[coal->type] = coal;

out_unlock:
#if COALITION_DEBUG
	(void)coal; /* need expression after label */
	uint64_t cid = coal->id;
	uint32_t rc = coal_ref_count(coal);
	uint32_t ct = coal->type;
#endif
	if (get_task_uniqueid(task) != UINT64_MAX) {
		/* On 32-bit targets, uniqueid will get truncated to 32 bits */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT),
		    coal->id, get_task_uniqueid(task));
	}

	coalition_unlock(coal);

	coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
	    task_pid(task), cid, coal_type_str(ct), rc, kr);
	return kr;
}
1811
/*
 * coalition_remove_task_internal
 * Remove 'task' from its coalition of the given type (no-op if absent)
 * and drop the matching activation via coalition_remove_active().
 * Does NOT release the task's coalition reference; see
 * task_release_coalitions().
 * Condition: coalition must be UNLOCKED.
 */
static kern_return_t
coalition_remove_task_internal(task_t task, int type)
{
	kern_return_t kr;

	coalition_t coal = task->coalition[type];

	if (!coal) {
		return KERN_SUCCESS;
	}

	assert(coal->type == (uint32_t)type);

	coalition_lock(coal);

	/* type-specific removal (e.g. unlink from a jetsam role list) */
	kr = coal_call(coal, remove_task, task);

#if COALITION_DEBUG
	/* snapshot under the lock purely for the debug message below */
	uint64_t cid = coal->id;
	uint32_t rc = coal_ref_count(coal);
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE),
	    coal->id, get_task_uniqueid(task));
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
	    cid, coal_type_str(ct), rc, ac, kr);

	/* deactivation may trigger the coalition termination notification */
	coalition_remove_active(coal);

	return kr;
}
1846
/*
 * coalitions_adopt_task
 * Adopt 'task' into every coalition in 'coals' (indexed by type); on any
 * failure, roll back the adoptions already performed.
 * Condition: All coalitions must be referenced and unlocked.
 * Will fail if any coalition is already terminated.
 */
kern_return_t
coalitions_adopt_task(coalition_t *coals, task_t task)
{
	int i;
	kern_return_t kr;

	/* a resource coalition is mandatory for every task */
	if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* verify that the incoming coalitions are what they say they are */
	for (i = 0; i < COALITION_NUM_TYPES; i++) {
		if (coals[i] && coals[i]->type != (uint32_t)i) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	for (i = 0; i < COALITION_NUM_TYPES; i++) {
		kr = KERN_SUCCESS;
		if (coals[i]) {
			kr = coalition_adopt_task_internal(coals[i], task);
		}
		if (kr != KERN_SUCCESS) {
			/* dis-associate any coalitions that just adopted this task */
			while (--i >= 0) {
				if (task->coalition[i]) {
					coalition_remove_task_internal(task, i);
				}
			}
			break;
		}
	}
	return kr;
}
1886
1887 /*
1888 * coalitions_remove_task
1889 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1890 */
1891 kern_return_t
coalitions_remove_task(task_t task)1892 coalitions_remove_task(task_t task)
1893 {
1894 kern_return_t kr;
1895 int i;
1896
1897 task_lock(task);
1898 if (!task_is_coalition_member(task)) {
1899 task_unlock(task);
1900 return KERN_SUCCESS;
1901 }
1902
1903 task_clear_coalition_member(task);
1904 task_unlock(task);
1905
1906 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1907 kr = coalition_remove_task_internal(task, i);
1908 assert(kr == KERN_SUCCESS);
1909 }
1910
1911 return kr;
1912 }
1913
1914 /*
1915 * task_release_coalitions
1916 * helper function to release references to all coalitions in which
1917 * 'task' is a member.
1918 */
1919 void
task_release_coalitions(task_t task)1920 task_release_coalitions(task_t task)
1921 {
1922 int i;
1923 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1924 if (task->coalition[i]) {
1925 coalition_release(task->coalition[i]);
1926 } else if (i == COALITION_TYPE_RESOURCE) {
1927 panic("deallocating task %p was not a member of a resource coalition", task);
1928 }
1929 }
1930 }
1931
1932 /*
1933 * coalitions_set_roles
1934 * for each type of coalition, if the task is a member of a coalition of
1935 * that type (given in the coalitions parameter) then set the role of
1936 * the task within that that coalition.
1937 */
1938 kern_return_t
coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],task_t task,int roles[COALITION_NUM_TYPES])1939 coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],
1940 task_t task, int roles[COALITION_NUM_TYPES])
1941 {
1942 kern_return_t kr = KERN_SUCCESS;
1943 int i;
1944
1945 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1946 if (!coalitions[i]) {
1947 continue;
1948 }
1949 coalition_lock(coalitions[i]);
1950 kr = coal_call(coalitions[i], set_taskrole, task, roles[i]);
1951 coalition_unlock(coalitions[i]);
1952 assert(kr == KERN_SUCCESS);
1953 }
1954
1955 return kr;
1956 }
1957
1958 /*
1959 * coalition_terminate_internal
1960 * Condition: Coalition must be referenced and UNLOCKED.
1961 */
1962 kern_return_t
coalition_request_terminate_internal(coalition_t coal)1963 coalition_request_terminate_internal(coalition_t coal)
1964 {
1965 assert(coal->type >= 0 && coal->type <= COALITION_TYPE_MAX);
1966
1967 if (coal == init_coalition[coal->type]) {
1968 return KERN_DEFAULT_SET;
1969 }
1970
1971 coalition_lock(coal);
1972
1973 if (coal->reaped) {
1974 coalition_unlock(coal);
1975 return KERN_INVALID_NAME;
1976 }
1977
1978 if (coal->terminated || coal->termrequested) {
1979 coalition_unlock(coal);
1980 return KERN_TERMINATED;
1981 }
1982
1983 coal->termrequested = TRUE;
1984
1985 boolean_t do_notify = FALSE;
1986 uint64_t note_id = 0;
1987 uint32_t note_flags = 0;
1988
1989 if (coal->active_count == 0) {
1990 /*
1991 * We only notify once, when active_count reaches zero.
1992 * We just set termrequested to zero. If the active count
1993 * was already at zero (tasks died before we could request
1994 * a termination notification), we should notify.
1995 */
1996 assert(!coal->terminated);
1997 coal->terminated = TRUE;
1998
1999 assert(!coal->notified);
2000
2001 coal->notified = TRUE;
2002 #if DEVELOPMENT || DEBUG
2003 do_notify = coal->should_notify;
2004 #else
2005 do_notify = TRUE;
2006 #endif
2007 note_id = coal->id;
2008 note_flags = 0;
2009 }
2010
2011 coalition_unlock(coal);
2012
2013 if (do_notify) {
2014 coalition_notify_user(note_id, note_flags);
2015 }
2016
2017 return KERN_SUCCESS;
2018 }
2019
2020 /*
2021 * coalition_reap_internal
2022 * Condition: Coalition must be referenced and UNLOCKED.
2023 */
2024 kern_return_t
coalition_reap_internal(coalition_t coal)2025 coalition_reap_internal(coalition_t coal)
2026 {
2027 assert(coal->type <= COALITION_TYPE_MAX);
2028
2029 if (coal == init_coalition[coal->type]) {
2030 return KERN_DEFAULT_SET;
2031 }
2032
2033 coalition_lock(coal);
2034 if (coal->reaped) {
2035 coalition_unlock(coal);
2036 return KERN_TERMINATED;
2037 }
2038 if (!coal->terminated) {
2039 coalition_unlock(coal);
2040 return KERN_FAILURE;
2041 }
2042 assert(coal->termrequested);
2043 if (coal->active_count > 0) {
2044 coalition_unlock(coal);
2045 return KERN_FAILURE;
2046 }
2047
2048 coal->reaped = TRUE;
2049
2050 coalition_unlock(coal);
2051
2052 lck_mtx_lock(&coalitions_list_lock);
2053 smr_hash_serialized_remove(&coalition_hash, &coal->link,
2054 &coal_hash_traits);
2055 if (smr_hash_serialized_should_shrink(&coalition_hash,
2056 COALITION_HASH_SIZE_MIN, 2, 1)) {
2057 /* shrink if more than 2 buckets per 1 element */
2058 smr_hash_shrink_and_unlock(&coalition_hash,
2059 &coalitions_list_lock, &coal_hash_traits);
2060 } else {
2061 lck_mtx_unlock(&coalitions_list_lock);
2062 }
2063
2064 /* Release the list's reference and launchd's reference. */
2065 coal_ref_release_live(coal);
2066 coalition_release(coal);
2067
2068 return KERN_SUCCESS;
2069 }
2070
#if DEVELOPMENT || DEBUG
/*
 * coalition_should_notify
 * Return whether termination notifications are enabled for 'coal',
 * or -1 when 'coal' is NULL.
 */
int
coalition_should_notify(coalition_t coal)
{
	if (!coal) {
		return -1;
	}

	coalition_lock(coal);
	int should = coal->should_notify;
	coalition_unlock(coal);

	return should;
}

/*
 * coalition_set_notify
 * Enable or disable termination notifications for 'coal'.
 * Silently ignores a NULL coalition.
 */
void
coalition_set_notify(coalition_t coal, int notify)
{
	if (!coal) {
		return;
	}

	coalition_lock(coal);
	coal->should_notify = notify ? 1 : 0;
	coalition_unlock(coal);
}
#endif
2097
/*
 * coalitions_init
 * Boot-time initialization: set up the coalition hash table and the
 * task/coalition ledgers, sanity-check the coalition type table, and
 * create the default (init) and corpse coalitions for every type that
 * declares a default.
 */
void
coalitions_init(void)
{
	kern_return_t kr;
	int i;
	const struct coalition_type *ctype;

	smr_hash_init(&coalition_hash, COALITION_HASH_SIZE_MIN);

	init_task_ledgers();

	init_coalition_ledgers();

	for (i = 0, ctype = &s_coalition_types[0]; i < COALITION_NUM_TYPES; ctype++, i++) {
		/* verify the entry in the global coalition types array */
		if (ctype->type != i ||
		    !ctype->init ||
		    !ctype->dealloc ||
		    !ctype->adopt_task ||
		    !ctype->remove_task) {
			panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
			    __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i);
		}
		if (!ctype->has_default) {
			continue;
		}
		/*
		 * Create the default coalition for this type.
		 * NOTE(review): the TRUE/FALSE arguments' meanings are not
		 * visible in this chunk — see coalition_create_internal.
		 */
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, FALSE, &init_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create init %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
		/* the default resource coalition must get the well-known kernel id */
		if (i == COALITION_TYPE_RESOURCE) {
			assert(COALITION_ID_KERNEL == init_coalition[ctype->type]->id);
		}
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, FALSE, &corpse_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create corpse %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
	}

	/* "Leak" our reference to the global object */
}
2141
2142 /*
2143 * BSD Kernel interface functions
2144 *
2145 */
2146 static void
coalition_fill_procinfo(struct coalition * coal,struct procinfo_coalinfo * coalinfo)2147 coalition_fill_procinfo(struct coalition *coal,
2148 struct procinfo_coalinfo *coalinfo)
2149 {
2150 coalinfo->coalition_id = coal->id;
2151 coalinfo->coalition_type = coal->type;
2152 coalinfo->coalition_tasks = coalition_get_task_count(coal);
2153 }
2154
2155
/*
 * coalitions_get_list
 * Fill 'coal_list' with info for every unreaped coalition matching
 * 'type' (any type when 'type' < 0), up to 'list_sz' entries.
 * Returns the total number of matching coalitions, which may exceed
 * 'list_sz' — callers can use the count to size a retry.
 */
size_t
coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, size_t list_sz)
{
	size_t ncoals = 0;
	struct coalition *coal;

	lck_mtx_lock(&coalitions_list_lock);
	smr_hash_foreach(coal, &coalition_hash, &coal_hash_traits) {
		if (!coal->reaped && (type < 0 || type == (int)coal->type)) {
			/* copy out only while there is room; keep counting regardless */
			if (coal_list && ncoals < list_sz) {
				coalition_fill_procinfo(coal, &coal_list[ncoals]);
			}
			++ncoals;
		}
	}
	lck_mtx_unlock(&coalitions_list_lock);

	return ncoals;
}
2175
2176 /*
2177 * Return the coaltion of the given type to which the task belongs.
2178 */
2179 coalition_t
task_get_coalition(task_t task,int coal_type)2180 task_get_coalition(task_t task, int coal_type)
2181 {
2182 coalition_t c;
2183
2184 if (task == NULL || coal_type > COALITION_TYPE_MAX) {
2185 return COALITION_NULL;
2186 }
2187
2188 c = task->coalition[coal_type];
2189 assert(c == COALITION_NULL || (int)c->type == coal_type);
2190 return c;
2191 }
2192
2193 /*
2194 * Report if the given task is the leader of the given jetsam coalition.
2195 */
2196 boolean_t
coalition_is_leader(task_t task,coalition_t coal)2197 coalition_is_leader(task_t task, coalition_t coal)
2198 {
2199 boolean_t ret = FALSE;
2200
2201 if (coal != COALITION_NULL) {
2202 coalition_lock(coal);
2203
2204 ret = (coal->type == COALITION_TYPE_JETSAM && coal->j.leader == task);
2205
2206 coalition_unlock(coal);
2207 }
2208
2209 return ret;
2210 }
2211
/*
 * coalition_iterate_stackshot
 * Invoke 'callout' on every coalition of 'coalition_type'.
 * Walks the hash without taking the list lock and validates that each
 * element is safely readable first, so the iteration can run from
 * stackshot/debugger context where a fault would be fatal.
 */
kern_return_t
coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type)
{
	coalition_t coal;
	int i = 0;

	smr_hash_foreach(coal, &coalition_hash, &coal_hash_traits) {
		/* bail out rather than fault on a torn/invalid element */
		if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition))) {
			return KERN_FAILURE;
		}

		/* 'i' is the running index of matching coalitions */
		if (coalition_type == coal->type) {
			callout(arg, i++, coal);
		}
	}

	return KERN_SUCCESS;
}
2230
2231 task_t
kdp_coalition_get_leader(coalition_t coal)2232 kdp_coalition_get_leader(coalition_t coal)
2233 {
2234 if (!coal) {
2235 return TASK_NULL;
2236 }
2237
2238 if (coal->type == COALITION_TYPE_JETSAM) {
2239 return coal->j.leader;
2240 }
2241 return TASK_NULL;
2242 }
2243
/*
 * coalition_get_leader
 * Return the leader of a jetsam coalition with a task reference held,
 * or TASK_NULL for a NULL/non-jetsam/leaderless coalition.
 * The caller is responsible for dropping the returned reference.
 */
task_t
coalition_get_leader(coalition_t coal)
{
	task_t leader = TASK_NULL;

	if (!coal) {
		return TASK_NULL;
	}

	coalition_lock(coal);
	/* only jetsam coalitions have a leader */
	if (coal->type != COALITION_TYPE_JETSAM) {
		goto out_unlock;
	}

	leader = coal->j.leader;
	if (leader != TASK_NULL) {
		/* take the reference under the coalition lock so the leader
		 * cannot disappear before the caller sees it */
		task_reference(leader);
	}

out_unlock:
	coalition_unlock(coal);
	return leader;
}
2267
2268
/*
 * coalition_get_task_count
 * Count the tasks currently in 'coal' by walking its membership lists.
 * Returns 0 for a NULL coalition or an unrecognized coalition type.
 */
int
coalition_get_task_count(coalition_t coal)
{
	int ntasks = 0;
	struct queue_entry *qe;
	if (!coal) {
		return 0;
	}

	coalition_lock(coal);
	switch (coal->type) {
	case COALITION_TYPE_RESOURCE:
		/* resource coalitions keep all members on a single list */
		qe_foreach(qe, &coal->r.tasks)
		ntasks++;
		break;
	case COALITION_TYPE_JETSAM:
		/* jetsam coalitions track the leader separately from the
		 * other/extensions/services role lists */
		if (coal->j.leader) {
			ntasks++;
		}
		qe_foreach(qe, &coal->j.other)
		ntasks++;
		qe_foreach(qe, &coal->j.extensions)
		ntasks++;
		qe_foreach(qe, &coal->j.services)
		ntasks++;
		break;
	default:
		break;
	}
	coalition_unlock(coal);

	return ntasks;
}
2302
2303
/*
 * i_get_list_footprint
 * Sum the physical footprint (in bytes) of every task on 'list',
 * incrementing *ntasks once per task. 'type' selects which embedded
 * task_coalition linkage the list is threaded through.
 */
static uint64_t
i_get_list_footprint(queue_t list, int type, int *ntasks)
{
	task_t task;
	uint64_t bytes = 0;

	qe_foreach_element(task, list, task_coalition[type]) {
		bytes += get_task_phys_footprint(task);
		/* NOTE: 'bytes' logged here is the running total so far,
		 * not this individual task's footprint */
		coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
		    *ntasks, task_pid(task), type, bytes);
		*ntasks += 1;
	}

	return bytes;
}
2319
/*
 * coalition_get_page_count
 * Return the combined physical footprint of all tasks in 'coal',
 * expressed in pages. If 'ntasks' is non-NULL it receives the number
 * of member tasks counted. Returns 0 for a NULL coalition.
 */
uint64_t
coalition_get_page_count(coalition_t coal, int *ntasks)
{
	uint64_t bytes = 0;
	int num_tasks = 0;

	if (ntasks) {
		*ntasks = 0;
	}
	if (!coal) {
		return bytes;
	}

	coalition_lock(coal);

	switch (coal->type) {
	case COALITION_TYPE_RESOURCE:
		bytes += i_get_list_footprint(&coal->r.tasks, COALITION_TYPE_RESOURCE, &num_tasks);
		break;
	case COALITION_TYPE_JETSAM:
		/* the leader is tracked separately from the role lists */
		if (coal->j.leader) {
			bytes += get_task_phys_footprint(coal->j.leader);
			num_tasks = 1;
		}
		bytes += i_get_list_footprint(&coal->j.extensions, COALITION_TYPE_JETSAM, &num_tasks);
		bytes += i_get_list_footprint(&coal->j.services, COALITION_TYPE_JETSAM, &num_tasks);
		bytes += i_get_list_footprint(&coal->j.other, COALITION_TYPE_JETSAM, &num_tasks);
		break;
	default:
		break;
	}

	coalition_unlock(coal);

	if (ntasks) {
		*ntasks = num_tasks;
	}

	/* convert the byte total to pages */
	return bytes / PAGE_SIZE_64;
}
2360
/* Per-task record used when sorting coalition members for pid lists. */
struct coal_sort_s {
	int pid;
	int usr_order;
	uint64_t bytes; /* physical footprint, in bytes */
};

/*
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);

extern void
qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);

/*
 * dflt_cmp
 * Default sort: user-specified order ascending, with ties broken by
 * memory footprint descending.
 *
 * The comparisons are done with relational operators rather than the
 * previous (int)((int64_t)b - (int64_t)a) subtraction: that trick
 * truncates the 64-bit difference to 32 bits, so footprints differing
 * by 2^31 bytes or more could compare with the wrong sign.
 */
static int
dflt_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	/*
	 * if both A and B are equal, use a memory descending sort
	 */
	if (csA->usr_order == csB->usr_order) {
		return (csB->bytes > csA->bytes) - (csB->bytes < csA->bytes);
	}

	/* otherwise, return the relationship between user specified orders */
	return (csA->usr_order > csB->usr_order) - (csA->usr_order < csB->usr_order);
}
2393
2394 static int
mem_asc_cmp(const void * a,const void * b)2395 mem_asc_cmp(const void *a, const void *b)
2396 {
2397 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2398 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2399
2400 return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
2401 }
2402
2403 static int
mem_dec_cmp(const void * a,const void * b)2404 mem_dec_cmp(const void *a, const void *b)
2405 {
2406 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2407 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2408
2409 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
2410 }
2411
2412 static int
usr_asc_cmp(const void * a,const void * b)2413 usr_asc_cmp(const void *a, const void *b)
2414 {
2415 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2416 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2417
2418 return csA->usr_order - csB->usr_order;
2419 }
2420
2421 static int
usr_dec_cmp(const void * a,const void * b)2422 usr_dec_cmp(const void *a, const void *b)
2423 {
2424 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2425 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2426
2427 return csB->usr_order - csA->usr_order;
2428 }
2429
2430 /* avoid dynamic allocation in this path */
2431 #define MAX_SORTED_PIDS 80
2432
/*
 * coalition_get_sort_list
 * Append one coal_sort_s entry for each task on 'list' — or, when
 * 'list' is NULL, for the jetsam coalition's leader — filling in the
 * field(s) relevant to 'sort_order'. Returns the number of entries
 * written (at most array_sz).
 * Called with the coalition lock held. Fields not written here are
 * relied upon to be zero: the caller zero-initializes sort_array.
 */
static int
coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
	int ntasks = 0;
	task_t task;

	assert(sort_array != NULL);

	if (array_sz <= 0) {
		return 0;
	}

	if (!list) {
		/*
		 * this function will only be called with a NULL
		 * list for JETSAM-type coalitions, and is intended
		 * to investigate the leader process
		 */
		if (coal->type != COALITION_TYPE_JETSAM ||
		    coal->j.leader == TASK_NULL) {
			return 0;
		}
		sort_array[0].pid = task_pid(coal->j.leader);
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			/* default sort needs both usr_order and bytes */
			sort_array[0].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[0].usr_order = 0;
			break;
		default:
			break;
		}
		return 1;
	}

	qe_foreach_element(task, list, task_coalition[coal->type]) {
		if (ntasks >= array_sz) {
			/*
			 * callers pass the remaining capacity of a
			 * MAX_SORTED_PIDS-sized array, so running out of
			 * room means the coalition exceeded the overall
			 * MAX_SORTED_PIDS budget
			 */
			printf("WARNING: more than %d pids in coalition %llu\n",
			    MAX_SORTED_PIDS, coal->id);
			break;
		}

		sort_array[ntasks].pid = task_pid(task);

		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			/* default sort needs both usr_order and bytes */
			sort_array[ntasks].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[ntasks].bytes = get_task_phys_footprint(task);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[ntasks].usr_order = 0;
			break;
		default:
			break;
		}

		ntasks++;
	}

	return ntasks;
}
2505
/*
 * coalition_get_pid_list
 * Collect the PIDs of coalition members whose role matches 'rolemask',
 * sorted per 'sort_order', into 'pid_list' (up to list_sz entries).
 * Returns the total number of matching tasks — which may exceed
 * list_sz — or a negative errno-style value on invalid parameters.
 */
int
coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz)
{
	struct i_jetsam_coalition *cj;
	int ntasks = 0;
	cmpfunc_t cmp_func = NULL;
	struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0, 0, 0} }; /* keep to < 2k */

	if (!coal ||
	    !(rolemask & COALITION_ROLEMASK_ALLROLES) ||
	    !pid_list || list_sz < 1) {
		coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
		    "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1,
		    rolemask, pid_list, list_sz);
		return -EINVAL;
	}

	/* map the requested sort order to a comparator (NULL = no sort) */
	switch (sort_order) {
	case COALITION_SORT_NOSORT:
		cmp_func = NULL;
		break;
	case COALITION_SORT_DEFAULT:
		cmp_func = dflt_cmp;
		break;
	case COALITION_SORT_MEM_ASC:
		cmp_func = mem_asc_cmp;
		break;
	case COALITION_SORT_MEM_DEC:
		cmp_func = mem_dec_cmp;
		break;
	case COALITION_SORT_USER_ASC:
		cmp_func = usr_asc_cmp;
		break;
	case COALITION_SORT_USER_DEC:
		cmp_func = usr_dec_cmp;
		break;
	default:
		return -ENOTSUP;
	}

	coalition_lock(coal);

	/* resource coalitions have a single task list; roles don't apply */
	if (coal->type == COALITION_TYPE_RESOURCE) {
		ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks,
		    sort_array, MAX_SORTED_PIDS);
		goto unlock_coal;
	}

	cj = &coal->j;

	/*
	 * Gather each requested jetsam role list in turn, packing entries
	 * into the remaining space of sort_array.
	 */
	if (rolemask & COALITION_ROLEMASK_UNDEF) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->other,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_XPC) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->services,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_EXT) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	/* a NULL list asks coalition_get_sort_list for the leader itself */
	if (rolemask & COALITION_ROLEMASK_LEADER) {
		ntasks += coalition_get_sort_list(coal, sort_order, NULL,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

unlock_coal:
	coalition_unlock(coal);

	/* sort based on the chosen criterion (no sense sorting 1 item) */
	if (cmp_func && ntasks > 1) {
		qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func);
	}

	/* copy out at most list_sz pids; still return the full count */
	for (int i = 0; i < ntasks; i++) {
		if (i >= list_sz) {
			break;
		}
		coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
		    i, sort_array[i].pid, sort_array[i].bytes,
		    sort_array[i].usr_order);
		pid_list[i] = sort_array[i].pid;
	}

	return ntasks;
}
2601
/*
 * Per-task callback used by coalition_mark_swappable: opt the task into
 * self-donating its pages (presumably making them swap-eligible — see
 * vm_task_set_selfdonate_pages for the exact semantics).
 */
static void
mark_coalition_member_as_swappable(__unused coalition_t coal, __unused void *ctx, task_t task)
{
	vm_task_set_selfdonate_pages(task, true);
}
2607
2608 void
coalition_mark_swappable(coalition_t coal)2609 coalition_mark_swappable(coalition_t coal)
2610 {
2611 struct i_jetsam_coalition *cj = NULL;
2612
2613 coalition_lock(coal);
2614 assert(coal && coal->type == COALITION_TYPE_JETSAM);
2615
2616 cj = &coal->j;
2617 cj->swap_enabled = true;
2618
2619 i_coal_jetsam_iterate_tasks(coal, NULL, mark_coalition_member_as_swappable);
2620
2621 coalition_unlock(coal);
2622 }
2623
2624 bool
coalition_is_swappable(coalition_t coal)2625 coalition_is_swappable(coalition_t coal)
2626 {
2627 struct i_jetsam_coalition *cj = NULL;
2628
2629 coalition_lock(coal);
2630 assert(coal && coal->type == COALITION_TYPE_JETSAM);
2631
2632 cj = &coal->j;
2633 bool enabled = cj->swap_enabled;
2634
2635 coalition_unlock(coal);
2636
2637 return enabled;
2638 }
2639