1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32
33 #include <kern/coalition.h>
34 #include <kern/exc_resource.h>
35 #include <kern/host.h>
36 #include <kern/ledger.h>
37 #include <kern/mach_param.h> /* for TASK_CHUNK */
38 #if MONOTONIC
39 #include <kern/monotonic.h>
40 #endif /* MONOTONIC */
41 #include <kern/policy_internal.h>
42 #include <kern/task.h>
43 #include <kern/thread_group.h>
44 #include <kern/zalloc.h>
45 #include <vm/vm_pageout.h>
46
47 #include <libkern/OSAtomic.h>
48
49 #include <mach/coalition_notification_server.h>
50 #include <mach/host_priv.h>
51 #include <mach/host_special_ports.h>
52
53 #include <os/log.h>
54
55 #include <sys/errno.h>
56
57 /*
58 * BSD interface functions
59 */
60 size_t coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, size_t list_sz);
61 coalition_t task_get_coalition(task_t task, int type);
62 boolean_t coalition_is_leader(task_t task, coalition_t coal);
63 task_t coalition_get_leader(coalition_t coal);
64 int coalition_get_task_count(coalition_t coal);
65 uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
66 int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
67 int *pid_list, int list_sz);
68
69 /* defined in task.c */
70 extern ledger_template_t task_ledger_template;
71
72 /*
73 * Templates; task template is copied due to potential allocation limits on
74 * task ledgers.
75 */
76 ledger_template_t coalition_task_ledger_template = NULL;
77 ledger_template_t coalition_ledger_template = NULL;
78
79 extern int proc_selfpid(void);
/*
 * Coalition zone needs limits. We expect there will be as many coalitions
 * as tasks (same order of magnitude), so use the task zone's limits.
 */
84 #define CONFIG_COALITION_MAX CONFIG_TASK_MAX
85 #define COALITION_CHUNK TASK_CHUNK
86
87 #if DEBUG || DEVELOPMENT
88 TUNABLE_WRITEABLE(int, unrestrict_coalition_syscalls, "unrestrict_coalition_syscalls", 0);
89 #else
90 #define unrestrict_coalition_syscalls false
91 #endif
92
93 LCK_GRP_DECLARE(coalitions_lck_grp, "coalition");
94
95 /* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
96 static LCK_RW_DECLARE(coalitions_list_lock, &coalitions_lck_grp);
97 static uint64_t coalition_count;
98 static uint64_t coalition_next_id = 1;
99 static queue_head_t coalitions_q;
100
101 coalition_t init_coalition[COALITION_NUM_TYPES];
102 coalition_t corpse_coalition[COALITION_NUM_TYPES];
103
104 static const char *
coal_type_str(int type)105 coal_type_str(int type)
106 {
107 switch (type) {
108 case COALITION_TYPE_RESOURCE:
109 return "RESOURCE";
110 case COALITION_TYPE_JETSAM:
111 return "JETSAM";
112 default:
113 return "<unknown>";
114 }
115 }
116
/*
 * Per-type operations vector for a coalition.  One vector exists per
 * coalition type (see s_coalition_types[]); coal_call() dispatches
 * through the coalition's `type` field into the matching vector.
 */
struct coalition_type {
	int type;        /* COALITION_TYPE_* constant this vector implements */
	int has_default; /* non-zero if a default coalition of this type exists */
	/*
	 * init
	 * pre-condition: coalition just allocated (unlocked), unreferenced,
	 * type field set
	 */
	kern_return_t (*init)(coalition_t coal, boolean_t privileged, boolean_t efficient);

	/*
	 * dealloc
	 * pre-condition: coalition unlocked
	 * pre-condition: coalition refcount=0, active_count=0,
	 * termrequested=1, terminated=1, reaped=1
	 */
	void (*dealloc)(coalition_t coal);

	/*
	 * adopt_task
	 * pre-condition: coalition locked
	 * pre-condition: coalition !reaped and !terminated
	 */
	kern_return_t (*adopt_task)(coalition_t coal, task_t task);

	/*
	 * remove_task
	 * pre-condition: coalition locked
	 * pre-condition: task has been removed from coalition's task list
	 */
	kern_return_t (*remove_task)(coalition_t coal, task_t task);

	/*
	 * set_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 * active_count >= 1 (at least the given task is active)
	 */
	kern_return_t (*set_taskrole)(coalition_t coal, task_t task, int role);

	/*
	 * get_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 * active_count >= 1 (at least the given task is active)
	 */
	int (*get_taskrole)(coalition_t coal, task_t task);

	/*
	 * iterate_tasks
	 * pre-condition: coalition locked
	 */
	void (*iterate_tasks)(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t));
};
171
172 /*
173 * COALITION_TYPE_RESOURCE
174 */
175
176 static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
177 static void i_coal_resource_dealloc(coalition_t coal);
178 static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
179 static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
180 static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
181 task_t task, int role);
182 static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
183 static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
184 void (*callback)(coalition_t, void *, task_t));
185
186 /*
187 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
188 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
189 */
190 static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
191
/*
 * Type-specific state for COALITION_TYPE_RESOURCE coalitions.
 * Accumulates resource usage across member tasks; most counters below
 * are rolled up from a task when it is removed from the coalition
 * (see i_coal_resource_remove_task).
 */
struct i_resource_coalition {
	/*
	 * This keeps track of resource utilization of tasks that are no longer active
	 * in the coalition and is updated when a task is removed from the coalition.
	 */
	ledger_t ledger;
	uint64_t bytesread;    /* disk bytes read by exited tasks */
	uint64_t byteswritten; /* total I/O minus disk reads, from exited tasks */
	uint64_t energy;
	uint64_t gpu_time;     /* rolled up from task_gpu_utilisation() (x86_64 only) */
	uint64_t logical_immediate_writes;
	uint64_t logical_deferred_writes;
	uint64_t logical_invalidated_writes;
	uint64_t logical_metadata_writes;
	uint64_t logical_immediate_writes_to_external;
	uint64_t logical_deferred_writes_to_external;
	uint64_t logical_invalidated_writes_to_external;
	uint64_t logical_metadata_writes_to_external;
	uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per effective QoS class */
	uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per requested QoS class */
	uint64_t cpu_instructions;
	uint64_t cpu_cycles;
	struct recount_coalition co_recount;

	uint64_t task_count;      /* tasks that have started in this coalition */
	uint64_t dead_task_count; /* tasks that have exited in this coalition;
	                           * subtract from task_count to get count
	                           * of "active" tasks */
	/*
	 * Count the length of time this coalition had at least one active task.
	 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
	 */
	uint64_t last_became_nonempty_time;
	uint64_t time_nonempty;

	queue_head_t tasks; /* List of active tasks in the coalition */
	/*
	 * This ledger is used for triggering resource exception. For the tracked resources, this is updated
	 * when the member tasks' resource usage changes.
	 */
	ledger_t resource_monitor_ledger;
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
};
237
238 /*
239 * COALITION_TYPE_JETSAM
240 */
241
242 static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
243 static void i_coal_jetsam_dealloc(coalition_t coal);
244 static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
245 static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
246 static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
247 task_t task, int role);
248 int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
249 static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
250 void (*callback)(coalition_t, void *, task_t));
251
/*
 * Type-specific state for COALITION_TYPE_JETSAM coalitions: the leader
 * task, per-role membership lists, and the associated thread group.
 * NOTE(review): role-to-queue mapping is implemented by the jetsam ops
 * (i_coal_jetsam_adopt_task / set_taskrole), not visible in this chunk
 * -- confirm semantics there.
 */
struct i_jetsam_coalition {
	task_t leader;               /* see coalition_get_leader() */
	queue_head_t extensions;     /* member tasks, by role */
	queue_head_t services;
	queue_head_t other;
	struct thread_group *thread_group;
	bool swap_enabled;
};
260
261
262 /*
263 * main coalition structure
264 */
/*
 * Main coalition structure.  Allocated from coalition_zone; the trailing
 * union holds the state specific to the coalition's type.  The embedded
 * mutex protects the coalition's mutable fields; global list membership
 * is protected separately by coalitions_list_lock.
 */
struct coalition {
	uint64_t id;   /* monotonically increasing (from coalition_next_id) */
	uint32_t type; /* COALITION_TYPE_*; selects the union arm below */
	uint32_t role; /* default task role (background, adaptive, interactive, etc) */
	uint32_t ref_count; /* Number of references to the memory containing this struct */
	uint32_t active_count; /* Number of members of (tasks in) the
	                        * coalition, plus vouchers referring
	                        * to the coalition */
	uint32_t focal_task_count; /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
	uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */

	/* coalition flags */
	uint32_t privileged : 1; /* Members of this coalition may create
	                          * and manage coalitions and may posix_spawn
	                          * processes into selected coalitions */
	/* ast? */
	/* voucher */
	uint32_t termrequested : 1; /* launchd has requested termination when coalition becomes empty */
	uint32_t terminated : 1; /* coalition became empty and spawns are now forbidden */
	uint32_t reaped : 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
	uint32_t notified : 1; /* no-more-processes notification was sent via special port */
	uint32_t efficient : 1; /* launchd has marked the coalition as efficient */
#if DEVELOPMENT || DEBUG
	uint32_t should_notify : 1; /* should this coalition send notifications (default: yes) */
#endif

	queue_chain_t coalitions; /* global list of coalitions */

	decl_lck_mtx_data(, lock); /* Coalition lock. */

	/* put coalition type-specific structures here; valid arm chosen by `type` */
	union {
		struct i_resource_coalition r;
		struct i_jetsam_coalition j;
	};
};
301
302 /*
303 * register different coalition types:
304 * these must be kept in the order specified in coalition.h
305 */
306 static const struct coalition_type
307 s_coalition_types[COALITION_NUM_TYPES] = {
308 {
309 COALITION_TYPE_RESOURCE,
310 1,
311 i_coal_resource_init,
312 i_coal_resource_dealloc,
313 i_coal_resource_adopt_task,
314 i_coal_resource_remove_task,
315 i_coal_resource_set_taskrole,
316 i_coal_resource_get_taskrole,
317 i_coal_resource_iterate_tasks,
318 },
319 {
320 COALITION_TYPE_JETSAM,
321 1,
322 i_coal_jetsam_init,
323 i_coal_jetsam_dealloc,
324 i_coal_jetsam_adopt_task,
325 i_coal_jetsam_remove_task,
326 i_coal_jetsam_set_taskrole,
327 i_coal_jetsam_get_taskrole,
328 i_coal_jetsam_iterate_tasks,
329 },
330 };
331
332 ZONE_DEFINE_TYPE(coalition_zone, "coalitions",
333 struct coalition, ZC_ZFREE_CLEARMEM);
334
/*
 * Dispatch a type-specific operation through s_coalition_types[],
 * using the coalition's `type` field as the index.
 */
#define coal_call(coal, func, ...) \
	(s_coalition_types[(coal)->type].func)((coal), ## __VA_ARGS__)


/*
 * Coalition mutex helpers.  The argument is parenthesized so that an
 * expression argument (e.g. *cp) expands correctly, and the do/while(0)
 * wrapper makes each usable as a single statement.
 */
#define coalition_lock(c) do { lck_mtx_lock(&(c)->lock); } while (0)
#define coalition_unlock(c) do { lck_mtx_unlock(&(c)->lock); } while (0)
341
342 /*
343 * Define the coalition type to track focal tasks.
344 * On embedded, track them using jetsam coalitions since they have associated thread
345 * groups which reflect this property as a flag (and pass it down to CLPC).
346 * On non-embedded platforms, since not all coalitions have jetsam coalitions
347 * track focal counts on the resource coalition.
348 */
349 #if !XNU_TARGET_OS_OSX
350 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM
351 #else /* !XNU_TARGET_OS_OSX */
352 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE
353 #endif /* !XNU_TARGET_OS_OSX */
354
355
356 /*
357 *
358 * Coalition ledger implementation
359 *
360 */
361
362 struct coalition_ledger_indices coalition_ledgers =
363 {.logical_writes = -1, };
364 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor);
365
366 ledger_t
coalition_ledger_get_from_task(task_t task)367 coalition_ledger_get_from_task(task_t task)
368 {
369 ledger_t ledger = LEDGER_NULL;
370 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
371
372 if (coal != NULL && (!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]))) {
373 ledger = coal->r.resource_monitor_ledger;
374 ledger_reference(ledger);
375 }
376 return ledger;
377 }
378
379
/* Control operations accepted by coalition_io_monitor_ctl(). */
enum {
	COALITION_IO_LEDGER_ENABLE,
	COALITION_IO_LEDGER_DISABLE
};
384
385 void
coalition_io_monitor_ctl(struct coalition * coalition,uint32_t flags,int64_t limit)386 coalition_io_monitor_ctl(struct coalition *coalition, uint32_t flags, int64_t limit)
387 {
388 ledger_t ledger = coalition->r.resource_monitor_ledger;
389
390 if (flags == COALITION_IO_LEDGER_ENABLE) {
391 /* Configure the logical I/O ledger */
392 ledger_set_limit(ledger, coalition_ledgers.logical_writes, (limit * 1024 * 1024), 0);
393 ledger_set_period(ledger, coalition_ledgers.logical_writes, (COALITION_LEDGER_MONITOR_INTERVAL_SECS * NSEC_PER_SEC));
394 } else if (flags == COALITION_IO_LEDGER_DISABLE) {
395 ledger_disable_refill(ledger, coalition_ledgers.logical_writes);
396 ledger_disable_callback(ledger, coalition_ledgers.logical_writes);
397 }
398 }
399
400 int
coalition_ledger_set_logical_writes_limit(struct coalition * coalition,int64_t limit)401 coalition_ledger_set_logical_writes_limit(struct coalition *coalition, int64_t limit)
402 {
403 int error = 0;
404
405 /* limit = -1 will be used to disable the limit and the callback */
406 if (limit > COALITION_MAX_LOGICAL_WRITES_LIMIT || limit == 0 || limit < -1) {
407 error = EINVAL;
408 goto out;
409 }
410
411 coalition_lock(coalition);
412 if (limit == -1) {
413 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, limit);
414 } else {
415 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, limit);
416 }
417 coalition_unlock(coalition);
418 out:
419 return error;
420 }
421
/*
 * Handle an I/O limit violation by the current task's resource
 * coalition: trace it, log it, notify the resource-violation listener,
 * then exponentially raise the monitor limit (or disable the monitor
 * once the cap is exceeded) so repeat offenders do not spam the
 * listener.  The deliberately loud function name shows up in backtraces.
 */
void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
	int pid = proc_selfpid();
	ledger_amount_t new_limit;
	task_t task = current_task();
	struct ledger_entry_info lei;
	kern_return_t kr;
	ledger_t ledger;
	struct coalition *coalition = task->coalition[COALITION_TYPE_RESOURCE];

	assert(coalition != NULL);
	ledger = coalition->r.resource_monitor_ledger;

	switch (flavor) {
	case FLAVOR_IO_LOGICAL_WRITES:
		ledger_get_entry_info(ledger, coalition_ledgers.logical_writes, &lei);
		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
		break;
	default:
		/* unknown flavor: nothing to report */
		goto Exit;
	}

	os_log(OS_LOG_DEFAULT, "Coalition [%lld] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]. Triggered by process [%d]\n",
	    coalition->id, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)),
	    (lei.lei_refill_period / NSEC_PER_SEC), pid);

	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
	if (kr) {
		os_log(OS_LOG_DEFAULT, "ERROR %#x returned from send_resource_violation(disk_writes, ...)\n", kr);
	}

	/*
	 * Continue to monitor the coalition after it hits the initial limit, but increase
	 * the limit exponentially so that we don't spam the listener.
	 */
	new_limit = (lei.lei_limit / 1024 / 1024) * 4; /* current limit in MB, quadrupled */
	coalition_lock(coalition);
	if (new_limit > COALITION_MAX_LOGICAL_WRITES_LIMIT) {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, -1);
	} else {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, new_limit);
	}
	coalition_unlock(coalition);

Exit:
	return;
}
470
471 void
coalition_io_rate_exceeded(int warning,const void * param0,__unused const void * param1)472 coalition_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
473 {
474 if (warning == 0) {
475 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO((int)param0);
476 }
477 }
478
479 void
init_coalition_ledgers(void)480 init_coalition_ledgers(void)
481 {
482 ledger_template_t t;
483 assert(coalition_ledger_template == NULL);
484
485 if ((t = ledger_template_create("Per-coalition ledgers")) == NULL) {
486 panic("couldn't create coalition ledger template");
487 }
488
489 coalition_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
490
491 if (coalition_ledgers.logical_writes < 0) {
492 panic("couldn't create entries for coaliton ledger template");
493 }
494
495 ledger_set_callback(t, coalition_ledgers.logical_writes, coalition_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
496 ledger_template_complete(t);
497
498 coalition_task_ledger_template = ledger_template_copy(task_ledger_template, "Coalition task ledgers");
499
500 if (coalition_task_ledger_template == NULL) {
501 panic("couldn't create coalition task ledger template");
502 }
503
504 ledger_template_complete(coalition_task_ledger_template);
505
506 coalition_ledger_template = t;
507 }
508
509 void
coalition_io_ledger_update(task_t task,int32_t flavor,boolean_t is_credit,uint32_t io_size)510 coalition_io_ledger_update(task_t task, int32_t flavor, boolean_t is_credit, uint32_t io_size)
511 {
512 ledger_t ledger;
513 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
514
515 assert(coal != NULL);
516 ledger = coal->r.resource_monitor_ledger;
517 if (LEDGER_VALID(ledger)) {
518 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
519 if (is_credit) {
520 ledger_credit(ledger, coalition_ledgers.logical_writes, io_size);
521 } else {
522 ledger_debit(ledger, coalition_ledgers.logical_writes, io_size);
523 }
524 }
525 }
526 }
527
528 static void
coalition_notify_user(uint64_t id,uint32_t flags)529 coalition_notify_user(uint64_t id, uint32_t flags)
530 {
531 mach_port_t user_port;
532 kern_return_t kr;
533
534 kr = host_get_coalition_port(host_priv_self(), &user_port);
535 if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
536 return;
537 }
538
539 coalition_notification(user_port, id, flags);
540 ipc_port_release_send(user_port);
541 }
542
543 /*
544 *
545 * COALITION_TYPE_RESOURCE
546 *
547 */
548 static kern_return_t
i_coal_resource_init(coalition_t coal,boolean_t privileged,boolean_t efficient)549 i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
550 {
551 #pragma unused(privileged, efficient)
552
553 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
554
555 recount_coalition_init(&coal->r.co_recount);
556 coal->r.ledger = ledger_instantiate(coalition_task_ledger_template,
557 LEDGER_CREATE_ACTIVE_ENTRIES);
558 if (coal->r.ledger == NULL) {
559 return KERN_RESOURCE_SHORTAGE;
560 }
561
562 coal->r.resource_monitor_ledger = ledger_instantiate(coalition_ledger_template,
563 LEDGER_CREATE_ACTIVE_ENTRIES);
564 if (coal->r.resource_monitor_ledger == NULL) {
565 return KERN_RESOURCE_SHORTAGE;
566 }
567
568 queue_init(&coal->r.tasks);
569
570 return KERN_SUCCESS;
571 }
572
573 static void
i_coal_resource_dealloc(coalition_t coal)574 i_coal_resource_dealloc(coalition_t coal)
575 {
576 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
577
578 recount_coalition_deinit(&coal->r.co_recount);
579 ledger_dereference(coal->r.ledger);
580 ledger_dereference(coal->r.resource_monitor_ledger);
581 }
582
583 static kern_return_t
i_coal_resource_adopt_task(coalition_t coal,task_t task)584 i_coal_resource_adopt_task(coalition_t coal, task_t task)
585 {
586 struct i_resource_coalition *cr;
587
588 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
589 assert(queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));
590
591 cr = &coal->r;
592 cr->task_count++;
593
594 if (cr->task_count < cr->dead_task_count) {
595 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
596 __func__, coal, coal->id, coal_type_str(coal->type),
597 cr->task_count, cr->dead_task_count);
598 }
599
600 /* If moving from 0->1 active tasks */
601 if (cr->task_count - cr->dead_task_count == 1) {
602 cr->last_became_nonempty_time = mach_absolute_time();
603 }
604
605 /* put the task on the coalition's list of tasks */
606 enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]);
607
608 coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
609 task_pid(task), coal->id, cr->task_count, cr->dead_task_count,
610 cr->last_became_nonempty_time);
611
612 return KERN_SUCCESS;
613 }
614
/*
 * Remove `task` from the resource coalition: roll its resource usage up
 * into the coalition's dead-task accounting, stop the non-empty clock if
 * this was the last active member, and unlink it from the task list.
 * Called with the coalition locked.
 */
static kern_return_t
i_coal_resource_remove_task(coalition_t coal, task_t task)
{
	struct i_resource_coalition *cr;

	assert(coal && coal->type == COALITION_TYPE_RESOURCE);
	assert(task->coalition[COALITION_TYPE_RESOURCE] == coal);
	assert(!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

	/*
	 * handle resource coalition accounting rollup for dead tasks
	 */
	cr = &coal->r;

	cr->dead_task_count++;

	if (cr->task_count < cr->dead_task_count) {
		panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
		    __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count);
	}

	/* If moving from 1->0 active tasks */
	if (cr->task_count - cr->dead_task_count == 0) {
		uint64_t last_time_nonempty = mach_absolute_time() - cr->last_became_nonempty_time;
		cr->last_became_nonempty_time = 0;
		cr->time_nonempty += last_time_nonempty;
	}

	/* Do not roll up for exec'd task or exec copy task (would double-count) */
	if (!task_is_exec_copy(task) && !task_did_exec(task)) {
		ledger_rollup(cr->ledger, task->ledger);
		cr->bytesread += task->task_io_stats->disk_reads.size;
		cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
#if defined(__x86_64__)
		cr->gpu_time += task_gpu_utilisation(task);
#endif /* defined(__x86_64__) */

		cr->logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
		cr->logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
		cr->logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
		cr->logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
		cr->logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
		cr->logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
		cr->logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
		cr->logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
#if CONFIG_PHYS_WRITE_ACCT
		cr->fs_metadata_writes += task->task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
		task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos);
		recount_coalition_rollup_task(&cr->co_recount, &task->tk_recount);
	}

	/* remove the task from the coalition's list */
	remqueue(&task->task_coalition[COALITION_TYPE_RESOURCE]);
	queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);

	coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
	    task_pid(task), coal->id, cr->task_count, cr->dead_task_count);

	return KERN_SUCCESS;
}
676
677 static kern_return_t
i_coal_resource_set_taskrole(__unused coalition_t coal,__unused task_t task,__unused int role)678 i_coal_resource_set_taskrole(__unused coalition_t coal,
679 __unused task_t task, __unused int role)
680 {
681 return KERN_SUCCESS;
682 }
683
684 static int
i_coal_resource_get_taskrole(__unused coalition_t coal,__unused task_t task)685 i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task)
686 {
687 task_t t;
688
689 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
690
691 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
692 if (t == task) {
693 return COALITION_TASKROLE_UNDEF;
694 }
695 }
696
697 return -1;
698 }
699
700 static void
i_coal_resource_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))701 i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
702 {
703 task_t t;
704 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
705
706 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE])
707 callback(coal, ctx, t);
708 }
709
710 #if CONFIG_PHYS_WRITE_ACCT
711 extern uint64_t kernel_pm_writes;
712 #endif /* CONFIG_PHYS_WRITE_ACCT */
713
714 kern_return_t
coalition_resource_usage_internal(coalition_t coal,struct coalition_resource_usage * cru_out)715 coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out)
716 {
717 kern_return_t kr;
718 ledger_amount_t credit, debit;
719 int i;
720
721 if (coal->type != COALITION_TYPE_RESOURCE) {
722 return KERN_INVALID_ARGUMENT;
723 }
724
725 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
726 for (i = 0; i < COALITION_NUM_TYPES; i++) {
727 if (coal == corpse_coalition[i]) {
728 return KERN_INVALID_ARGUMENT;
729 }
730 }
731
732 ledger_t sum_ledger = ledger_instantiate(coalition_task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
733 if (sum_ledger == LEDGER_NULL) {
734 return KERN_RESOURCE_SHORTAGE;
735 }
736
737 coalition_lock(coal);
738
739 /*
740 * Start with the coalition's ledger, which holds the totals from all
741 * the dead tasks.
742 */
743 ledger_rollup(sum_ledger, coal->r.ledger);
744 uint64_t bytesread = coal->r.bytesread;
745 uint64_t byteswritten = coal->r.byteswritten;
746 uint64_t gpu_time = coal->r.gpu_time;
747 uint64_t logical_immediate_writes = coal->r.logical_immediate_writes;
748 uint64_t logical_deferred_writes = coal->r.logical_deferred_writes;
749 uint64_t logical_invalidated_writes = coal->r.logical_invalidated_writes;
750 uint64_t logical_metadata_writes = coal->r.logical_metadata_writes;
751 uint64_t logical_immediate_writes_to_external = coal->r.logical_immediate_writes_to_external;
752 uint64_t logical_deferred_writes_to_external = coal->r.logical_deferred_writes_to_external;
753 uint64_t logical_invalidated_writes_to_external = coal->r.logical_invalidated_writes_to_external;
754 uint64_t logical_metadata_writes_to_external = coal->r.logical_metadata_writes_to_external;
755 #if CONFIG_PHYS_WRITE_ACCT
756 uint64_t fs_metadata_writes = coal->r.fs_metadata_writes;
757 #endif /* CONFIG_PHYS_WRITE_ACCT */
758 int64_t cpu_time_billed_to_me = 0;
759 int64_t cpu_time_billed_to_others = 0;
760 int64_t energy_billed_to_me = 0;
761 int64_t energy_billed_to_others = 0;
762 struct recount_usage stats_sum = { 0 };
763 struct recount_usage stats_perf_only = { 0 };
764 recount_coalition_usage_perf_only(&coal->r.co_recount, &stats_sum,
765 &stats_perf_only);
766 uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES] = { 0 };
767 uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES] = { 0 };
768 /*
769 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
770 * out from under us, since we hold the coalition lock.
771 */
772 task_t task;
773 qe_foreach_element(task, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
774 /*
775 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
776 * Cannot take task lock after taking coaliton lock
777 */
778 if (task_is_exec_copy(task) || task_did_exec(task)) {
779 continue;
780 }
781
782 ledger_rollup(sum_ledger, task->ledger);
783 bytesread += task->task_io_stats->disk_reads.size;
784 byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
785 #if defined(__x86_64__)
786 gpu_time += task_gpu_utilisation(task);
787 #endif /* defined(__x86_64__) */
788
789 logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
790 logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
791 logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
792 logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
793 logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
794 logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
795 logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
796 logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
797 #if CONFIG_PHYS_WRITE_ACCT
798 fs_metadata_writes += task->task_fs_metadata_writes;
799 #endif /* CONFIG_PHYS_WRITE_ACCT */
800
801 task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos);
802 recount_task_usage_perf_only(task, &stats_sum, &stats_perf_only);
803 }
804
805 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_me, (int64_t *)&cpu_time_billed_to_me);
806 if (kr != KERN_SUCCESS || cpu_time_billed_to_me < 0) {
807 cpu_time_billed_to_me = 0;
808 }
809
810 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_others, (int64_t *)&cpu_time_billed_to_others);
811 if (kr != KERN_SUCCESS || cpu_time_billed_to_others < 0) {
812 cpu_time_billed_to_others = 0;
813 }
814
815 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_me, (int64_t *)&energy_billed_to_me);
816 if (kr != KERN_SUCCESS || energy_billed_to_me < 0) {
817 energy_billed_to_me = 0;
818 }
819
820 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_others, (int64_t *)&energy_billed_to_others);
821 if (kr != KERN_SUCCESS || energy_billed_to_others < 0) {
822 energy_billed_to_others = 0;
823 }
824
825 /* collect information from the coalition itself */
826 cru_out->tasks_started = coal->r.task_count;
827 cru_out->tasks_exited = coal->r.dead_task_count;
828
829 uint64_t time_nonempty = coal->r.time_nonempty;
830 uint64_t last_became_nonempty_time = coal->r.last_became_nonempty_time;
831
832 coalition_unlock(coal);
833
834 /* Copy the totals out of sum_ledger */
835 kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time,
836 &credit, &debit);
837 if (kr != KERN_SUCCESS) {
838 credit = 0;
839 }
840 cru_out->cpu_time = credit;
841 cru_out->cpu_time_billed_to_me = (uint64_t)cpu_time_billed_to_me;
842 cru_out->cpu_time_billed_to_others = (uint64_t)cpu_time_billed_to_others;
843 cru_out->energy_billed_to_me = (uint64_t)energy_billed_to_me;
844 cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others;
845
846 kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups,
847 &credit, &debit);
848 if (kr != KERN_SUCCESS) {
849 credit = 0;
850 }
851 cru_out->interrupt_wakeups = credit;
852
853 kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups,
854 &credit, &debit);
855 if (kr != KERN_SUCCESS) {
856 credit = 0;
857 }
858 cru_out->platform_idle_wakeups = credit;
859
860 cru_out->bytesread = bytesread;
861 cru_out->byteswritten = byteswritten;
862 cru_out->gpu_time = gpu_time;
863 cru_out->logical_immediate_writes = logical_immediate_writes;
864 cru_out->logical_deferred_writes = logical_deferred_writes;
865 cru_out->logical_invalidated_writes = logical_invalidated_writes;
866 cru_out->logical_metadata_writes = logical_metadata_writes;
867 cru_out->logical_immediate_writes_to_external = logical_immediate_writes_to_external;
868 cru_out->logical_deferred_writes_to_external = logical_deferred_writes_to_external;
869 cru_out->logical_invalidated_writes_to_external = logical_invalidated_writes_to_external;
870 cru_out->logical_metadata_writes_to_external = logical_metadata_writes_to_external;
871 #if CONFIG_PHYS_WRITE_ACCT
872 cru_out->fs_metadata_writes = fs_metadata_writes;
873 #else
874 cru_out->fs_metadata_writes = 0;
875 #endif /* CONFIG_PHYS_WRITE_ACCT */
876 cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES;
877 memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos));
878
879 cru_out->cpu_ptime = stats_perf_only.ru_system_time_mach +
880 stats_perf_only.ru_user_time_mach;
881 #if CONFIG_PERVASIVE_CPI
882 cru_out->cpu_cycles = stats_sum.ru_cycles;
883 cru_out->cpu_instructions = stats_sum.ru_instructions;
884 cru_out->cpu_pinstructions = stats_perf_only.ru_instructions;
885 cru_out->cpu_pcycles = stats_perf_only.ru_cycles;
886 #endif // CONFIG_PERVASIVE_CPI
887
888 ledger_dereference(sum_ledger);
889 sum_ledger = LEDGER_NULL;
890
891 #if CONFIG_PERVASIVE_ENERGY
892 cru_out->energy = stats_sum.ru_energy_nj;
893 #endif /* CONFIG_PERVASIVE_ENERGY */
894
895 #if CONFIG_PHYS_WRITE_ACCT
896 // kernel_pm_writes are only recorded under kernel_task coalition
897 if (coalition_id(coal) == COALITION_ID_KERNEL) {
898 cru_out->pm_writes = kernel_pm_writes;
899 } else {
900 cru_out->pm_writes = 0;
901 }
902 #else
903 cru_out->pm_writes = 0;
904 #endif /* CONFIG_PHYS_WRITE_ACCT */
905
906 if (last_became_nonempty_time) {
907 time_nonempty += mach_absolute_time() - last_became_nonempty_time;
908 }
909 absolutetime_to_nanoseconds(time_nonempty, &cru_out->time_nonempty);
910
911 return KERN_SUCCESS;
912 }
913
914 /*
915 *
916 * COALITION_TYPE_JETSAM
917 *
918 */
/*
 * i_coal_jetsam_init
 * Type-specific initialization for a COALITION_TYPE_JETSAM coalition:
 * clears the leader and role queues and (if CONFIG_THREAD_GROUPS)
 * associates a thread group chosen by the coalition's role.
 */
static kern_return_t
i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
{
	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	(void)privileged;
	(void)efficient;

	/* no leader until one is promoted via set_taskrole */
	coal->j.leader = TASK_NULL;
	queue_head_init(coal->j.extensions);
	queue_head_init(coal->j.services);
	queue_head_init(coal->j.other);

#if CONFIG_THREAD_GROUPS
	switch (coal->role) {
	case COALITION_ROLE_SYSTEM:
		/* system/background roles reuse well-known thread groups */
		coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM);
		break;
	case COALITION_ROLE_BACKGROUND:
		coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND);
		break;
	default:
		/* other roles get a fresh thread group, flagged efficient if requested */
		coal->j.thread_group = thread_group_create_and_retain(efficient ? THREAD_GROUP_FLAGS_EFFICIENT : THREAD_GROUP_FLAGS_DEFAULT);
	}
	assert(coal->j.thread_group != NULL);
#endif
	return KERN_SUCCESS;
}
946
/*
 * i_coal_jetsam_dealloc
 * Type-specific teardown for a jetsam coalition. All member tasks must
 * already have been removed; only the thread group reference remains.
 */
static void
i_coal_jetsam_dealloc(__unused coalition_t coal)
{
	assert(coal && coal->type == COALITION_TYPE_JETSAM);

	/* the coalition should be completely clear at this point */
	assert(queue_empty(&coal->j.extensions));
	assert(queue_empty(&coal->j.services));
	assert(queue_empty(&coal->j.other));
	assert(coal->j.leader == TASK_NULL);

#if CONFIG_THREAD_GROUPS
	/* disassociate from the thread group */
	assert(coal->j.thread_group != NULL);
	thread_group_release(coal->j.thread_group);
	coal->j.thread_group = NULL;
#endif
}
965
/*
 * i_coal_jetsam_adopt_task
 * Link 'task' into the jetsam coalition's "other" (undefined-role) queue.
 * A later set_taskrole call may move it to a specific role queue.
 * NOTE(review): presumably called with the coalition locked — confirm
 * against coalition_adopt_task_internal.
 */
static kern_return_t
i_coal_jetsam_adopt_task(coalition_t coal, task_t task)
{
	struct i_jetsam_coalition *cj;
	assert(coal && coal->type == COALITION_TYPE_JETSAM);

	cj = &coal->j;

	/* the task must not already be linked on any jetsam role queue */
	assert(queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));

	/* put each task initially in the "other" list */
	enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]);
	coal_dbg("coalition %lld adopted PID:%d as UNDEF",
	    coal->id, task_pid(task));

	return KERN_SUCCESS;
}
983
984 static kern_return_t
i_coal_jetsam_remove_task(coalition_t coal,task_t task)985 i_coal_jetsam_remove_task(coalition_t coal, task_t task)
986 {
987 assert(coal && coal->type == COALITION_TYPE_JETSAM);
988 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
989
990 coal_dbg("removing PID:%d from coalition id:%lld",
991 task_pid(task), coal->id);
992
993 if (task == coal->j.leader) {
994 coal->j.leader = NULL;
995 coal_dbg(" PID:%d was the leader!", task_pid(task));
996 } else {
997 assert(!queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
998 }
999
1000 /* remove the task from the specific coalition role queue */
1001 remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
1002 queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);
1003
1004 return KERN_SUCCESS;
1005 }
1006
/*
 * i_coal_jetsam_set_taskrole
 * Move 'task' onto the jetsam coalition queue (or leader slot) that
 * corresponds to 'role'. The task must already be a coalition member.
 * NOTE(review): presumably called with the coalition locked — confirm
 * against coalitions_set_roles.
 */
static kern_return_t
i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role)
{
	struct i_jetsam_coalition *cj;
	queue_t q = NULL;
	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

	cj = &coal->j;

	switch (role) {
	case COALITION_TASKROLE_LEADER:
		coal_dbg("setting PID:%d as LEADER of %lld",
		    task_pid(task), coal->id);
		if (cj->leader != TASK_NULL) {
			/* re-queue the exiting leader onto the "other" list */
			coal_dbg(" re-queue existing leader (%d) as OTHER",
			    task_pid(cj->leader));
			re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]);
		}
		/*
		 * remove the task from the "other" list
		 * (where it was put by default)
		 */
		remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
		queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);

		/* set the coalition leader */
		cj->leader = task;
		break;
	case COALITION_TASKROLE_XPC:
		coal_dbg("setting PID:%d as XPC in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->services;
		break;
	case COALITION_TASKROLE_EXT:
		coal_dbg("setting PID:%d as EXT in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->extensions;
		break;
	case COALITION_TASKROLE_NONE:
		/*
		 * Tasks with a role of "none" should fall through to an
		 * undefined role so long as the task is currently a member
		 * of the coalition. This scenario can happen if a task is
		 * killed (usually via jetsam) during exec.
		 */
		if (task->coalition[COALITION_TYPE_JETSAM] != coal) {
			panic("%s: task %p attempting to set role %d "
			    "in coalition %p to which it does not belong!", __func__, task, role, coal);
		}
		OS_FALLTHROUGH;
	case COALITION_TASKROLE_UNDEF:
		coal_dbg("setting PID:%d as UNDEF in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->other;
		break;
	default:
		panic("%s: invalid role(%d) for task", __func__, role);
		return KERN_INVALID_ARGUMENT;
	}

	/* for queue-backed roles, move the task onto the selected queue */
	if (q != NULL) {
		re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]);
	}

	return KERN_SUCCESS;
}
1075
1076 int
i_coal_jetsam_get_taskrole(coalition_t coal,task_t task)1077 i_coal_jetsam_get_taskrole(coalition_t coal, task_t task)
1078 {
1079 struct i_jetsam_coalition *cj;
1080 task_t t;
1081
1082 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1083 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
1084
1085 cj = &coal->j;
1086
1087 if (task == cj->leader) {
1088 return COALITION_TASKROLE_LEADER;
1089 }
1090
1091 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) {
1092 if (t == task) {
1093 return COALITION_TASKROLE_XPC;
1094 }
1095 }
1096
1097 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) {
1098 if (t == task) {
1099 return COALITION_TASKROLE_EXT;
1100 }
1101 }
1102
1103 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) {
1104 if (t == task) {
1105 return COALITION_TASKROLE_UNDEF;
1106 }
1107 }
1108
1109 /* task not in the coalition?! */
1110 return COALITION_TASKROLE_NONE;
1111 }
1112
1113 static void
i_coal_jetsam_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))1114 i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
1115 {
1116 struct i_jetsam_coalition *cj;
1117 task_t t;
1118
1119 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1120
1121 cj = &coal->j;
1122
1123 if (cj->leader) {
1124 callback(coal, ctx, cj->leader);
1125 }
1126
1127 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM])
1128 callback(coal, ctx, t);
1129
1130 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM])
1131 callback(coal, ctx, t);
1132
1133 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM])
1134 callback(coal, ctx, t);
1135 }
1136
1137
1138 /*
1139 *
1140 * Main Coalition implementation
1141 *
1142 */
1143
1144 /*
1145 * coalition_create_internal
1146 * Returns: New coalition object, referenced for the caller and unlocked.
1147 * Condition: coalitions_list_lock must be UNLOCKED.
1148 */
kern_return_t
coalition_create_internal(int type, int role, boolean_t privileged, boolean_t efficient, coalition_t *out, uint64_t *coalition_id)
{
	kern_return_t kr;
	struct coalition *new_coal;
	uint64_t cid;
	uint32_t ctype;

	/* reject out-of-range coalition types */
	if (type < 0 || type > COALITION_TYPE_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Z_NOFAIL: allocation cannot return NULL */
	new_coal = zalloc_flags(coalition_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	new_coal->type = type;
	new_coal->role = role;

	/* initialize type-specific resources */
	kr = coal_call(new_coal, init, privileged, efficient);
	if (kr != KERN_SUCCESS) {
		zfree(coalition_zone, new_coal);
		return kr;
	}

	/* One for caller, one for coalitions list */
	new_coal->ref_count = 2;

	new_coal->privileged = privileged ? TRUE : FALSE;
	new_coal->efficient = efficient ? TRUE : FALSE;
#if DEVELOPMENT || DEBUG
	/* termination notifications default on for dev/debug kernels */
	new_coal->should_notify = 1;
#endif

	lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, LCK_ATTR_NULL);

	/* assign a unique id and publish the coalition on the global list */
	lck_rw_lock_exclusive(&coalitions_list_lock);
	new_coal->id = coalition_next_id++;
	coalition_count++;
	enqueue_tail(&coalitions_q, &new_coal->coalitions);

#if CONFIG_THREAD_GROUPS
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
	    new_coal->id, new_coal->type,
	    (new_coal->type == COALITION_TYPE_JETSAM && new_coal->j.thread_group) ?
	    thread_group_get_id(new_coal->j.thread_group) : 0);

#else
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
	    new_coal->id, new_coal->type);
#endif
	/* snapshot id/type before dropping the list lock */
	cid = new_coal->id;
	ctype = new_coal->type;
	lck_rw_unlock_exclusive(&coalitions_list_lock);

	coal_dbg("id:%llu, type:%s", cid, coal_type_str(ctype));

	if (coalition_id != NULL) {
		*coalition_id = cid;
	}

	*out = new_coal;
	return KERN_SUCCESS;
}
1212
1213 /*
1214 * coalition_release
1215 * Condition: coalition must be UNLOCKED.
1216 * */
void
coalition_release(coalition_t coal)
{
	/* TODO: This can be done with atomics. */
	coalition_lock(coal);
	coal->ref_count--;

#if COALITION_DEBUG
	uint32_t rc = coal->ref_count;
	uint32_t ac = coal->active_count;
#endif /* COALITION_DEBUG */

	/* coal_dbg only evaluates its args when COALITION_DEBUG is set */
	coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
	    coal->id, coal_type_str(coal->type), rc, ac,
	    rc <= 0 ? ", will deallocate now" : "");

	if (coal->ref_count > 0) {
		coalition_unlock(coal);
		return;
	}

	/* last reference dropped: the coalition must be fully torn down */
	assert(coal->termrequested);
	assert(coal->terminated);
	assert(coal->active_count == 0);
	assert(coal->reaped);
	assert(coal->focal_task_count == 0);
	assert(coal->nonfocal_task_count == 0);
#if CONFIG_THREAD_GROUPS
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
	    coal->id, coal->type,
	    coal->type == COALITION_TYPE_JETSAM ?
	    coal->j.thread_group : 0);
#else
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
	    coal->id, coal->type);
#endif

	/* release type-specific resources (e.g. jetsam thread group) */
	coal_call(coal, dealloc);

	coalition_unlock(coal);

	lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);

	zfree(coalition_zone, coal);
}
1262
1263 /*
1264 * coalition_find_by_id_internal
1265 * Returns: Coalition object with specified id, NOT referenced.
1266 * If not found, returns COALITION_NULL.
1267 * If found, returns a locked coalition.
1268 *
1269 * Condition: No locks held
1270 */
1271 static coalition_t
coalition_find_by_id_internal(uint64_t coal_id)1272 coalition_find_by_id_internal(uint64_t coal_id)
1273 {
1274 coalition_t coal;
1275
1276 if (coal_id == 0) {
1277 return COALITION_NULL;
1278 }
1279
1280 lck_rw_lock_shared(&coalitions_list_lock);
1281 qe_foreach_element(coal, &coalitions_q, coalitions) {
1282 if (coal->id == coal_id) {
1283 coalition_lock(coal);
1284 lck_rw_unlock_shared(&coalitions_list_lock);
1285 return coal;
1286 }
1287 }
1288 lck_rw_unlock_shared(&coalitions_list_lock);
1289
1290 return COALITION_NULL;
1291 }
1292
1293 /*
1294 * coalition_find_by_id
1295 * Returns: Coalition object with specified id, referenced.
1296 * Condition: coalitions_list_lock must be UNLOCKED.
1297 */
coalition_t
coalition_find_by_id(uint64_t cid)
{
	coalition_t coal = coalition_find_by_id_internal(cid);

	if (coal == COALITION_NULL) {
		return COALITION_NULL;
	}

	/* coal is locked */

	/* a reaped coalition is on its way out; do not hand it back */
	if (coal->reaped) {
		coalition_unlock(coal);
		return COALITION_NULL;
	}

	/* taking a reference on a zero-ref coalition would resurrect it */
	if (coal->ref_count == 0) {
		panic("resurrecting coalition %p id:%llu type:%s, active_count:%u",
		    coal, coal->id, coal_type_str(coal->type), coal->active_count);
	}
	coal->ref_count++;
#if COALITION_DEBUG
	uint32_t rc = coal->ref_count;
#endif

	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u",
	    coal->id, coal_type_str(coal->type), rc);

	return coal;
}
1330
1331 /*
1332 * coalition_find_and_activate_by_id
1333 * Returns: Coalition object with specified id, referenced, and activated.
1334 * Condition: coalitions_list_lock must be UNLOCKED.
1335 * This is the function to use when putting a 'new' thing into a coalition,
1336 * like posix_spawn of an XPC service by launchd.
1337 * See also coalition_extend_active.
1338 */
coalition_t
coalition_find_and_activate_by_id(uint64_t cid)
{
	coalition_t coal = coalition_find_by_id_internal(cid);

	if (coal == COALITION_NULL) {
		return COALITION_NULL;
	}

	/* coal is locked */

	if (coal->reaped || coal->terminated) {
		/* Too late to put something new into this coalition, it's
		 * already on its way out the door */
		coalition_unlock(coal);
		return COALITION_NULL;
	}

	/* taking a reference on a zero-ref coalition would resurrect it */
	if (coal->ref_count == 0) {
		panic("resurrecting coalition %p id:%llu type:%s, active_count:%u",
		    coal, coal->id, coal_type_str(coal->type), coal->active_count);
	}

	/* one reference and one activation for the caller */
	coal->ref_count++;
	coal->active_count++;

#if COALITION_DEBUG
	uint32_t rc = coal->ref_count;
	uint32_t ac = coal->active_count;
#endif

	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
	    coal->id, coal_type_str(coal->type), rc, ac);

	return coal;
}
1377
1378 uint64_t
coalition_id(coalition_t coal)1379 coalition_id(coalition_t coal)
1380 {
1381 assert(coal != COALITION_NULL);
1382 return coal->id;
1383 }
1384
1385 void
task_coalition_ids(task_t task,uint64_t ids[COALITION_NUM_TYPES])1386 task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES])
1387 {
1388 int i;
1389 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1390 if (task->coalition[i]) {
1391 ids[i] = task->coalition[i]->id;
1392 } else {
1393 ids[i] = 0;
1394 }
1395 }
1396 }
1397
1398 void
task_coalition_roles(task_t task,int roles[COALITION_NUM_TYPES])1399 task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES])
1400 {
1401 int i;
1402 memset(roles, 0, COALITION_NUM_TYPES * sizeof(roles[0]));
1403
1404 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1405 if (task->coalition[i]) {
1406 coalition_lock(task->coalition[i]);
1407 roles[i] = coal_call(task->coalition[i],
1408 get_taskrole, task);
1409 coalition_unlock(task->coalition[i]);
1410 } else {
1411 roles[i] = COALITION_TASKROLE_NONE;
1412 }
1413 }
1414 }
1415
1416 int
task_coalition_role_for_type(task_t task,int coalition_type)1417 task_coalition_role_for_type(task_t task, int coalition_type)
1418 {
1419 coalition_t coal;
1420 int role;
1421 if (coalition_type >= COALITION_NUM_TYPES) {
1422 panic("Attempt to call task_coalition_role_for_type with invalid coalition_type: %d\n", coalition_type);
1423 }
1424 coal = task->coalition[coalition_type];
1425 if (coal == NULL) {
1426 return COALITION_TASKROLE_NONE;
1427 }
1428 coalition_lock(coal);
1429 role = coal_call(coal, get_taskrole, task);
1430 coalition_unlock(coal);
1431 return role;
1432 }
1433
1434 int
coalition_type(coalition_t coal)1435 coalition_type(coalition_t coal)
1436 {
1437 return coal->type;
1438 }
1439
1440 boolean_t
coalition_term_requested(coalition_t coal)1441 coalition_term_requested(coalition_t coal)
1442 {
1443 return coal->termrequested;
1444 }
1445
1446 boolean_t
coalition_is_terminated(coalition_t coal)1447 coalition_is_terminated(coalition_t coal)
1448 {
1449 return coal->terminated;
1450 }
1451
1452 boolean_t
coalition_is_reaped(coalition_t coal)1453 coalition_is_reaped(coalition_t coal)
1454 {
1455 return coal->reaped;
1456 }
1457
1458 boolean_t
coalition_is_privileged(coalition_t coal)1459 coalition_is_privileged(coalition_t coal)
1460 {
1461 return coal->privileged || unrestrict_coalition_syscalls;
1462 }
1463
1464 boolean_t
task_is_in_privileged_coalition(task_t task,int type)1465 task_is_in_privileged_coalition(task_t task, int type)
1466 {
1467 if (type < 0 || type > COALITION_TYPE_MAX) {
1468 return FALSE;
1469 }
1470 if (unrestrict_coalition_syscalls) {
1471 return TRUE;
1472 }
1473 if (!task->coalition[type]) {
1474 return FALSE;
1475 }
1476 return task->coalition[type]->privileged;
1477 }
1478
1479 void
task_coalition_update_gpu_stats(task_t task,uint64_t gpu_ns_delta)1480 task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta)
1481 {
1482 coalition_t coal;
1483
1484 assert(task != TASK_NULL);
1485 if (gpu_ns_delta == 0) {
1486 return;
1487 }
1488
1489 coal = task->coalition[COALITION_TYPE_RESOURCE];
1490 assert(coal != COALITION_NULL);
1491
1492 coalition_lock(coal);
1493 coal->r.gpu_time += gpu_ns_delta;
1494 coalition_unlock(coal);
1495 }
1496
1497 boolean_t
task_coalition_adjust_focal_count(task_t task,int count,uint32_t * new_count)1498 task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count)
1499 {
1500 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1501 if (coal == COALITION_NULL) {
1502 return FALSE;
1503 }
1504
1505 *new_count = os_atomic_add(&coal->focal_task_count, count, relaxed);
1506 assert(*new_count != UINT32_MAX);
1507 return TRUE;
1508 }
1509
1510 uint32_t
task_coalition_focal_count(task_t task)1511 task_coalition_focal_count(task_t task)
1512 {
1513 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1514 if (coal == COALITION_NULL) {
1515 return 0;
1516 }
1517
1518 return coal->focal_task_count;
1519 }
1520
1521 boolean_t
task_coalition_adjust_nonfocal_count(task_t task,int count,uint32_t * new_count)1522 task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count)
1523 {
1524 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1525 if (coal == COALITION_NULL) {
1526 return FALSE;
1527 }
1528
1529 *new_count = os_atomic_add(&coal->nonfocal_task_count, count, relaxed);
1530 assert(*new_count != UINT32_MAX);
1531 return TRUE;
1532 }
1533
1534 uint32_t
task_coalition_nonfocal_count(task_t task)1535 task_coalition_nonfocal_count(task_t task)
1536 {
1537 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1538 if (coal == COALITION_NULL) {
1539 return 0;
1540 }
1541
1542 return coal->nonfocal_task_count;
1543 }
1544
1545 #if CONFIG_THREAD_GROUPS
1546 struct thread_group *
task_coalition_get_thread_group(task_t task)1547 task_coalition_get_thread_group(task_t task)
1548 {
1549 coalition_t coal = task->coalition[COALITION_TYPE_JETSAM];
1550 /* return system thread group for non-jetsam coalitions */
1551 if (coal == COALITION_NULL) {
1552 return init_coalition[COALITION_TYPE_JETSAM]->j.thread_group;
1553 }
1554 return coal->j.thread_group;
1555 }
1556
1557
1558 struct thread_group *
kdp_coalition_get_thread_group(coalition_t coal)1559 kdp_coalition_get_thread_group(coalition_t coal)
1560 {
1561 if (coal->type != COALITION_TYPE_JETSAM) {
1562 return NULL;
1563 }
1564 assert(coal->j.thread_group != NULL);
1565 return coal->j.thread_group;
1566 }
1567
1568 struct thread_group *
coalition_get_thread_group(coalition_t coal)1569 coalition_get_thread_group(coalition_t coal)
1570 {
1571 if (coal->type != COALITION_TYPE_JETSAM) {
1572 return NULL;
1573 }
1574 assert(coal->j.thread_group != NULL);
1575 return thread_group_retain(coal->j.thread_group);
1576 }
1577
/*
 * coalition_set_thread_group
 * Replace the jetsam coalition's thread group with 'tg' and release the
 * coalition's reference on the previous group. No-op for non-jetsam
 * coalitions.
 */
void
coalition_set_thread_group(coalition_t coal, struct thread_group *tg)
{
	assert(coal != COALITION_NULL);
	assert(tg != NULL);

	/* only jetsam coalitions carry a thread group */
	if (coal->type != COALITION_TYPE_JETSAM) {
		return;
	}
	struct thread_group *old_tg = coal->j.thread_group;
	assert(old_tg != NULL);
	coal->j.thread_group = tg;

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_THREAD_GROUP_SET),
	    coal->id, coal->type, thread_group_get_id(tg));

	/* drop the coalition's reference on the group it previously held */
	thread_group_release(old_tg);
}
1596
1597 void
task_coalition_thread_group_focal_update(task_t task)1598 task_coalition_thread_group_focal_update(task_t task)
1599 {
1600 assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL);
1601 thread_group_flags_update_lock();
1602 uint32_t focal_count = task_coalition_focal_count(task);
1603 if (focal_count) {
1604 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1605 } else {
1606 thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1607 }
1608 thread_group_flags_update_unlock();
1609 }
1610
1611 void
task_coalition_thread_group_application_set(task_t task)1612 task_coalition_thread_group_application_set(task_t task)
1613 {
1614 /*
1615 * Setting the "Application" flag on the thread group is a one way transition.
1616 * Once a coalition has a single task with an application apptype, the
1617 * thread group associated with the coalition is tagged as Application.
1618 */
1619 thread_group_flags_update_lock();
1620 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_APPLICATION);
1621 thread_group_flags_update_unlock();
1622 }
1623
1624 #endif
1625
1626 void
coalition_for_each_task(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))1627 coalition_for_each_task(coalition_t coal, void *ctx,
1628 void (*callback)(coalition_t, void *, task_t))
1629 {
1630 assert(coal != COALITION_NULL);
1631
1632 coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
1633 coal, coal->id, coal_type_str(coal->type), coal->active_count);
1634
1635 coalition_lock(coal);
1636
1637 coal_call(coal, iterate_tasks, ctx, callback);
1638
1639 coalition_unlock(coal);
1640 }
1641
1642
/*
 * coalition_remove_active
 * Drop one activation on 'coal'. If termination was requested and this
 * was the last activation, mark the coalition terminated and (once only)
 * send the user-space termination notification outside the lock.
 */
void
coalition_remove_active(coalition_t coal)
{
	coalition_lock(coal);

	assert(!coal->reaped);
	assert(coal->active_count > 0);

	coal->active_count--;

	boolean_t do_notify = FALSE;
	uint64_t notify_id = 0;
	uint32_t notify_flags = 0;
	if (coal->termrequested && coal->active_count == 0) {
		/* We only notify once, when active_count reaches zero.
		 * We just decremented, so if it reached zero, we mustn't have
		 * notified already.
		 */
		assert(!coal->terminated);
		coal->terminated = TRUE;

		assert(!coal->notified);

		coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
		do_notify = coal->should_notify;
#else
		do_notify = TRUE;
#endif
		notify_id = coal->id;
		notify_flags = 0;
	}

#if COALITION_DEBUG
	uint64_t cid = coal->id;
	uint32_t rc = coal->ref_count;
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
	    cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " ");

	/* notification is sent after dropping the coalition lock */
	if (do_notify) {
		coalition_notify_user(notify_id, notify_flags);
	}
}
1691
1692 /* Used for kernel_task, launchd, launchd's early boot tasks... */
1693 kern_return_t
coalitions_adopt_init_task(task_t task)1694 coalitions_adopt_init_task(task_t task)
1695 {
1696 kern_return_t kr;
1697 kr = coalitions_adopt_task(init_coalition, task);
1698 if (kr != KERN_SUCCESS) {
1699 panic("failed to adopt task %p into default coalition: %d", task, kr);
1700 }
1701 return kr;
1702 }
1703
1704 /* Used for forked corpses. */
1705 kern_return_t
coalitions_adopt_corpse_task(task_t task)1706 coalitions_adopt_corpse_task(task_t task)
1707 {
1708 kern_return_t kr;
1709 kr = coalitions_adopt_task(corpse_coalition, task);
1710 if (kr != KERN_SUCCESS) {
1711 panic("failed to adopt task %p into corpse coalition: %d", task, kr);
1712 }
1713 return kr;
1714 }
1715
1716 /*
1717 * coalition_adopt_task_internal
1718 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
1719 * is already terminated.
1720 */
/*
 * coalition_adopt_task_internal
 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
 * is already terminated. On success the coalition gains one reference and
 * one activation, and the task's per-type coalition pointer is set.
 */
static kern_return_t
coalition_adopt_task_internal(coalition_t coal, task_t task)
{
	kern_return_t kr;

	/* a task may hold at most one coalition of each type */
	if (task->coalition[coal->type]) {
		return KERN_ALREADY_IN_SET;
	}

	coalition_lock(coal);

	if (coal->reaped || coal->terminated) {
		coalition_unlock(coal);
		return KERN_TERMINATED;
	}

	/* type-specific adoption (e.g. jetsam role-queue linkage) */
	kr = coal_call(coal, adopt_task, task);
	if (kr != KERN_SUCCESS) {
		goto out_unlock;
	}

	coal->active_count++;

	coal->ref_count++;

	task->coalition[coal->type] = coal;

out_unlock:
#if COALITION_DEBUG
	(void)coal; /* need expression after label */
	uint64_t cid = coal->id;
	uint32_t rc = coal->ref_count;
	uint32_t ct = coal->type;
#endif
	if (get_task_uniqueid(task) != UINT64_MAX) {
		/* On 32-bit targets, uniqueid will get truncated to 32 bits */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT),
		    coal->id, get_task_uniqueid(task));
	}

	coalition_unlock(coal);

	coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
	    task_pid(task), cid, coal_type_str(ct), rc, kr);
	return kr;
}
1767
/*
 * coalition_remove_task_internal
 * Remove 'task' from its coalition of the given type (no-op if it has
 * none), then drop the activation that adoption took via
 * coalition_remove_active (which may trigger termination notification).
 */
static kern_return_t
coalition_remove_task_internal(task_t task, int type)
{
	kern_return_t kr;

	coalition_t coal = task->coalition[type];

	if (!coal) {
		return KERN_SUCCESS;
	}

	assert(coal->type == (uint32_t)type);

	coalition_lock(coal);

	/* type-specific removal (e.g. jetsam role-queue unlink) */
	kr = coal_call(coal, remove_task, task);

#if COALITION_DEBUG
	uint64_t cid = coal->id;
	uint32_t rc = coal->ref_count;
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE),
	    coal->id, get_task_uniqueid(task));
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
	    cid, coal_type_str(ct), rc, ac, kr);

	/* drop the activation taken at adoption time (locks internally) */
	coalition_remove_active(coal);

	return kr;
}
1802
1803 /*
1804 * coalitions_adopt_task
1805 * Condition: All coalitions must be referenced and unlocked.
1806 * Will fail if any coalition is already terminated.
1807 */
/*
 * coalitions_adopt_task
 * Condition: All coalitions must be referenced and unlocked.
 * Will fail if any coalition is already terminated.
 * On partial failure, any coalitions that already adopted the task are
 * rolled back before returning the error.
 */
kern_return_t
coalitions_adopt_task(coalition_t *coals, task_t task)
{
	int i;
	kern_return_t kr;

	/* a resource coalition is mandatory */
	if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* verify that the incoming coalitions are what they say they are */
	for (i = 0; i < COALITION_NUM_TYPES; i++) {
		if (coals[i] && coals[i]->type != (uint32_t)i) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	for (i = 0; i < COALITION_NUM_TYPES; i++) {
		kr = KERN_SUCCESS;
		if (coals[i]) {
			kr = coalition_adopt_task_internal(coals[i], task);
		}
		if (kr != KERN_SUCCESS) {
			/* dis-associate any coalitions that just adopted this task */
			while (--i >= 0) {
				if (task->coalition[i]) {
					coalition_remove_task_internal(task, i);
				}
			}
			break;
		}
	}
	return kr;
}
1842
1843 /*
1844 * coalitions_remove_task
1845 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1846 */
/*
 * coalitions_remove_task
 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
 * Clears the task's coalition-member flag first (under the task lock) so
 * removal happens at most once, then removes the task from each type.
 */
kern_return_t
coalitions_remove_task(task_t task)
{
	kern_return_t kr;
	int i;

	task_lock(task);
	if (!task_is_coalition_member(task)) {
		/* already removed (or never adopted) — nothing to do */
		task_unlock(task);
		return KERN_SUCCESS;
	}

	task_clear_coalition_member(task);
	task_unlock(task);

	for (i = 0; i < COALITION_NUM_TYPES; i++) {
		kr = coalition_remove_task_internal(task, i);
		assert(kr == KERN_SUCCESS);
	}

	return kr;
}
1869
1870 /*
1871 * task_release_coalitions
1872 * helper function to release references to all coalitions in which
1873 * 'task' is a member.
1874 */
1875 void
task_release_coalitions(task_t task)1876 task_release_coalitions(task_t task)
1877 {
1878 int i;
1879 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1880 if (task->coalition[i]) {
1881 coalition_release(task->coalition[i]);
1882 } else if (i == COALITION_TYPE_RESOURCE) {
1883 panic("deallocating task %p was not a member of a resource coalition", task);
1884 }
1885 }
1886 }
1887
1888 /*
1889 * coalitions_set_roles
1890 * for each type of coalition, if the task is a member of a coalition of
1891 * that type (given in the coalitions parameter) then set the role of
1892 * the task within that that coalition.
1893 */
1894 kern_return_t
coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],task_t task,int roles[COALITION_NUM_TYPES])1895 coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],
1896 task_t task, int roles[COALITION_NUM_TYPES])
1897 {
1898 kern_return_t kr = KERN_SUCCESS;
1899 int i;
1900
1901 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1902 if (!coalitions[i]) {
1903 continue;
1904 }
1905 coalition_lock(coalitions[i]);
1906 kr = coal_call(coalitions[i], set_taskrole, task, roles[i]);
1907 coalition_unlock(coalitions[i]);
1908 assert(kr == KERN_SUCCESS);
1909 }
1910
1911 return kr;
1912 }
1913
/*
 * coalition_request_terminate_internal
 * Request termination of 'coal'.  Marks the coalition so no new tasks
 * may be adopted and, if no active tasks remain, sends the userspace
 * termination notification immediately.
 * Condition: Coalition must be referenced and UNLOCKED.
 */
kern_return_t
coalition_request_terminate_internal(coalition_t coal)
{
	assert(coal->type >= 0 && coal->type <= COALITION_TYPE_MAX);

	/* the default (init) coalition of each type can never be terminated */
	if (coal == init_coalition[coal->type]) {
		return KERN_DEFAULT_SET;
	}

	coalition_lock(coal);

	if (coal->reaped) {
		/* already fully torn down */
		coalition_unlock(coal);
		return KERN_INVALID_NAME;
	}

	if (coal->terminated || coal->termrequested) {
		/* termination already in progress or complete */
		coalition_unlock(coal);
		return KERN_TERMINATED;
	}

	coal->termrequested = TRUE;

	boolean_t do_notify = FALSE;
	uint64_t note_id = 0;
	uint32_t note_flags = 0;

	if (coal->active_count == 0) {
		/*
		 * We only notify once, when active_count reaches zero.
		 * If the active count is already zero here (all tasks died
		 * before termination was requested), mark the coalition
		 * terminated now and deliver the notification from this path.
		 */
		assert(!coal->terminated);
		coal->terminated = TRUE;

		assert(!coal->notified);

		coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
		do_notify = coal->should_notify;
#else
		do_notify = TRUE;
#endif
		note_id = coal->id;
		note_flags = 0;
	}

	/* drop the lock before calling out to the user notification port */
	coalition_unlock(coal);

	if (do_notify) {
		coalition_notify_user(note_id, note_flags);
	}

	return KERN_SUCCESS;
}
1975
/*
 * coalition_reap_internal
 * Final teardown of a terminated coalition: marks it reaped, unlinks it
 * from the global coalitions list, and drops the list's and launchd's
 * references.  Fails unless the coalition has been terminated and has
 * no remaining active tasks.
 * Condition: Coalition must be referenced and UNLOCKED.
 */
kern_return_t
coalition_reap_internal(coalition_t coal)
{
	assert(coal->type <= COALITION_TYPE_MAX);

	/* the default (init) coalition of each type can never be reaped */
	if (coal == init_coalition[coal->type]) {
		return KERN_DEFAULT_SET;
	}

	coalition_lock(coal);
	if (coal->reaped) {
		/* someone else already reaped it */
		coalition_unlock(coal);
		return KERN_TERMINATED;
	}
	if (!coal->terminated) {
		/* must be terminated before it can be reaped */
		coalition_unlock(coal);
		return KERN_FAILURE;
	}
	assert(coal->termrequested);
	if (coal->active_count > 0) {
		/* live tasks remain; cannot reap yet */
		coalition_unlock(coal);
		return KERN_FAILURE;
	}

	coal->reaped = TRUE;

	/* Caller, launchd, and coalitions list should each have a reference */
	assert(coal->ref_count > 2);

	coalition_unlock(coal);

	/* unlink from the global coalition list under the list lock */
	lck_rw_lock_exclusive(&coalitions_list_lock);
	coalition_count--;
	remqueue(&coal->coalitions);
	lck_rw_unlock_exclusive(&coalitions_list_lock);

	/* Release the list's reference and launchd's reference. */
	coalition_release(coal);
	coalition_release(coal);

	return KERN_SUCCESS;
}
2022
2023 #if DEVELOPMENT || DEBUG
2024 int
coalition_should_notify(coalition_t coal)2025 coalition_should_notify(coalition_t coal)
2026 {
2027 int should;
2028 if (!coal) {
2029 return -1;
2030 }
2031 coalition_lock(coal);
2032 should = coal->should_notify;
2033 coalition_unlock(coal);
2034
2035 return should;
2036 }
2037
2038 void
coalition_set_notify(coalition_t coal,int notify)2039 coalition_set_notify(coalition_t coal, int notify)
2040 {
2041 if (!coal) {
2042 return;
2043 }
2044 coalition_lock(coal);
2045 coal->should_notify = !!notify;
2046 coalition_unlock(coal);
2047 }
2048 #endif
2049
/*
 * coalitions_init
 * Boot-time initialization: set up the global coalition list and
 * ledgers, sanity-check the per-type callback table, and create the
 * default ("init") and corpse coalitions for each type that has one.
 */
void
coalitions_init(void)
{
	kern_return_t kr;
	int i;
	const struct coalition_type *ctype;

	queue_head_init(coalitions_q);

	init_task_ledgers();

	init_coalition_ledgers();

	for (i = 0, ctype = &s_coalition_types[0]; i < COALITION_NUM_TYPES; ctype++, i++) {
		/* verify the entry in the global coalition types array */
		if (ctype->type != i ||
		    !ctype->init ||
		    !ctype->dealloc ||
		    !ctype->adopt_task ||
		    !ctype->remove_task) {
			panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
			    __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i);
		}
		if (!ctype->has_default) {
			continue;
		}
		/* create the privileged default coalition for this type */
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, FALSE, &init_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create init %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
		if (i == COALITION_TYPE_RESOURCE) {
			/* the first (resource) coalition created must get the kernel's id */
			assert(COALITION_ID_KERNEL == init_coalition[ctype->type]->id);
		}
		/* create the coalition that adopts corpse tasks of this type */
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, FALSE, &corpse_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create corpse %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
	}

	/* "Leak" our reference to the global object */
}
2093
2094 /*
2095 * BSD Kernel interface functions
2096 *
2097 */
2098 static void
coalition_fill_procinfo(struct coalition * coal,struct procinfo_coalinfo * coalinfo)2099 coalition_fill_procinfo(struct coalition *coal,
2100 struct procinfo_coalinfo *coalinfo)
2101 {
2102 coalinfo->coalition_id = coal->id;
2103 coalinfo->coalition_type = coal->type;
2104 coalinfo->coalition_tasks = coalition_get_task_count(coal);
2105 }
2106
2107
2108 size_t
coalitions_get_list(int type,struct procinfo_coalinfo * coal_list,size_t list_sz)2109 coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, size_t list_sz)
2110 {
2111 size_t ncoals = 0;
2112 struct coalition *coal;
2113
2114 lck_rw_lock_shared(&coalitions_list_lock);
2115 qe_foreach_element(coal, &coalitions_q, coalitions) {
2116 if (!coal->reaped && (type < 0 || type == (int)coal->type)) {
2117 if (coal_list && ncoals < list_sz) {
2118 coalition_fill_procinfo(coal, &coal_list[ncoals]);
2119 }
2120 ++ncoals;
2121 }
2122 }
2123 lck_rw_unlock_shared(&coalitions_list_lock);
2124
2125 return ncoals;
2126 }
2127
2128 /*
2129 * Return the coaltion of the given type to which the task belongs.
2130 */
2131 coalition_t
task_get_coalition(task_t task,int coal_type)2132 task_get_coalition(task_t task, int coal_type)
2133 {
2134 coalition_t c;
2135
2136 if (task == NULL || coal_type > COALITION_TYPE_MAX) {
2137 return COALITION_NULL;
2138 }
2139
2140 c = task->coalition[coal_type];
2141 assert(c == COALITION_NULL || (int)c->type == coal_type);
2142 return c;
2143 }
2144
2145 /*
2146 * Report if the given task is the leader of the given jetsam coalition.
2147 */
2148 boolean_t
coalition_is_leader(task_t task,coalition_t coal)2149 coalition_is_leader(task_t task, coalition_t coal)
2150 {
2151 boolean_t ret = FALSE;
2152
2153 if (coal != COALITION_NULL) {
2154 coalition_lock(coal);
2155
2156 ret = (coal->type == COALITION_TYPE_JETSAM && coal->j.leader == task);
2157
2158 coalition_unlock(coal);
2159 }
2160
2161 return ret;
2162 }
2163
/*
 * coalition_iterate_stackshot
 * Walk the global coalition list and invoke 'callout' on each coalition
 * of the requested type.  Runs in stackshot context: no locks are taken,
 * so each element is validated with ml_validate_nofault before use.
 */
kern_return_t
coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type)
{
	coalition_t coal;
	int i = 0;

	qe_foreach_element(coal, &coalitions_q, coalitions) {
		/* bail out if the element is unmapped or otherwise unreadable */
		if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition))) {
			return KERN_FAILURE;
		}

		if (coalition_type == coal->type) {
			callout(arg, i++, coal);
		}
	}

	return KERN_SUCCESS;
}
2182
2183 task_t
kdp_coalition_get_leader(coalition_t coal)2184 kdp_coalition_get_leader(coalition_t coal)
2185 {
2186 if (!coal) {
2187 return TASK_NULL;
2188 }
2189
2190 if (coal->type == COALITION_TYPE_JETSAM) {
2191 return coal->j.leader;
2192 }
2193 return TASK_NULL;
2194 }
2195
2196 task_t
coalition_get_leader(coalition_t coal)2197 coalition_get_leader(coalition_t coal)
2198 {
2199 task_t leader = TASK_NULL;
2200
2201 if (!coal) {
2202 return TASK_NULL;
2203 }
2204
2205 coalition_lock(coal);
2206 if (coal->type != COALITION_TYPE_JETSAM) {
2207 goto out_unlock;
2208 }
2209
2210 leader = coal->j.leader;
2211 if (leader != TASK_NULL) {
2212 task_reference(leader);
2213 }
2214
2215 out_unlock:
2216 coalition_unlock(coal);
2217 return leader;
2218 }
2219
2220
2221 int
coalition_get_task_count(coalition_t coal)2222 coalition_get_task_count(coalition_t coal)
2223 {
2224 int ntasks = 0;
2225 struct queue_entry *qe;
2226 if (!coal) {
2227 return 0;
2228 }
2229
2230 coalition_lock(coal);
2231 switch (coal->type) {
2232 case COALITION_TYPE_RESOURCE:
2233 qe_foreach(qe, &coal->r.tasks)
2234 ntasks++;
2235 break;
2236 case COALITION_TYPE_JETSAM:
2237 if (coal->j.leader) {
2238 ntasks++;
2239 }
2240 qe_foreach(qe, &coal->j.other)
2241 ntasks++;
2242 qe_foreach(qe, &coal->j.extensions)
2243 ntasks++;
2244 qe_foreach(qe, &coal->j.services)
2245 ntasks++;
2246 break;
2247 default:
2248 break;
2249 }
2250 coalition_unlock(coal);
2251
2252 return ntasks;
2253 }
2254
2255
2256 static uint64_t
i_get_list_footprint(queue_t list,int type,int * ntasks)2257 i_get_list_footprint(queue_t list, int type, int *ntasks)
2258 {
2259 task_t task;
2260 uint64_t bytes = 0;
2261
2262 qe_foreach_element(task, list, task_coalition[type]) {
2263 bytes += get_task_phys_footprint(task);
2264 coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
2265 *ntasks, task_pid(task), type, bytes);
2266 *ntasks += 1;
2267 }
2268
2269 return bytes;
2270 }
2271
2272 uint64_t
coalition_get_page_count(coalition_t coal,int * ntasks)2273 coalition_get_page_count(coalition_t coal, int *ntasks)
2274 {
2275 uint64_t bytes = 0;
2276 int num_tasks = 0;
2277
2278 if (ntasks) {
2279 *ntasks = 0;
2280 }
2281 if (!coal) {
2282 return bytes;
2283 }
2284
2285 coalition_lock(coal);
2286
2287 switch (coal->type) {
2288 case COALITION_TYPE_RESOURCE:
2289 bytes += i_get_list_footprint(&coal->r.tasks, COALITION_TYPE_RESOURCE, &num_tasks);
2290 break;
2291 case COALITION_TYPE_JETSAM:
2292 if (coal->j.leader) {
2293 bytes += get_task_phys_footprint(coal->j.leader);
2294 num_tasks = 1;
2295 }
2296 bytes += i_get_list_footprint(&coal->j.extensions, COALITION_TYPE_JETSAM, &num_tasks);
2297 bytes += i_get_list_footprint(&coal->j.services, COALITION_TYPE_JETSAM, &num_tasks);
2298 bytes += i_get_list_footprint(&coal->j.other, COALITION_TYPE_JETSAM, &num_tasks);
2299 break;
2300 default:
2301 break;
2302 }
2303
2304 coalition_unlock(coal);
2305
2306 if (ntasks) {
2307 *ntasks = num_tasks;
2308 }
2309
2310 return bytes / PAGE_SIZE_64;
2311 }
2312
/* Per-task record used when sorting coalition members for pid lists. */
struct coal_sort_s {
	int pid;        /* task's pid */
	int usr_order;  /* user-specified ordering key */
	uint64_t bytes; /* physical footprint, used by memory sorts */
};

/*
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);

/* kernel qsort — declared here because no kernel header exports it */
extern void
qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
2328
2329 static int
dflt_cmp(const void * a,const void * b)2330 dflt_cmp(const void *a, const void *b)
2331 {
2332 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2333 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2334
2335 /*
2336 * if both A and B are equal, use a memory descending sort
2337 */
2338 if (csA->usr_order == csB->usr_order) {
2339 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
2340 }
2341
2342 /* otherwise, return the relationship between user specified orders */
2343 return csA->usr_order - csB->usr_order;
2344 }
2345
2346 static int
mem_asc_cmp(const void * a,const void * b)2347 mem_asc_cmp(const void *a, const void *b)
2348 {
2349 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2350 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2351
2352 return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
2353 }
2354
2355 static int
mem_dec_cmp(const void * a,const void * b)2356 mem_dec_cmp(const void *a, const void *b)
2357 {
2358 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2359 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2360
2361 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
2362 }
2363
2364 static int
usr_asc_cmp(const void * a,const void * b)2365 usr_asc_cmp(const void *a, const void *b)
2366 {
2367 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2368 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2369
2370 return csA->usr_order - csB->usr_order;
2371 }
2372
2373 static int
usr_dec_cmp(const void * a,const void * b)2374 usr_dec_cmp(const void *a, const void *b)
2375 {
2376 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2377 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2378
2379 return csB->usr_order - csA->usr_order;
2380 }
2381
2382 /* avoid dynamic allocation in this path */
2383 #define MAX_SORTED_PIDS 80
2384
/*
 * coalition_get_sort_list
 * Fill 'sort_array' (capacity 'array_sz') with one pid/order/footprint
 * record per task on 'list'.  A NULL 'list' selects only the jetsam
 * leader.  Returns the number of records written.
 * Caller must hold the coalition lock.
 */
static int
coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
	int ntasks = 0;
	task_t task;

	assert(sort_array != NULL);

	if (array_sz <= 0) {
		return 0;
	}

	if (!list) {
		/*
		 * this function will only be called with a NULL
		 * list for JETSAM-type coalitions, and is intended
		 * to investigate the leader process
		 */
		if (coal->type != COALITION_TYPE_JETSAM ||
		    coal->j.leader == TASK_NULL) {
			return 0;
		}
		sort_array[0].pid = task_pid(coal->j.leader);
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[0].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			/* default sort needs both usr_order and bytes, hence
			 * the deliberate fallthrough above */
			sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[0].usr_order = 0;
			break;
		default:
			break;
		}
		return 1;
	}

	qe_foreach_element(task, list, task_coalition[coal->type]) {
		if (ntasks >= array_sz) {
			/*
			 * NOTE(review): the bound hit here is 'array_sz' (the
			 * space remaining for this call), but the message
			 * reports MAX_SORTED_PIDS, the overall cap across
			 * all lists — slightly misleading when array_sz has
			 * already been reduced by earlier calls.
			 */
			printf("WARNING: more than %d pids in coalition %llu\n",
			    MAX_SORTED_PIDS, coal->id);
			break;
		}

		sort_array[ntasks].pid = task_pid(task);

		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[ntasks].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[ntasks].bytes = get_task_phys_footprint(task);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[ntasks].usr_order = 0;
			break;
		default:
			break;
		}

		ntasks++;
	}

	return ntasks;
}
2457
/*
 * coalition_get_pid_list
 * Copy into 'pid_list' (capacity 'list_sz') the pids of tasks in 'coal'
 * whose role is selected by 'rolemask', ordered per 'sort_order'.
 * Returns the total number of matching tasks (which may exceed
 * 'list_sz'), or a negative errno-style value on bad parameters.
 */
int
coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz)
{
	struct i_jetsam_coalition *cj;
	int ntasks = 0;
	cmpfunc_t cmp_func = NULL;
	struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0, 0, 0} }; /* keep to < 2k */

	if (!coal ||
	    !(rolemask & COALITION_ROLEMASK_ALLROLES) ||
	    !pid_list || list_sz < 1) {
		coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
		    "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1,
		    rolemask, pid_list, list_sz);
		return -EINVAL;
	}

	/* pick the comparison function for the requested ordering */
	switch (sort_order) {
	case COALITION_SORT_NOSORT:
		cmp_func = NULL;
		break;
	case COALITION_SORT_DEFAULT:
		cmp_func = dflt_cmp;
		break;
	case COALITION_SORT_MEM_ASC:
		cmp_func = mem_asc_cmp;
		break;
	case COALITION_SORT_MEM_DEC:
		cmp_func = mem_dec_cmp;
		break;
	case COALITION_SORT_USER_ASC:
		cmp_func = usr_asc_cmp;
		break;
	case COALITION_SORT_USER_DEC:
		cmp_func = usr_dec_cmp;
		break;
	default:
		return -ENOTSUP;
	}

	coalition_lock(coal);

	/* resource coalitions keep all tasks on a single queue */
	if (coal->type == COALITION_TYPE_RESOURCE) {
		ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks,
		    sort_array, MAX_SORTED_PIDS);
		goto unlock_coal;
	}

	cj = &coal->j;

	/* jetsam coalitions: gather each requested role's queue in turn,
	 * each call consuming the remaining sort_array capacity */
	if (rolemask & COALITION_ROLEMASK_UNDEF) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->other,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_XPC) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->services,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_EXT) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_LEADER) {
		/* NULL list means "just the leader" — see coalition_get_sort_list */
		ntasks += coalition_get_sort_list(coal, sort_order, NULL,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

unlock_coal:
	coalition_unlock(coal);

	/* sort based on the chosen criterion (no sense sorting 1 item) */
	if (cmp_func && ntasks > 1) {
		qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func);
	}

	/* copy out at most list_sz pids, in sorted order */
	for (int i = 0; i < ntasks; i++) {
		if (i >= list_sz) {
			break;
		}
		coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
		    i, sort_array[i].pid, sort_array[i].bytes,
		    sort_array[i].usr_order);
		pid_list[i] = sort_array[i].pid;
	}

	return ntasks;
}
2553
/*
 * Iterator callback for coalition_mark_swappable(): flag one member
 * task as self-donating its pages (making it eligible for swap).
 */
static void
mark_coalition_member_as_swappable(__unused coalition_t coal, __unused void *ctx, task_t task)
{
	vm_task_set_selfdonate_pages(task, true);
}
2559
2560 void
coalition_mark_swappable(coalition_t coal)2561 coalition_mark_swappable(coalition_t coal)
2562 {
2563 struct i_jetsam_coalition *cj = NULL;
2564
2565 coalition_lock(coal);
2566 assert(coal && coal->type == COALITION_TYPE_JETSAM);
2567
2568 cj = &coal->j;
2569 cj->swap_enabled = true;
2570
2571 i_coal_jetsam_iterate_tasks(coal, NULL, mark_coalition_member_as_swappable);
2572
2573 coalition_unlock(coal);
2574 }
2575
2576 bool
coalition_is_swappable(coalition_t coal)2577 coalition_is_swappable(coalition_t coal)
2578 {
2579 struct i_jetsam_coalition *cj = NULL;
2580
2581 coalition_lock(coal);
2582 assert(coal && coal->type == COALITION_TYPE_JETSAM);
2583
2584 cj = &coal->j;
2585 bool enabled = cj->swap_enabled;
2586
2587 coalition_unlock(coal);
2588
2589 return enabled;
2590 }
2591