1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kern_types.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32
33 #include <kern/coalition.h>
34 #include <kern/exc_resource.h>
35 #include <kern/host.h>
36 #include <kern/ledger.h>
37 #include <kern/mach_param.h> /* for TASK_CHUNK */
38 #if MONOTONIC
39 #include <kern/monotonic.h>
40 #endif /* MONOTONIC */
41 #include <kern/policy_internal.h>
42 #include <kern/task.h>
43 #include <kern/thread_group.h>
44 #include <kern/zalloc.h>
45
46 #include <libkern/OSAtomic.h>
47
48 #include <mach/coalition_notification_server.h>
49 #include <mach/host_priv.h>
50 #include <mach/host_special_ports.h>
51
52 #include <os/log.h>
53
54 #include <sys/errno.h>
55
56 /*
57 * BSD interface functions
58 */
59 int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz);
60 coalition_t task_get_coalition(task_t task, int type);
61 boolean_t coalition_is_leader(task_t task, coalition_t coal);
62 task_t coalition_get_leader(coalition_t coal);
63 int coalition_get_task_count(coalition_t coal);
64 uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
65 int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
66 int *pid_list, int list_sz);
67
68 /* defined in task.c */
69 extern ledger_template_t task_ledger_template;
70
71 /*
72 * Templates; task template is copied due to potential allocation limits on
73 * task ledgers.
74 */
75 ledger_template_t coalition_task_ledger_template = NULL;
76 ledger_template_t coalition_ledger_template = NULL;
77
78 extern int proc_selfpid(void);
/*
 * Coalition zone needs limits. We expect there will be as many coalitions as
 * tasks (same order of magnitude), so use the task zone's limits.
 */
83 #define CONFIG_COALITION_MAX CONFIG_TASK_MAX
84 #define COALITION_CHUNK TASK_CHUNK
85
86 #if DEBUG || DEVELOPMENT
87 TUNABLE_WRITEABLE(int, unrestrict_coalition_syscalls, "unrestrict_coalition_syscalls", 0);
88 #else
89 #define unrestrict_coalition_syscalls false
90 #endif
91
92 LCK_GRP_DECLARE(coalitions_lck_grp, "coalition");
93
94 /* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
95 static LCK_RW_DECLARE(coalitions_list_lock, &coalitions_lck_grp);
96 static uint64_t coalition_count;
97 static uint64_t coalition_next_id = 1;
98 static queue_head_t coalitions_q;
99
100 coalition_t init_coalition[COALITION_NUM_TYPES];
101 coalition_t corpse_coalition[COALITION_NUM_TYPES];
102
103 static const char *
coal_type_str(int type)104 coal_type_str(int type)
105 {
106 switch (type) {
107 case COALITION_TYPE_RESOURCE:
108 return "RESOURCE";
109 case COALITION_TYPE_JETSAM:
110 return "JETSAM";
111 default:
112 return "<unknown>";
113 }
114 }
115
/*
 * Per-type dispatch table for coalition behavior. One entry per coalition
 * type lives in s_coalition_types[]; hooks are invoked via coal_call().
 */
struct coalition_type {
	int type;        /* COALITION_TYPE_* value; must equal this entry's index in s_coalition_types */
	int has_default; /* non-zero if this type provides a default coalition */
	/*
	 * init
	 * pre-condition: coalition just allocated (unlocked), unreferenced,
	 *                type field set
	 */
	kern_return_t (*init)(coalition_t coal, boolean_t privileged, boolean_t efficient);

	/*
	 * dealloc
	 * pre-condition: coalition unlocked
	 * pre-condition: coalition refcount=0, active_count=0,
	 *                termrequested=1, terminated=1, reaped=1
	 */
	void (*dealloc)(coalition_t coal);

	/*
	 * adopt_task
	 * pre-condition: coalition locked
	 * pre-condition: coalition !reaped and !terminated
	 */
	kern_return_t (*adopt_task)(coalition_t coal, task_t task);

	/*
	 * remove_task
	 * pre-condition: coalition locked
	 * pre-condition: task has been removed from coalition's task list
	 */
	kern_return_t (*remove_task)(coalition_t coal, task_t task);

	/*
	 * set_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 *                active_count >= 1 (at least the given task is active)
	 */
	kern_return_t (*set_taskrole)(coalition_t coal, task_t task, int role);

	/*
	 * get_taskrole
	 * pre-condition: coalition locked
	 * pre-condition: task added to coalition's task list,
	 *                active_count >= 1 (at least the given task is active)
	 */
	int (*get_taskrole)(coalition_t coal, task_t task);

	/*
	 * iterate_tasks
	 * pre-condition: coalition locked
	 */
	void (*iterate_tasks)(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t));
};
170
171 /*
172 * COALITION_TYPE_RESOURCE
173 */
174
175 static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
176 static void i_coal_resource_dealloc(coalition_t coal);
177 static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
178 static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
179 static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
180 task_t task, int role);
181 static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
182 static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
183 void (*callback)(coalition_t, void *, task_t));
184
185 /*
186 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
187 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
188 */
189 static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
190
/*
 * Type-specific state for a COALITION_TYPE_RESOURCE coalition: accumulated
 * accounting for member tasks that have exited, plus the list of live
 * members. Updated under the owning coalition's lock (see remove_task).
 */
struct i_resource_coalition {
	/*
	 * This keeps track of resource utilization of tasks that are no longer active
	 * in the coalition and is updated when a task is removed from the coalition.
	 */
	ledger_t ledger;
	uint64_t bytesread;               /* disk bytes read by exited tasks */
	uint64_t byteswritten;            /* disk bytes written by exited tasks */
	uint64_t energy;                  /* task_energy() of exited tasks (arm/arm64 only) */
	uint64_t gpu_time;                /* task_gpu_utilisation() of exited tasks (x86_64 only) */
	uint64_t logical_immediate_writes;            /* internal-media write counters, rolled up on exit */
	uint64_t logical_deferred_writes;
	uint64_t logical_invalidated_writes;
	uint64_t logical_metadata_writes;
	uint64_t logical_immediate_writes_to_external; /* external-media write counters, rolled up on exit */
	uint64_t logical_deferred_writes_to_external;
	uint64_t logical_invalidated_writes_to_external;
	uint64_t logical_metadata_writes_to_external;
	uint64_t cpu_ptime;               /* task_cpu_ptime() of exited tasks */
	uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per effective QoS class */
	uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per requested QoS class */
	uint64_t cpu_instructions;        /* fixed-counter instructions of exited tasks (MONOTONIC) */
	uint64_t cpu_cycles;              /* fixed-counter cycles of exited tasks (MONOTONIC) */

	uint64_t task_count;      /* tasks that have started in this coalition */
	uint64_t dead_task_count; /* tasks that have exited in this coalition;
	                           * subtract from task_count to get count
	                           * of "active" tasks */
	/*
	 * Count the length of time this coalition had at least one active task.
	 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
	 */
	uint64_t last_became_nonempty_time;
	uint64_t time_nonempty;

	queue_head_t tasks; /* List of active tasks in the coalition */
	/*
	 * This ledger is used for triggering resource exception. For the tracked resources, this is updated
	 * when the member tasks' resource usage changes.
	 */
	ledger_t resource_monitor_ledger;
#if CONFIG_PHYS_WRITE_ACCT
	uint64_t fs_metadata_writes; /* filesystem metadata writes of exited tasks */
#endif /* CONFIG_PHYS_WRITE_ACCT */
};
236
237 /*
238 * COALITION_TYPE_JETSAM
239 */
240
241 static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient);
242 static void i_coal_jetsam_dealloc(coalition_t coal);
243 static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
244 static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
245 static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
246 task_t task, int role);
247 int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
248 static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
249 void (*callback)(coalition_t, void *, task_t));
250
/*
 * Type-specific state for a COALITION_TYPE_JETSAM coalition.
 * NOTE(review): role-queue semantics are implemented by the i_coal_jetsam_*
 * hooks, which are outside this chunk — field comments below are inferred
 * from the names; confirm against those implementations.
 */
struct i_jetsam_coalition {
	task_t leader;                     /* presumably the task holding the leader role */
	queue_head_t extensions;           /* member tasks, grouped by role */
	queue_head_t services;
	queue_head_t other;
	struct thread_group *thread_group; /* associated thread group (passed down to CLPC; see comment at COALITION_FOCAL_TASKS_ACCOUNTING) */
};
258
259
/*
 * main coalition structure
 *
 * Lifecycle flags advance monotonically: termrequested -> terminated ->
 * reaped; memory is reclaimed once ref_count drops to zero afterwards.
 */
struct coalition {
	uint64_t id;                /* monotonically increasing */
	uint32_t type;              /* COALITION_TYPE_*; selects the union arm below */
	uint32_t role;              /* default task role (background, adaptive, interactive, etc) */
	uint32_t ref_count;         /* Number of references to the memory containing this struct */
	uint32_t active_count;      /* Number of members of (tasks in) the
	                             * coalition, plus vouchers referring
	                             * to the coalition */
	uint32_t focal_task_count;   /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
	uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */

	/* coalition flags */
	uint32_t privileged : 1;    /* Members of this coalition may create
	                             * and manage coalitions and may posix_spawn
	                             * processes into selected coalitions */
	/* ast? */
	/* voucher */
	uint32_t termrequested : 1; /* launchd has requested termination when coalition becomes empty */
	uint32_t terminated : 1;    /* coalition became empty and spawns are now forbidden */
	uint32_t reaped : 1;        /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
	uint32_t notified : 1;      /* no-more-processes notification was sent via special port */
	uint32_t efficient : 1;     /* launchd has marked the coalition as efficient */
#if DEVELOPMENT || DEBUG
	uint32_t should_notify : 1; /* should this coalition send notifications (default: yes) */
#endif

	queue_chain_t coalitions;   /* global list of coalitions */

	decl_lck_mtx_data(, lock);  /* Coalition lock. */

	/* put coalition type-specific structures here */
	union {
		struct i_resource_coalition r; /* valid when type == COALITION_TYPE_RESOURCE */
		struct i_jetsam_coalition j;   /* valid when type == COALITION_TYPE_JETSAM */
	};
};
299
/*
 * register different coalition types:
 * these must be kept in the order specified in coalition.h
 *
 * Positional initializers must match the field order of struct
 * coalition_type: type, has_default, init, dealloc, adopt_task,
 * remove_task, set_taskrole, get_taskrole, iterate_tasks.
 */
static const struct coalition_type
    s_coalition_types[COALITION_NUM_TYPES] = {
	{
		COALITION_TYPE_RESOURCE,
		1,
		i_coal_resource_init,
		i_coal_resource_dealloc,
		i_coal_resource_adopt_task,
		i_coal_resource_remove_task,
		i_coal_resource_set_taskrole,
		i_coal_resource_get_taskrole,
		i_coal_resource_iterate_tasks,
	},
	{
		COALITION_TYPE_JETSAM,
		1,
		i_coal_jetsam_init,
		i_coal_jetsam_dealloc,
		i_coal_jetsam_adopt_task,
		i_coal_jetsam_remove_task,
		i_coal_jetsam_set_taskrole,
		i_coal_jetsam_get_taskrole,
		i_coal_jetsam_iterate_tasks,
	},
};
329
330 ZONE_DEFINE_TYPE(coalition_zone, "coalitions",
331 struct coalition, ZC_ZFREE_CLEARMEM);
332
333 #define coal_call(coal, func, ...) \
334 (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)
335
336
337 #define coalition_lock(c) do{ lck_mtx_lock(&c->lock); }while(0)
338 #define coalition_unlock(c) do{ lck_mtx_unlock(&c->lock); }while(0)
339
340 /*
341 * Define the coalition type to track focal tasks.
342 * On embedded, track them using jetsam coalitions since they have associated thread
343 * groups which reflect this property as a flag (and pass it down to CLPC).
344 * On non-embedded platforms, since not all coalitions have jetsam coalitions
345 * track focal counts on the resource coalition.
346 */
347 #if !XNU_TARGET_OS_OSX
348 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM
349 #else /* !XNU_TARGET_OS_OSX */
350 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE
351 #endif /* !XNU_TARGET_OS_OSX */
352
353
354 /*
355 *
356 * Coalition ledger implementation
357 *
358 */
359
360 struct coalition_ledger_indices coalition_ledgers =
361 {.logical_writes = -1, };
362 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor);
363
364 ledger_t
coalition_ledger_get_from_task(task_t task)365 coalition_ledger_get_from_task(task_t task)
366 {
367 ledger_t ledger = LEDGER_NULL;
368 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
369
370 if (coal != NULL && (!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]))) {
371 ledger = coal->r.resource_monitor_ledger;
372 ledger_reference(ledger);
373 }
374 return ledger;
375 }
376
377
378 enum {
379 COALITION_IO_LEDGER_ENABLE,
380 COALITION_IO_LEDGER_DISABLE
381 };
382
383 void
coalition_io_monitor_ctl(struct coalition * coalition,uint32_t flags,int64_t limit)384 coalition_io_monitor_ctl(struct coalition *coalition, uint32_t flags, int64_t limit)
385 {
386 ledger_t ledger = coalition->r.resource_monitor_ledger;
387
388 if (flags == COALITION_IO_LEDGER_ENABLE) {
389 /* Configure the logical I/O ledger */
390 ledger_set_limit(ledger, coalition_ledgers.logical_writes, (limit * 1024 * 1024), 0);
391 ledger_set_period(ledger, coalition_ledgers.logical_writes, (COALITION_LEDGER_MONITOR_INTERVAL_SECS * NSEC_PER_SEC));
392 } else if (flags == COALITION_IO_LEDGER_DISABLE) {
393 ledger_disable_refill(ledger, coalition_ledgers.logical_writes);
394 ledger_disable_callback(ledger, coalition_ledgers.logical_writes);
395 }
396 }
397
398 int
coalition_ledger_set_logical_writes_limit(struct coalition * coalition,int64_t limit)399 coalition_ledger_set_logical_writes_limit(struct coalition *coalition, int64_t limit)
400 {
401 int error = 0;
402
403 /* limit = -1 will be used to disable the limit and the callback */
404 if (limit > COALITION_MAX_LOGICAL_WRITES_LIMIT || limit == 0 || limit < -1) {
405 error = EINVAL;
406 goto out;
407 }
408
409 coalition_lock(coalition);
410 if (limit == -1) {
411 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, limit);
412 } else {
413 coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, limit);
414 }
415 coalition_unlock(coalition);
416 out:
417 return error;
418 }
419
/*
 * Ledger-callback path run in the context of the offending task when the
 * resource coalition's logical-writes ledger trips its limit: trace the
 * violation, log it, notify the resource-violation listener, then either
 * quadruple the limit or disable monitoring once the cap is exceeded.
 * (Deliberately noinline so the symbol shows up in backtraces.)
 */
void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
	int pid = proc_selfpid();
	ledger_amount_t new_limit;
	task_t task = current_task();
	struct ledger_entry_info lei;
	kern_return_t kr;
	ledger_t ledger;
	struct coalition *coalition = task->coalition[COALITION_TYPE_RESOURCE];

	assert(coalition != NULL);
	ledger = coalition->r.resource_monitor_ledger;

	switch (flavor) {
	case FLAVOR_IO_LOGICAL_WRITES:
		ledger_get_entry_info(ledger, coalition_ledgers.logical_writes, &lei);
		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
		break;
	default:
		/* unknown flavor: nothing to report */
		goto Exit;
	}

	os_log(OS_LOG_DEFAULT, "Coalition [%lld] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]. Triggered by process [%d]\n",
	    coalition->id, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)),
	    (lei.lei_refill_period / NSEC_PER_SEC), pid);

	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
	if (kr) {
		os_log(OS_LOG_DEFAULT, "ERROR %#x returned from send_resource_violation(disk_writes, ...)\n", kr);
	}

	/*
	 * Continue to monitor the coalition after it hits the initial limit, but increase
	 * the limit exponentially so that we don't spam the listener.
	 */
	new_limit = (lei.lei_limit / 1024 / 1024) * 4; /* bytes -> MB, then 4x */
	coalition_lock(coalition);
	if (new_limit > COALITION_MAX_LOGICAL_WRITES_LIMIT) {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_DISABLE, -1);
	} else {
		coalition_io_monitor_ctl(coalition, COALITION_IO_LEDGER_ENABLE, new_limit);
	}
	coalition_unlock(coalition);

Exit:
	return;
}
468
469 void
coalition_io_rate_exceeded(int warning,const void * param0,__unused const void * param1)470 coalition_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
471 {
472 if (warning == 0) {
473 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO((int)param0);
474 }
475 }
476
477 void
init_coalition_ledgers(void)478 init_coalition_ledgers(void)
479 {
480 ledger_template_t t;
481 assert(coalition_ledger_template == NULL);
482
483 if ((t = ledger_template_create("Per-coalition ledgers")) == NULL) {
484 panic("couldn't create coalition ledger template");
485 }
486
487 coalition_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
488
489 if (coalition_ledgers.logical_writes < 0) {
490 panic("couldn't create entries for coaliton ledger template");
491 }
492
493 ledger_set_callback(t, coalition_ledgers.logical_writes, coalition_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
494 ledger_template_complete(t);
495
496 coalition_task_ledger_template = ledger_template_copy(task_ledger_template, "Coalition task ledgers");
497
498 if (coalition_task_ledger_template == NULL) {
499 panic("couldn't create coalition task ledger template");
500 }
501
502 ledger_template_complete(coalition_task_ledger_template);
503
504 coalition_ledger_template = t;
505 }
506
507 void
coalition_io_ledger_update(task_t task,int32_t flavor,boolean_t is_credit,uint32_t io_size)508 coalition_io_ledger_update(task_t task, int32_t flavor, boolean_t is_credit, uint32_t io_size)
509 {
510 ledger_t ledger;
511 coalition_t coal = task->coalition[COALITION_TYPE_RESOURCE];
512
513 assert(coal != NULL);
514 ledger = coal->r.resource_monitor_ledger;
515 if (LEDGER_VALID(ledger)) {
516 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
517 if (is_credit) {
518 ledger_credit(ledger, coalition_ledgers.logical_writes, io_size);
519 } else {
520 ledger_debit(ledger, coalition_ledgers.logical_writes, io_size);
521 }
522 }
523 }
524 }
525
526 static void
coalition_notify_user(uint64_t id,uint32_t flags)527 coalition_notify_user(uint64_t id, uint32_t flags)
528 {
529 mach_port_t user_port;
530 kern_return_t kr;
531
532 kr = host_get_coalition_port(host_priv_self(), &user_port);
533 if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
534 return;
535 }
536
537 coalition_notification(user_port, id, flags);
538 ipc_port_release_send(user_port);
539 }
540
541 /*
542 *
543 * COALITION_TYPE_RESOURCE
544 *
545 */
546 static kern_return_t
i_coal_resource_init(coalition_t coal,boolean_t privileged,boolean_t efficient)547 i_coal_resource_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
548 {
549 (void)privileged;
550 (void)efficient;
551 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
552 coal->r.ledger = ledger_instantiate(coalition_task_ledger_template,
553 LEDGER_CREATE_ACTIVE_ENTRIES);
554 if (coal->r.ledger == NULL) {
555 return KERN_RESOURCE_SHORTAGE;
556 }
557
558 coal->r.resource_monitor_ledger = ledger_instantiate(coalition_ledger_template,
559 LEDGER_CREATE_ACTIVE_ENTRIES);
560 if (coal->r.resource_monitor_ledger == NULL) {
561 return KERN_RESOURCE_SHORTAGE;
562 }
563
564 queue_init(&coal->r.tasks);
565
566 return KERN_SUCCESS;
567 }
568
/*
 * Type hook: release the two ledgers created by i_coal_resource_init.
 * pre-condition: coalition unlocked, refcount=0, active_count=0,
 * termrequested=1, terminated=1, reaped=1 (see struct coalition_type).
 */
static void
i_coal_resource_dealloc(coalition_t coal)
{
	assert(coal && coal->type == COALITION_TYPE_RESOURCE);

	ledger_dereference(coal->r.ledger);
	ledger_dereference(coal->r.resource_monitor_ledger);
}
577
/*
 * Type hook: account a new member task into the resource coalition and
 * link it onto the coalition's task list.
 * pre-condition: coalition locked; task not yet on any resource
 * coalition's task list (asserted below).
 */
static kern_return_t
i_coal_resource_adopt_task(coalition_t coal, task_t task)
{
	struct i_resource_coalition *cr;

	assert(coal && coal->type == COALITION_TYPE_RESOURCE);
	assert(queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

	cr = &coal->r;
	cr->task_count++;

	/* dead_task_count can never exceed the tasks ever started here */
	if (cr->task_count < cr->dead_task_count) {
		panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
		    __func__, coal, coal->id, coal_type_str(coal->type),
		    cr->task_count, cr->dead_task_count);
	}

	/* If moving from 0->1 active tasks */
	if (cr->task_count - cr->dead_task_count == 1) {
		/* start the clock for time_nonempty accounting */
		cr->last_became_nonempty_time = mach_absolute_time();
	}

	/* put the task on the coalition's list of tasks */
	enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]);

	coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
	    task_pid(task), coal->id, cr->task_count, cr->dead_task_count,
	    cr->last_became_nonempty_time);

	return KERN_SUCCESS;
}
609
/*
 * Type hook: roll the departing task's resource usage into the coalition's
 * dead-task accounting, then unlink it from the task list.
 * pre-condition: coalition locked; task is a member of this coalition
 * (asserted below).
 */
static kern_return_t
i_coal_resource_remove_task(coalition_t coal, task_t task)
{
	struct i_resource_coalition *cr;

	assert(coal && coal->type == COALITION_TYPE_RESOURCE);
	assert(task->coalition[COALITION_TYPE_RESOURCE] == coal);
	assert(!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

	/*
	 * handle resource coalition accounting rollup for dead tasks
	 */
	cr = &coal->r;

	cr->dead_task_count++;

	if (cr->task_count < cr->dead_task_count) {
		panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
		    __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count);
	}

	/* If moving from 1->0 active tasks */
	if (cr->task_count - cr->dead_task_count == 0) {
		/* close out the current nonempty interval */
		uint64_t last_time_nonempty = mach_absolute_time() - cr->last_became_nonempty_time;
		cr->last_became_nonempty_time = 0;
		cr->time_nonempty += last_time_nonempty;
	}

	/* Do not roll up for exec'd task or exec copy task (would double-count) */
	if (!task_is_exec_copy(task) && !task_did_exec(task)) {
		ledger_rollup(cr->ledger, task->ledger);
		cr->bytesread += task->task_io_stats->disk_reads.size;
		cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
#if defined(__x86_64__)
		cr->gpu_time += task_gpu_utilisation(task);
#endif /* defined(__x86_64__) */

#if defined(__arm__) || defined(__arm64__)
		cr->energy += task_energy(task);
#endif /* defined(__arm__) || defined(__arm64__) */

		cr->logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
		cr->logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
		cr->logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
		cr->logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
		cr->logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
		cr->logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
		cr->logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
		cr->logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
#if CONFIG_PHYS_WRITE_ACCT
		cr->fs_metadata_writes += task->task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
		cr->cpu_ptime += task_cpu_ptime(task);
		task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos);
#if MONOTONIC
		/* fold in the task's fixed hardware counters */
		uint64_t counts[MT_CORE_NFIXED] = {};
		(void)mt_fixed_task_counts(task, counts);
		cr->cpu_cycles += counts[MT_CORE_CYCLES];
#if defined(MT_CORE_INSTRS)
		cr->cpu_instructions += counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
#endif /* MONOTONIC */
	}

	/* remove the task from the coalition's list */
	remqueue(&task->task_coalition[COALITION_TYPE_RESOURCE]);
	queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);

	coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
	    task_pid(task), coal->id, cr->task_count, cr->dead_task_count);

	return KERN_SUCCESS;
}
683
/*
 * Type hook: resource coalitions carry no per-task role state, so setting
 * a role is trivially successful.
 * pre-condition: coalition locked.
 */
static kern_return_t
i_coal_resource_set_taskrole(__unused coalition_t coal,
    __unused task_t task, __unused int role)
{
	return KERN_SUCCESS;
}
690
691 static int
i_coal_resource_get_taskrole(__unused coalition_t coal,__unused task_t task)692 i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task)
693 {
694 task_t t;
695
696 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
697
698 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
699 if (t == task) {
700 return COALITION_TASKROLE_UNDEF;
701 }
702 }
703
704 return -1;
705 }
706
707 static void
i_coal_resource_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))708 i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
709 {
710 task_t t;
711 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
712
713 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE])
714 callback(coal, ctx, t);
715 }
716
717 #if CONFIG_PHYS_WRITE_ACCT
718 extern uint64_t kernel_pm_writes;
719 #endif /* CONFIG_PHYS_WRITE_ACCT */
720
721 kern_return_t
coalition_resource_usage_internal(coalition_t coal,struct coalition_resource_usage * cru_out)722 coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out)
723 {
724 kern_return_t kr;
725 ledger_amount_t credit, debit;
726 int i;
727
728 if (coal->type != COALITION_TYPE_RESOURCE) {
729 return KERN_INVALID_ARGUMENT;
730 }
731
732 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
733 for (i = 0; i < COALITION_NUM_TYPES; i++) {
734 if (coal == corpse_coalition[i]) {
735 return KERN_INVALID_ARGUMENT;
736 }
737 }
738
739 ledger_t sum_ledger = ledger_instantiate(coalition_task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
740 if (sum_ledger == LEDGER_NULL) {
741 return KERN_RESOURCE_SHORTAGE;
742 }
743
744 coalition_lock(coal);
745
746 /*
747 * Start with the coalition's ledger, which holds the totals from all
748 * the dead tasks.
749 */
750 ledger_rollup(sum_ledger, coal->r.ledger);
751 uint64_t bytesread = coal->r.bytesread;
752 uint64_t byteswritten = coal->r.byteswritten;
753 uint64_t gpu_time = coal->r.gpu_time;
754 uint64_t energy = coal->r.energy;
755 uint64_t logical_immediate_writes = coal->r.logical_immediate_writes;
756 uint64_t logical_deferred_writes = coal->r.logical_deferred_writes;
757 uint64_t logical_invalidated_writes = coal->r.logical_invalidated_writes;
758 uint64_t logical_metadata_writes = coal->r.logical_metadata_writes;
759 uint64_t logical_immediate_writes_to_external = coal->r.logical_immediate_writes_to_external;
760 uint64_t logical_deferred_writes_to_external = coal->r.logical_deferred_writes_to_external;
761 uint64_t logical_invalidated_writes_to_external = coal->r.logical_invalidated_writes_to_external;
762 uint64_t logical_metadata_writes_to_external = coal->r.logical_metadata_writes_to_external;
763 #if CONFIG_PHYS_WRITE_ACCT
764 uint64_t fs_metadata_writes = coal->r.fs_metadata_writes;
765 #endif /* CONFIG_PHYS_WRITE_ACCT */
766 int64_t cpu_time_billed_to_me = 0;
767 int64_t cpu_time_billed_to_others = 0;
768 int64_t energy_billed_to_me = 0;
769 int64_t energy_billed_to_others = 0;
770 uint64_t cpu_ptime = coal->r.cpu_ptime;
771 uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES];
772 memcpy(cpu_time_eqos, coal->r.cpu_time_eqos, sizeof(cpu_time_eqos));
773 uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES];
774 memcpy(cpu_time_rqos, coal->r.cpu_time_rqos, sizeof(cpu_time_rqos));
775 uint64_t cpu_instructions = coal->r.cpu_instructions;
776 uint64_t cpu_cycles = coal->r.cpu_cycles;
777
778 /*
779 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
780 * out from under us, since we hold the coalition lock.
781 */
782 task_t task;
783 qe_foreach_element(task, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
784 /*
785 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
786 * Cannot take task lock after taking coaliton lock
787 */
788 if (task_is_exec_copy(task) || task_did_exec(task)) {
789 continue;
790 }
791
792 ledger_rollup(sum_ledger, task->ledger);
793 bytesread += task->task_io_stats->disk_reads.size;
794 byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
795 #if defined(__x86_64__)
796 gpu_time += task_gpu_utilisation(task);
797 #endif /* defined(__x86_64__) */
798
799 #if defined(__arm__) || defined(__arm64__)
800 energy += task_energy(task);
801 #endif /* defined(__arm__) || defined(__arm64__) */
802
803 logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes;
804 logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes;
805 logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes;
806 logical_metadata_writes += task->task_writes_counters_internal.task_metadata_writes;
807 logical_immediate_writes_to_external += task->task_writes_counters_external.task_immediate_writes;
808 logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes;
809 logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes;
810 logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes;
811 #if CONFIG_PHYS_WRITE_ACCT
812 fs_metadata_writes += task->task_fs_metadata_writes;
813 #endif /* CONFIG_PHYS_WRITE_ACCT */
814
815 cpu_ptime += task_cpu_ptime(task);
816 task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos);
817 #if MONOTONIC
818 uint64_t counts[MT_CORE_NFIXED] = {};
819 (void)mt_fixed_task_counts(task, counts);
820 cpu_cycles += counts[MT_CORE_CYCLES];
821 #if defined(MT_CORE_INSTRS)
822 cpu_instructions += counts[MT_CORE_INSTRS];
823 #endif /* defined(MT_CORE_INSTRS) */
824 #endif /* MONOTONIC */
825 }
826
827 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_me, (int64_t *)&cpu_time_billed_to_me);
828 if (kr != KERN_SUCCESS || cpu_time_billed_to_me < 0) {
829 cpu_time_billed_to_me = 0;
830 }
831
832 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_others, (int64_t *)&cpu_time_billed_to_others);
833 if (kr != KERN_SUCCESS || cpu_time_billed_to_others < 0) {
834 cpu_time_billed_to_others = 0;
835 }
836
837 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_me, (int64_t *)&energy_billed_to_me);
838 if (kr != KERN_SUCCESS || energy_billed_to_me < 0) {
839 energy_billed_to_me = 0;
840 }
841
842 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_others, (int64_t *)&energy_billed_to_others);
843 if (kr != KERN_SUCCESS || energy_billed_to_others < 0) {
844 energy_billed_to_others = 0;
845 }
846
847 /* collect information from the coalition itself */
848 cru_out->tasks_started = coal->r.task_count;
849 cru_out->tasks_exited = coal->r.dead_task_count;
850
851 uint64_t time_nonempty = coal->r.time_nonempty;
852 uint64_t last_became_nonempty_time = coal->r.last_became_nonempty_time;
853
854 coalition_unlock(coal);
855
856 /* Copy the totals out of sum_ledger */
857 kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time,
858 &credit, &debit);
859 if (kr != KERN_SUCCESS) {
860 credit = 0;
861 }
862 cru_out->cpu_time = credit;
863 cru_out->cpu_time_billed_to_me = (uint64_t)cpu_time_billed_to_me;
864 cru_out->cpu_time_billed_to_others = (uint64_t)cpu_time_billed_to_others;
865 cru_out->energy_billed_to_me = (uint64_t)energy_billed_to_me;
866 cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others;
867
868 kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups,
869 &credit, &debit);
870 if (kr != KERN_SUCCESS) {
871 credit = 0;
872 }
873 cru_out->interrupt_wakeups = credit;
874
875 kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups,
876 &credit, &debit);
877 if (kr != KERN_SUCCESS) {
878 credit = 0;
879 }
880 cru_out->platform_idle_wakeups = credit;
881
882 cru_out->bytesread = bytesread;
883 cru_out->byteswritten = byteswritten;
884 cru_out->gpu_time = gpu_time;
885 cru_out->energy = energy;
886 cru_out->logical_immediate_writes = logical_immediate_writes;
887 cru_out->logical_deferred_writes = logical_deferred_writes;
888 cru_out->logical_invalidated_writes = logical_invalidated_writes;
889 cru_out->logical_metadata_writes = logical_metadata_writes;
890 cru_out->logical_immediate_writes_to_external = logical_immediate_writes_to_external;
891 cru_out->logical_deferred_writes_to_external = logical_deferred_writes_to_external;
892 cru_out->logical_invalidated_writes_to_external = logical_invalidated_writes_to_external;
893 cru_out->logical_metadata_writes_to_external = logical_metadata_writes_to_external;
894 #if CONFIG_PHYS_WRITE_ACCT
895 cru_out->fs_metadata_writes = fs_metadata_writes;
896 #else
897 cru_out->fs_metadata_writes = 0;
898 #endif /* CONFIG_PHYS_WRITE_ACCT */
899 cru_out->cpu_ptime = cpu_ptime;
900 cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES;
901 memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos));
902 cru_out->cpu_cycles = cpu_cycles;
903 cru_out->cpu_instructions = cpu_instructions;
904 ledger_dereference(sum_ledger);
905 sum_ledger = LEDGER_NULL;
906
907 #if CONFIG_PHYS_WRITE_ACCT
908 // kernel_pm_writes are only recorded under kernel_task coalition
909 if (coalition_id(coal) == COALITION_ID_KERNEL) {
910 cru_out->pm_writes = kernel_pm_writes;
911 } else {
912 cru_out->pm_writes = 0;
913 }
914 #else
915 cru_out->pm_writes = 0;
916 #endif /* CONFIG_PHYS_WRITE_ACCT */
917
918 if (last_became_nonempty_time) {
919 time_nonempty += mach_absolute_time() - last_became_nonempty_time;
920 }
921 absolutetime_to_nanoseconds(time_nonempty, &cru_out->time_nonempty);
922
923 return KERN_SUCCESS;
924 }
925
926 /*
927 *
928 * COALITION_TYPE_JETSAM
929 *
930 */
/*
 * Initialize the jetsam-specific state of a new coalition: no leader,
 * empty role queues, and (when thread groups are configured) a thread
 * group matching the coalition's role.
 */
static kern_return_t
i_coal_jetsam_init(coalition_t coal, boolean_t privileged, boolean_t efficient)
{
	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	(void)privileged;
	/* 'efficient' is still consulted below when a new group is created */
	(void)efficient;

	/* no leader until one is assigned via i_coal_jetsam_set_taskrole() */
	coal->j.leader = TASK_NULL;
	queue_head_init(coal->j.extensions);
	queue_head_init(coal->j.services);
	queue_head_init(coal->j.other);

#if CONFIG_THREAD_GROUPS
	/* system/background coalitions attach to the corresponding
	 * pre-existing thread group; everything else gets a fresh one */
	switch (coal->role) {
	case COALITION_ROLE_SYSTEM:
		coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM);
		break;
	case COALITION_ROLE_BACKGROUND:
		coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND);
		break;
	default:
		coal->j.thread_group = thread_group_create_and_retain(efficient);
	}
	assert(coal->j.thread_group != NULL);
#endif
	return KERN_SUCCESS;
}
958
/*
 * Tear down the jetsam-specific state of a coalition that is being
 * freed. All member tasks must already have been removed: every role
 * queue empty and no leader.
 */
static void
i_coal_jetsam_dealloc(__unused coalition_t coal)
{
	assert(coal && coal->type == COALITION_TYPE_JETSAM);

	/* the coalition should be completely clear at this point */
	assert(queue_empty(&coal->j.extensions));
	assert(queue_empty(&coal->j.services));
	assert(queue_empty(&coal->j.other));
	assert(coal->j.leader == TASK_NULL);

#if CONFIG_THREAD_GROUPS
	/* disassociate from the thread group (drops the init-time retain) */
	assert(coal->j.thread_group != NULL);
	thread_group_release(coal->j.thread_group);
	coal->j.thread_group = NULL;
#endif
}
977
978 static kern_return_t
i_coal_jetsam_adopt_task(coalition_t coal,task_t task)979 i_coal_jetsam_adopt_task(coalition_t coal, task_t task)
980 {
981 struct i_jetsam_coalition *cj;
982 assert(coal && coal->type == COALITION_TYPE_JETSAM);
983
984 cj = &coal->j;
985
986 assert(queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
987
988 /* put each task initially in the "other" list */
989 enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]);
990 coal_dbg("coalition %lld adopted PID:%d as UNDEF",
991 coal->id, task_pid(task));
992
993 return KERN_SUCCESS;
994 }
995
996 static kern_return_t
i_coal_jetsam_remove_task(coalition_t coal,task_t task)997 i_coal_jetsam_remove_task(coalition_t coal, task_t task)
998 {
999 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1000 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
1001
1002 coal_dbg("removing PID:%d from coalition id:%lld",
1003 task_pid(task), coal->id);
1004
1005 if (task == coal->j.leader) {
1006 coal->j.leader = NULL;
1007 coal_dbg(" PID:%d was the leader!", task_pid(task));
1008 } else {
1009 assert(!queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
1010 }
1011
1012 /* remove the task from the specific coalition role queue */
1013 remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
1014 queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);
1015
1016 return KERN_SUCCESS;
1017 }
1018
/*
 * Assign 'role' to 'task' within its jetsam coalition by moving the
 * task into the leader slot or onto the matching role queue.
 * Invoked via coal_call with the coalition lock held (see
 * coalitions_set_roles). Panics on an invalid role.
 */
static kern_return_t
i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role)
{
	struct i_jetsam_coalition *cj;
	queue_t q = NULL;
	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

	cj = &coal->j;

	switch (role) {
	case COALITION_TASKROLE_LEADER:
		coal_dbg("setting PID:%d as LEADER of %lld",
		    task_pid(task), coal->id);
		if (cj->leader != TASK_NULL) {
			/* re-queue the exiting leader onto the "other" list */
			coal_dbg(" re-queue existing leader (%d) as OTHER",
			    task_pid(cj->leader));
			re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]);
		}
		/*
		 * remove the task from the "other" list
		 * (where it was put by default)
		 */
		remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
		queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);

		/* set the coalition leader (leader lives off-queue) */
		cj->leader = task;
		break;
	case COALITION_TASKROLE_XPC:
		coal_dbg("setting PID:%d as XPC in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->services;
		break;
	case COALITION_TASKROLE_EXT:
		coal_dbg("setting PID:%d as EXT in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->extensions;
		break;
	case COALITION_TASKROLE_NONE:
		/*
		 * Tasks with a role of "none" should fall through to an
		 * undefined role so long as the task is currently a member
		 * of the coalition. This scenario can happen if a task is
		 * killed (usually via jetsam) during exec.
		 */
		if (task->coalition[COALITION_TYPE_JETSAM] != coal) {
			panic("%s: task %p attempting to set role %d "
			    "in coalition %p to which it does not belong!", __func__, task, role, coal);
		}
		OS_FALLTHROUGH;
	case COALITION_TASKROLE_UNDEF:
		coal_dbg("setting PID:%d as UNDEF in %lld",
		    task_pid(task), coal->id);
		q = (queue_t)&cj->other;
		break;
	default:
		panic("%s: invalid role(%d) for task", __func__, role);
		return KERN_INVALID_ARGUMENT;
	}

	/* move the task onto the selected role queue (leader case has q == NULL) */
	if (q != NULL) {
		re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]);
	}

	return KERN_SUCCESS;
}
1087
/*
 * Determine the role of 'task' within its jetsam coalition by checking
 * the leader slot and then searching each role queue in turn.
 * Returns COALITION_TASKROLE_NONE if the task is on none of them.
 * Invoked via coal_call with the coalition lock held (see
 * task_coalition_roles).
 */
int
i_coal_jetsam_get_taskrole(coalition_t coal, task_t task)
{
	struct i_jetsam_coalition *cj;
	task_t t;

	assert(coal && coal->type == COALITION_TYPE_JETSAM);
	assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

	cj = &coal->j;

	/* the leader is kept off-queue in its own slot */
	if (task == cj->leader) {
		return COALITION_TASKROLE_LEADER;
	}

	qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) {
		if (t == task) {
			return COALITION_TASKROLE_XPC;
		}
	}

	qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) {
		if (t == task) {
			return COALITION_TASKROLE_EXT;
		}
	}

	qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) {
		if (t == task) {
			return COALITION_TASKROLE_UNDEF;
		}
	}

	/* task not in the coalition?! */
	return COALITION_TASKROLE_NONE;
}
1124
1125 static void
i_coal_jetsam_iterate_tasks(coalition_t coal,void * ctx,void (* callback)(coalition_t,void *,task_t))1126 i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
1127 {
1128 struct i_jetsam_coalition *cj;
1129 task_t t;
1130
1131 assert(coal && coal->type == COALITION_TYPE_JETSAM);
1132
1133 cj = &coal->j;
1134
1135 if (cj->leader) {
1136 callback(coal, ctx, cj->leader);
1137 }
1138
1139 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM])
1140 callback(coal, ctx, t);
1141
1142 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM])
1143 callback(coal, ctx, t);
1144
1145 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM])
1146 callback(coal, ctx, t);
1147 }
1148
1149
1150 /*
1151 *
1152 * Main Coalition implementation
1153 *
1154 */
1155
1156 /*
1157 * coalition_create_internal
1158 * Returns: New coalition object, referenced for the caller and unlocked.
1159 * Condition: coalitions_list_lock must be UNLOCKED.
1160 */
1161 kern_return_t
coalition_create_internal(int type,int role,boolean_t privileged,boolean_t efficient,coalition_t * out,uint64_t * coalition_id)1162 coalition_create_internal(int type, int role, boolean_t privileged, boolean_t efficient, coalition_t *out, uint64_t *coalition_id)
1163 {
1164 kern_return_t kr;
1165 struct coalition *new_coal;
1166 uint64_t cid;
1167 uint32_t ctype;
1168
1169 if (type < 0 || type > COALITION_TYPE_MAX) {
1170 return KERN_INVALID_ARGUMENT;
1171 }
1172
1173 new_coal = zalloc_flags(coalition_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1174
1175 new_coal->type = type;
1176 new_coal->role = role;
1177
1178 /* initialize type-specific resources */
1179 kr = coal_call(new_coal, init, privileged, efficient);
1180 if (kr != KERN_SUCCESS) {
1181 zfree(coalition_zone, new_coal);
1182 return kr;
1183 }
1184
1185 /* One for caller, one for coalitions list */
1186 new_coal->ref_count = 2;
1187
1188 new_coal->privileged = privileged ? TRUE : FALSE;
1189 new_coal->efficient = efficient ? TRUE : FALSE;
1190 #if DEVELOPMENT || DEBUG
1191 new_coal->should_notify = 1;
1192 #endif
1193
1194 lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, LCK_ATTR_NULL);
1195
1196 lck_rw_lock_exclusive(&coalitions_list_lock);
1197 new_coal->id = coalition_next_id++;
1198 coalition_count++;
1199 enqueue_tail(&coalitions_q, &new_coal->coalitions);
1200
1201 #if CONFIG_THREAD_GROUPS
1202 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
1203 new_coal->id, new_coal->type,
1204 (new_coal->type == COALITION_TYPE_JETSAM && new_coal->j.thread_group) ?
1205 thread_group_get_id(new_coal->j.thread_group) : 0);
1206
1207 #else
1208 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
1209 new_coal->id, new_coal->type);
1210 #endif
1211 cid = new_coal->id;
1212 ctype = new_coal->type;
1213 lck_rw_unlock_exclusive(&coalitions_list_lock);
1214
1215 coal_dbg("id:%llu, type:%s", cid, coal_type_str(ctype));
1216
1217 if (coalition_id != NULL) {
1218 *coalition_id = cid;
1219 }
1220
1221 *out = new_coal;
1222 return KERN_SUCCESS;
1223 }
1224
1225 /*
1226 * coalition_release
1227 * Condition: coalition must be UNLOCKED.
1228 * */
1229 void
coalition_release(coalition_t coal)1230 coalition_release(coalition_t coal)
1231 {
1232 /* TODO: This can be done with atomics. */
1233 coalition_lock(coal);
1234 coal->ref_count--;
1235
1236 #if COALITION_DEBUG
1237 uint32_t rc = coal->ref_count;
1238 uint32_t ac = coal->active_count;
1239 #endif /* COALITION_DEBUG */
1240
1241 coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
1242 coal->id, coal_type_str(coal->type), rc, ac,
1243 rc <= 0 ? ", will deallocate now" : "");
1244
1245 if (coal->ref_count > 0) {
1246 coalition_unlock(coal);
1247 return;
1248 }
1249
1250 assert(coal->termrequested);
1251 assert(coal->terminated);
1252 assert(coal->active_count == 0);
1253 assert(coal->reaped);
1254 assert(coal->focal_task_count == 0);
1255 assert(coal->nonfocal_task_count == 0);
1256 #if CONFIG_THREAD_GROUPS
1257 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
1258 coal->id, coal->type,
1259 coal->type == COALITION_TYPE_JETSAM ?
1260 coal->j.thread_group : 0);
1261 #else
1262 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
1263 coal->id, coal->type);
1264 #endif
1265
1266 coal_call(coal, dealloc);
1267
1268 coalition_unlock(coal);
1269
1270 lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);
1271
1272 zfree(coalition_zone, coal);
1273 }
1274
1275 /*
1276 * coalition_find_by_id_internal
1277 * Returns: Coalition object with specified id, NOT referenced.
1278 * If not found, returns COALITION_NULL.
1279 * If found, returns a locked coalition.
1280 *
1281 * Condition: No locks held
1282 */
1283 static coalition_t
coalition_find_by_id_internal(uint64_t coal_id)1284 coalition_find_by_id_internal(uint64_t coal_id)
1285 {
1286 coalition_t coal;
1287
1288 if (coal_id == 0) {
1289 return COALITION_NULL;
1290 }
1291
1292 lck_rw_lock_shared(&coalitions_list_lock);
1293 qe_foreach_element(coal, &coalitions_q, coalitions) {
1294 if (coal->id == coal_id) {
1295 coalition_lock(coal);
1296 lck_rw_unlock_shared(&coalitions_list_lock);
1297 return coal;
1298 }
1299 }
1300 lck_rw_unlock_shared(&coalitions_list_lock);
1301
1302 return COALITION_NULL;
1303 }
1304
1305 /*
1306 * coalition_find_by_id
1307 * Returns: Coalition object with specified id, referenced.
1308 * Condition: coalitions_list_lock must be UNLOCKED.
1309 */
1310 coalition_t
coalition_find_by_id(uint64_t cid)1311 coalition_find_by_id(uint64_t cid)
1312 {
1313 coalition_t coal = coalition_find_by_id_internal(cid);
1314
1315 if (coal == COALITION_NULL) {
1316 return COALITION_NULL;
1317 }
1318
1319 /* coal is locked */
1320
1321 if (coal->reaped) {
1322 coalition_unlock(coal);
1323 return COALITION_NULL;
1324 }
1325
1326 if (coal->ref_count == 0) {
1327 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u",
1328 coal, coal->id, coal_type_str(coal->type), coal->active_count);
1329 }
1330 coal->ref_count++;
1331 #if COALITION_DEBUG
1332 uint32_t rc = coal->ref_count;
1333 #endif
1334
1335 coalition_unlock(coal);
1336
1337 coal_dbg("id:%llu type:%s ref_count:%u",
1338 coal->id, coal_type_str(coal->type), rc);
1339
1340 return coal;
1341 }
1342
1343 /*
1344 * coalition_find_and_activate_by_id
1345 * Returns: Coalition object with specified id, referenced, and activated.
1346 * Condition: coalitions_list_lock must be UNLOCKED.
1347 * This is the function to use when putting a 'new' thing into a coalition,
1348 * like posix_spawn of an XPC service by launchd.
1349 * See also coalition_extend_active.
1350 */
1351 coalition_t
coalition_find_and_activate_by_id(uint64_t cid)1352 coalition_find_and_activate_by_id(uint64_t cid)
1353 {
1354 coalition_t coal = coalition_find_by_id_internal(cid);
1355
1356 if (coal == COALITION_NULL) {
1357 return COALITION_NULL;
1358 }
1359
1360 /* coal is locked */
1361
1362 if (coal->reaped || coal->terminated) {
1363 /* Too late to put something new into this coalition, it's
1364 * already on its way out the door */
1365 coalition_unlock(coal);
1366 return COALITION_NULL;
1367 }
1368
1369 if (coal->ref_count == 0) {
1370 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u",
1371 coal, coal->id, coal_type_str(coal->type), coal->active_count);
1372 }
1373
1374 coal->ref_count++;
1375 coal->active_count++;
1376
1377 #if COALITION_DEBUG
1378 uint32_t rc = coal->ref_count;
1379 uint32_t ac = coal->active_count;
1380 #endif
1381
1382 coalition_unlock(coal);
1383
1384 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
1385 coal->id, coal_type_str(coal->type), rc, ac);
1386
1387 return coal;
1388 }
1389
/* Return the unique 64-bit identifier of 'coal'. */
uint64_t
coalition_id(coalition_t coal)
{
	assert(coal != COALITION_NULL);
	return coal->id;
}
1396
1397 void
task_coalition_ids(task_t task,uint64_t ids[COALITION_NUM_TYPES])1398 task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES])
1399 {
1400 int i;
1401 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1402 if (task->coalition[i]) {
1403 ids[i] = task->coalition[i]->id;
1404 } else {
1405 ids[i] = 0;
1406 }
1407 }
1408 }
1409
1410 void
task_coalition_roles(task_t task,int roles[COALITION_NUM_TYPES])1411 task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES])
1412 {
1413 int i;
1414 memset(roles, 0, COALITION_NUM_TYPES * sizeof(roles[0]));
1415
1416 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1417 if (task->coalition[i]) {
1418 coalition_lock(task->coalition[i]);
1419 roles[i] = coal_call(task->coalition[i],
1420 get_taskrole, task);
1421 coalition_unlock(task->coalition[i]);
1422 } else {
1423 roles[i] = COALITION_TASKROLE_NONE;
1424 }
1425 }
1426 }
1427
1428
/* Return the type (COALITION_TYPE_*) of 'coal'. */
int
coalition_type(coalition_t coal)
{
	return coal->type;
}
1434
/* TRUE once termination has been requested for 'coal'. */
boolean_t
coalition_term_requested(coalition_t coal)
{
	return coal->termrequested;
}
1440
/* TRUE once 'coal' has been marked terminated (active count hit zero
 * after a termination request). */
boolean_t
coalition_is_terminated(coalition_t coal)
{
	return coal->terminated;
}
1446
/* TRUE once 'coal' has been reaped (see coalition_reap_internal). */
boolean_t
coalition_is_reaped(coalition_t coal)
{
	return coal->reaped;
}
1452
/* TRUE if 'coal' was created privileged, or if the global
 * unrestrict_coalition_syscalls override is enabled. */
boolean_t
coalition_is_privileged(coalition_t coal)
{
	return coal->privileged || unrestrict_coalition_syscalls;
}
1458
1459 boolean_t
task_is_in_privileged_coalition(task_t task,int type)1460 task_is_in_privileged_coalition(task_t task, int type)
1461 {
1462 if (type < 0 || type > COALITION_TYPE_MAX) {
1463 return FALSE;
1464 }
1465 if (unrestrict_coalition_syscalls) {
1466 return TRUE;
1467 }
1468 if (!task->coalition[type]) {
1469 return FALSE;
1470 }
1471 return task->coalition[type]->privileged;
1472 }
1473
1474 void
task_coalition_update_gpu_stats(task_t task,uint64_t gpu_ns_delta)1475 task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta)
1476 {
1477 coalition_t coal;
1478
1479 assert(task != TASK_NULL);
1480 if (gpu_ns_delta == 0) {
1481 return;
1482 }
1483
1484 coal = task->coalition[COALITION_TYPE_RESOURCE];
1485 assert(coal != COALITION_NULL);
1486
1487 coalition_lock(coal);
1488 coal->r.gpu_time += gpu_ns_delta;
1489 coalition_unlock(coal);
1490 }
1491
1492 boolean_t
task_coalition_adjust_focal_count(task_t task,int count,uint32_t * new_count)1493 task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count)
1494 {
1495 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1496 if (coal == COALITION_NULL) {
1497 return FALSE;
1498 }
1499
1500 *new_count = os_atomic_add(&coal->focal_task_count, count, relaxed);
1501 assert(*new_count != UINT32_MAX);
1502 return TRUE;
1503 }
1504
1505 uint32_t
task_coalition_focal_count(task_t task)1506 task_coalition_focal_count(task_t task)
1507 {
1508 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1509 if (coal == COALITION_NULL) {
1510 return 0;
1511 }
1512
1513 return coal->focal_task_count;
1514 }
1515
1516 boolean_t
task_coalition_adjust_nonfocal_count(task_t task,int count,uint32_t * new_count)1517 task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count)
1518 {
1519 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1520 if (coal == COALITION_NULL) {
1521 return FALSE;
1522 }
1523
1524 *new_count = os_atomic_add(&coal->nonfocal_task_count, count, relaxed);
1525 assert(*new_count != UINT32_MAX);
1526 return TRUE;
1527 }
1528
1529 uint32_t
task_coalition_nonfocal_count(task_t task)1530 task_coalition_nonfocal_count(task_t task)
1531 {
1532 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1533 if (coal == COALITION_NULL) {
1534 return 0;
1535 }
1536
1537 return coal->nonfocal_task_count;
1538 }
1539
1540 #if CONFIG_THREAD_GROUPS
1541 struct thread_group *
task_coalition_get_thread_group(task_t task)1542 task_coalition_get_thread_group(task_t task)
1543 {
1544 coalition_t coal = task->coalition[COALITION_TYPE_JETSAM];
1545 /* return system thread group for non-jetsam coalitions */
1546 if (coal == COALITION_NULL) {
1547 return init_coalition[COALITION_TYPE_JETSAM]->j.thread_group;
1548 }
1549 return coal->j.thread_group;
1550 }
1551
1552
1553 struct thread_group *
kdp_coalition_get_thread_group(coalition_t coal)1554 kdp_coalition_get_thread_group(coalition_t coal)
1555 {
1556 if (coal->type != COALITION_TYPE_JETSAM) {
1557 return NULL;
1558 }
1559 assert(coal->j.thread_group != NULL);
1560 return coal->j.thread_group;
1561 }
1562
1563 struct thread_group *
coalition_get_thread_group(coalition_t coal)1564 coalition_get_thread_group(coalition_t coal)
1565 {
1566 if (coal->type != COALITION_TYPE_JETSAM) {
1567 return NULL;
1568 }
1569 assert(coal->j.thread_group != NULL);
1570 return thread_group_retain(coal->j.thread_group);
1571 }
1572
/*
 * Install 'tg' as the thread group of a jetsam coalition: the
 * coalition keeps the caller's reference on 'tg' (no retain here) and
 * the reference on the previous group is dropped. No-op for
 * non-jetsam coalitions.
 */
void
coalition_set_thread_group(coalition_t coal, struct thread_group *tg)
{
	assert(coal != COALITION_NULL);
	assert(tg != NULL);

	if (coal->type != COALITION_TYPE_JETSAM) {
		return;
	}
	struct thread_group *old_tg = coal->j.thread_group;
	assert(old_tg != NULL);
	coal->j.thread_group = tg;

	/* trace the swap before releasing the old group */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_THREAD_GROUP_SET),
	    coal->id, coal->type, thread_group_get_id(tg));

	thread_group_release(old_tg);
}
1591
1592 void
task_coalition_thread_group_focal_update(task_t task)1593 task_coalition_thread_group_focal_update(task_t task)
1594 {
1595 assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL);
1596 thread_group_flags_update_lock();
1597 uint32_t focal_count = task_coalition_focal_count(task);
1598 if (focal_count) {
1599 thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1600 } else {
1601 thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
1602 }
1603 thread_group_flags_update_unlock();
1604 }
1605
1606 #endif
1607
/*
 * Invoke 'callback' on every task currently in 'coal', using the
 * type-specific iterator. The coalition lock is held for the whole
 * iteration, so the callback must not try to take it again.
 */
void
coalition_for_each_task(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t))
{
	assert(coal != COALITION_NULL);

	coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
	    coal, coal->id, coal_type_str(coal->type), coal->active_count);

	coalition_lock(coal);

	coal_call(coal, iterate_tasks, ctx, callback);

	coalition_unlock(coal);
}
1623
1624
/*
 * Drop one activation on 'coal' (the counterpart of
 * coalition_find_and_activate_by_id / coalition_adopt_task_internal).
 * If termination was requested and this was the last activation, mark
 * the coalition terminated and send the user-space notification.
 */
void
coalition_remove_active(coalition_t coal)
{
	coalition_lock(coal);

	assert(!coal->reaped);
	assert(coal->active_count > 0);

	coal->active_count--;

	boolean_t do_notify = FALSE;
	uint64_t notify_id = 0;
	uint32_t notify_flags = 0;
	if (coal->termrequested && coal->active_count == 0) {
		/* We only notify once, when active_count reaches zero.
		 * We just decremented, so if it reached zero, we mustn't have
		 * notified already.
		 */
		assert(!coal->terminated);
		coal->terminated = TRUE;

		assert(!coal->notified);

		coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
		do_notify = coal->should_notify;
#else
		do_notify = TRUE;
#endif
		notify_id = coal->id;
		notify_flags = 0;
	}

#if COALITION_DEBUG
	uint64_t cid = coal->id;
	uint32_t rc = coal->ref_count;
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
	    cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " ");

	/* deliver the notification outside the coalition lock */
	if (do_notify) {
		coalition_notify_user(notify_id, notify_flags);
	}
}
1673
1674 /* Used for kernel_task, launchd, launchd's early boot tasks... */
1675 kern_return_t
coalitions_adopt_init_task(task_t task)1676 coalitions_adopt_init_task(task_t task)
1677 {
1678 kern_return_t kr;
1679 kr = coalitions_adopt_task(init_coalition, task);
1680 if (kr != KERN_SUCCESS) {
1681 panic("failed to adopt task %p into default coalition: %d", task, kr);
1682 }
1683 return kr;
1684 }
1685
1686 /* Used for forked corpses. */
1687 kern_return_t
coalitions_adopt_corpse_task(task_t task)1688 coalitions_adopt_corpse_task(task_t task)
1689 {
1690 kern_return_t kr;
1691 kr = coalitions_adopt_task(corpse_coalition, task);
1692 if (kr != KERN_SUCCESS) {
1693 panic("failed to adopt task %p into corpse coalition: %d", task, kr);
1694 }
1695 return kr;
1696 }
1697
1698 /*
1699 * coalition_adopt_task_internal
1700 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
1701 * is already terminated.
1702 */
1703 static kern_return_t
coalition_adopt_task_internal(coalition_t coal,task_t task)1704 coalition_adopt_task_internal(coalition_t coal, task_t task)
1705 {
1706 kern_return_t kr;
1707
1708 if (task->coalition[coal->type]) {
1709 return KERN_ALREADY_IN_SET;
1710 }
1711
1712 coalition_lock(coal);
1713
1714 if (coal->reaped || coal->terminated) {
1715 coalition_unlock(coal);
1716 return KERN_TERMINATED;
1717 }
1718
1719 kr = coal_call(coal, adopt_task, task);
1720 if (kr != KERN_SUCCESS) {
1721 goto out_unlock;
1722 }
1723
1724 coal->active_count++;
1725
1726 coal->ref_count++;
1727
1728 task->coalition[coal->type] = coal;
1729
1730 out_unlock:
1731 #if COALITION_DEBUG
1732 (void)coal; /* need expression after label */
1733 uint64_t cid = coal->id;
1734 uint32_t rc = coal->ref_count;
1735 uint32_t ct = coal->type;
1736 #endif
1737 if (get_task_uniqueid(task) != UINT64_MAX) {
1738 /* On 32-bit targets, uniqueid will get truncated to 32 bits */
1739 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT),
1740 coal->id, get_task_uniqueid(task));
1741 }
1742
1743 coalition_unlock(coal);
1744
1745 coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
1746 task_pid(task), cid, coal_type_str(ct), rc, kr);
1747 return kr;
1748 }
1749
/*
 * Remove 'task' from its coalition of the given type and drop the
 * activation that adoption took. Returns KERN_SUCCESS (and does
 * nothing) if the task has no coalition of that type.
 */
static kern_return_t
coalition_remove_task_internal(task_t task, int type)
{
	kern_return_t kr;

	coalition_t coal = task->coalition[type];

	if (!coal) {
		return KERN_SUCCESS;
	}

	assert(coal->type == (uint32_t)type);

	coalition_lock(coal);

	/* type-specific removal (e.g. jetsam role queues) */
	kr = coal_call(coal, remove_task, task);

#if COALITION_DEBUG
	uint64_t cid = coal->id;
	uint32_t rc = coal->ref_count;
	int ac = coal->active_count;
	int ct = coal->type;
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE),
	    coal->id, get_task_uniqueid(task));
	coalition_unlock(coal);

	coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
	    cid, coal_type_str(ct), rc, ac, kr);

	/* may fire the termination notification if this was the last
	 * active task of a termination-requested coalition */
	coalition_remove_active(coal);

	return kr;
}
1784
1785 /*
1786 * coalitions_adopt_task
1787 * Condition: All coalitions must be referenced and unlocked.
1788 * Will fail if any coalition is already terminated.
1789 */
1790 kern_return_t
coalitions_adopt_task(coalition_t * coals,task_t task)1791 coalitions_adopt_task(coalition_t *coals, task_t task)
1792 {
1793 int i;
1794 kern_return_t kr;
1795
1796 if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1797 return KERN_INVALID_ARGUMENT;
1798 }
1799
1800 /* verify that the incoming coalitions are what they say they are */
1801 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1802 if (coals[i] && coals[i]->type != (uint32_t)i) {
1803 return KERN_INVALID_ARGUMENT;
1804 }
1805 }
1806
1807 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1808 kr = KERN_SUCCESS;
1809 if (coals[i]) {
1810 kr = coalition_adopt_task_internal(coals[i], task);
1811 }
1812 if (kr != KERN_SUCCESS) {
1813 /* dis-associate any coalitions that just adopted this task */
1814 while (--i >= 0) {
1815 if (task->coalition[i]) {
1816 coalition_remove_task_internal(task, i);
1817 }
1818 }
1819 break;
1820 }
1821 }
1822 return kr;
1823 }
1824
1825 /*
1826 * coalitions_remove_task
1827 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1828 */
1829 kern_return_t
coalitions_remove_task(task_t task)1830 coalitions_remove_task(task_t task)
1831 {
1832 kern_return_t kr;
1833 int i;
1834
1835 task_lock(task);
1836 if (!task_is_coalition_member(task)) {
1837 task_unlock(task);
1838 return KERN_SUCCESS;
1839 }
1840
1841 task_clear_coalition_member(task);
1842 task_unlock(task);
1843
1844 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1845 kr = coalition_remove_task_internal(task, i);
1846 assert(kr == KERN_SUCCESS);
1847 }
1848
1849 return kr;
1850 }
1851
1852 /*
1853 * task_release_coalitions
1854 * helper function to release references to all coalitions in which
1855 * 'task' is a member.
1856 */
1857 void
task_release_coalitions(task_t task)1858 task_release_coalitions(task_t task)
1859 {
1860 int i;
1861 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1862 if (task->coalition[i]) {
1863 coalition_release(task->coalition[i]);
1864 } else if (i == COALITION_TYPE_RESOURCE) {
1865 panic("deallocating task %p was not a member of a resource coalition", task);
1866 }
1867 }
1868 }
1869
1870 /*
1871 * coalitions_set_roles
1872 * for each type of coalition, if the task is a member of a coalition of
1873 * that type (given in the coalitions parameter) then set the role of
1874 * the task within that that coalition.
1875 */
1876 kern_return_t
coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],task_t task,int roles[COALITION_NUM_TYPES])1877 coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],
1878 task_t task, int roles[COALITION_NUM_TYPES])
1879 {
1880 kern_return_t kr = KERN_SUCCESS;
1881 int i;
1882
1883 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1884 if (!coalitions[i]) {
1885 continue;
1886 }
1887 coalition_lock(coalitions[i]);
1888 kr = coal_call(coalitions[i], set_taskrole, task, roles[i]);
1889 coalition_unlock(coalitions[i]);
1890 assert(kr == KERN_SUCCESS);
1891 }
1892
1893 return kr;
1894 }
1895
1896 /*
1897 * coalition_terminate_internal
1898 * Condition: Coalition must be referenced and UNLOCKED.
1899 */
1900 kern_return_t
coalition_request_terminate_internal(coalition_t coal)1901 coalition_request_terminate_internal(coalition_t coal)
1902 {
1903 assert(coal->type >= 0 && coal->type <= COALITION_TYPE_MAX);
1904
1905 if (coal == init_coalition[coal->type]) {
1906 return KERN_DEFAULT_SET;
1907 }
1908
1909 coalition_lock(coal);
1910
1911 if (coal->reaped) {
1912 coalition_unlock(coal);
1913 return KERN_INVALID_NAME;
1914 }
1915
1916 if (coal->terminated || coal->termrequested) {
1917 coalition_unlock(coal);
1918 return KERN_TERMINATED;
1919 }
1920
1921 coal->termrequested = TRUE;
1922
1923 boolean_t do_notify = FALSE;
1924 uint64_t note_id = 0;
1925 uint32_t note_flags = 0;
1926
1927 if (coal->active_count == 0) {
1928 /*
1929 * We only notify once, when active_count reaches zero.
1930 * We just set termrequested to zero. If the active count
1931 * was already at zero (tasks died before we could request
1932 * a termination notification), we should notify.
1933 */
1934 assert(!coal->terminated);
1935 coal->terminated = TRUE;
1936
1937 assert(!coal->notified);
1938
1939 coal->notified = TRUE;
1940 #if DEVELOPMENT || DEBUG
1941 do_notify = coal->should_notify;
1942 #else
1943 do_notify = TRUE;
1944 #endif
1945 note_id = coal->id;
1946 note_flags = 0;
1947 }
1948
1949 coalition_unlock(coal);
1950
1951 if (do_notify) {
1952 coalition_notify_user(note_id, note_flags);
1953 }
1954
1955 return KERN_SUCCESS;
1956 }
1957
1958 /*
1959 * coalition_reap_internal
1960 * Condition: Coalition must be referenced and UNLOCKED.
1961 */
1962 kern_return_t
coalition_reap_internal(coalition_t coal)1963 coalition_reap_internal(coalition_t coal)
1964 {
1965 assert(coal->type <= COALITION_TYPE_MAX);
1966
1967 if (coal == init_coalition[coal->type]) {
1968 return KERN_DEFAULT_SET;
1969 }
1970
1971 coalition_lock(coal);
1972 if (coal->reaped) {
1973 coalition_unlock(coal);
1974 return KERN_TERMINATED;
1975 }
1976 if (!coal->terminated) {
1977 coalition_unlock(coal);
1978 return KERN_FAILURE;
1979 }
1980 assert(coal->termrequested);
1981 if (coal->active_count > 0) {
1982 coalition_unlock(coal);
1983 return KERN_FAILURE;
1984 }
1985
1986 coal->reaped = TRUE;
1987
1988 /* Caller, launchd, and coalitions list should each have a reference */
1989 assert(coal->ref_count > 2);
1990
1991 coalition_unlock(coal);
1992
1993 lck_rw_lock_exclusive(&coalitions_list_lock);
1994 coalition_count--;
1995 remqueue(&coal->coalitions);
1996 lck_rw_unlock_exclusive(&coalitions_list_lock);
1997
1998 /* Release the list's reference and launchd's reference. */
1999 coalition_release(coal);
2000 coalition_release(coal);
2001
2002 return KERN_SUCCESS;
2003 }
2004
2005 #if DEVELOPMENT || DEBUG
2006 int
coalition_should_notify(coalition_t coal)2007 coalition_should_notify(coalition_t coal)
2008 {
2009 int should;
2010 if (!coal) {
2011 return -1;
2012 }
2013 coalition_lock(coal);
2014 should = coal->should_notify;
2015 coalition_unlock(coal);
2016
2017 return should;
2018 }
2019
2020 void
coalition_set_notify(coalition_t coal,int notify)2021 coalition_set_notify(coalition_t coal, int notify)
2022 {
2023 if (!coal) {
2024 return;
2025 }
2026 coalition_lock(coal);
2027 coal->should_notify = !!notify;
2028 coalition_unlock(coal);
2029 }
2030 #endif
2031
/*
 * coalitions_init
 * Boot-time initialization: set up the global coalition queue and
 * ledgers, sanity-check the global coalition type table, then create
 * the default ("init") and corpse coalitions for each type that
 * declares a default.  Any inconsistency here is fatal.
 */
void
coalitions_init(void)
{
	kern_return_t kr;
	int i;
	const struct coalition_type *ctype;

	queue_head_init(coalitions_q);

	init_task_ledgers();

	init_coalition_ledgers();

	for (i = 0, ctype = &s_coalition_types[0]; i < COALITION_NUM_TYPES; ctype++, i++) {
		/* verify the entry in the global coalition types array */
		if (ctype->type != i ||
		    !ctype->init ||
		    !ctype->dealloc ||
		    !ctype->adopt_task ||
		    !ctype->remove_task) {
			panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
			    __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i);
		}
		if (!ctype->has_default) {
			continue;
		}
		/* NOTE(review): boolean args appear to be privileged/efficiency
		 * flags — confirm against coalition_create_internal's signature */
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, FALSE, &init_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create init %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
		/* the kernel's default resource coalition gets the well-known id */
		if (i == COALITION_TYPE_RESOURCE) {
			assert(COALITION_ID_KERNEL == init_coalition[ctype->type]->id);
		}
		kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, FALSE, &corpse_coalition[ctype->type], NULL);
		if (kr != KERN_SUCCESS) {
			panic("%s: could not create corpse %s coalition: kr:%d",
			    __func__, coal_type_str(i), kr);
		}
	}

	/* "Leak" our reference to the global object */
}
2075
2076 /*
2077 * BSD Kernel interface functions
2078 *
2079 */
2080 static void
coalition_fill_procinfo(struct coalition * coal,struct procinfo_coalinfo * coalinfo)2081 coalition_fill_procinfo(struct coalition *coal,
2082 struct procinfo_coalinfo *coalinfo)
2083 {
2084 coalinfo->coalition_id = coal->id;
2085 coalinfo->coalition_type = coal->type;
2086 coalinfo->coalition_tasks = coalition_get_task_count(coal);
2087 }
2088
2089
2090 int
coalitions_get_list(int type,struct procinfo_coalinfo * coal_list,int list_sz)2091 coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz)
2092 {
2093 int ncoals = 0;
2094 struct coalition *coal;
2095
2096 lck_rw_lock_shared(&coalitions_list_lock);
2097 qe_foreach_element(coal, &coalitions_q, coalitions) {
2098 if (!coal->reaped && (type < 0 || type == (int)coal->type)) {
2099 if (coal_list && ncoals < list_sz) {
2100 coalition_fill_procinfo(coal, &coal_list[ncoals]);
2101 }
2102 ++ncoals;
2103 }
2104 }
2105 lck_rw_unlock_shared(&coalitions_list_lock);
2106
2107 return ncoals;
2108 }
2109
2110 /*
 * Return the coalition of the given type to which the task belongs.
2112 */
2113 coalition_t
task_get_coalition(task_t task,int coal_type)2114 task_get_coalition(task_t task, int coal_type)
2115 {
2116 coalition_t c;
2117
2118 if (task == NULL || coal_type > COALITION_TYPE_MAX) {
2119 return COALITION_NULL;
2120 }
2121
2122 c = task->coalition[coal_type];
2123 assert(c == COALITION_NULL || (int)c->type == coal_type);
2124 return c;
2125 }
2126
2127 /*
2128 * Report if the given task is the leader of the given jetsam coalition.
2129 */
2130 boolean_t
coalition_is_leader(task_t task,coalition_t coal)2131 coalition_is_leader(task_t task, coalition_t coal)
2132 {
2133 boolean_t ret = FALSE;
2134
2135 if (coal != COALITION_NULL) {
2136 coalition_lock(coal);
2137
2138 ret = (coal->type == COALITION_TYPE_JETSAM && coal->j.leader == task);
2139
2140 coalition_unlock(coal);
2141 }
2142
2143 return ret;
2144 }
2145
/*
 * coalition_iterate_stackshot
 * Walk the global coalition list and invoke 'callout' on each coalition
 * of the requested type.  Runs lockless and probes each element with
 * ml_validate_nofault first — presumably called from stackshot/debugger
 * context where taking locks is not safe (NOTE(review): confirm).
 */
kern_return_t
coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type)
{
	coalition_t coal;
	int i = 0;

	qe_foreach_element(coal, &coalitions_q, coalitions) {
		/* bail out if the queue element doesn't point at valid memory */
		if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition))) {
			return KERN_FAILURE;
		}

		if (coalition_type == coal->type) {
			callout(arg, i++, coal);
		}
	}

	return KERN_SUCCESS;
}
2164
2165 task_t
kdp_coalition_get_leader(coalition_t coal)2166 kdp_coalition_get_leader(coalition_t coal)
2167 {
2168 if (!coal) {
2169 return TASK_NULL;
2170 }
2171
2172 if (coal->type == COALITION_TYPE_JETSAM) {
2173 return coal->j.leader;
2174 }
2175 return TASK_NULL;
2176 }
2177
2178 task_t
coalition_get_leader(coalition_t coal)2179 coalition_get_leader(coalition_t coal)
2180 {
2181 task_t leader = TASK_NULL;
2182
2183 if (!coal) {
2184 return TASK_NULL;
2185 }
2186
2187 coalition_lock(coal);
2188 if (coal->type != COALITION_TYPE_JETSAM) {
2189 goto out_unlock;
2190 }
2191
2192 leader = coal->j.leader;
2193 if (leader != TASK_NULL) {
2194 task_reference(leader);
2195 }
2196
2197 out_unlock:
2198 coalition_unlock(coal);
2199 return leader;
2200 }
2201
2202
2203 int
coalition_get_task_count(coalition_t coal)2204 coalition_get_task_count(coalition_t coal)
2205 {
2206 int ntasks = 0;
2207 struct queue_entry *qe;
2208 if (!coal) {
2209 return 0;
2210 }
2211
2212 coalition_lock(coal);
2213 switch (coal->type) {
2214 case COALITION_TYPE_RESOURCE:
2215 qe_foreach(qe, &coal->r.tasks)
2216 ntasks++;
2217 break;
2218 case COALITION_TYPE_JETSAM:
2219 if (coal->j.leader) {
2220 ntasks++;
2221 }
2222 qe_foreach(qe, &coal->j.other)
2223 ntasks++;
2224 qe_foreach(qe, &coal->j.extensions)
2225 ntasks++;
2226 qe_foreach(qe, &coal->j.services)
2227 ntasks++;
2228 break;
2229 default:
2230 break;
2231 }
2232 coalition_unlock(coal);
2233
2234 return ntasks;
2235 }
2236
2237
2238 static uint64_t
i_get_list_footprint(queue_t list,int type,int * ntasks)2239 i_get_list_footprint(queue_t list, int type, int *ntasks)
2240 {
2241 task_t task;
2242 uint64_t bytes = 0;
2243
2244 qe_foreach_element(task, list, task_coalition[type]) {
2245 bytes += get_task_phys_footprint(task);
2246 coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
2247 *ntasks, task_pid(task), type, bytes);
2248 *ntasks += 1;
2249 }
2250
2251 return bytes;
2252 }
2253
/*
 * coalition_get_page_count
 * Sum the physical footprint of every task in 'coal' and return it in
 * pages.  If 'ntasks' is non-NULL, it receives the number of member
 * tasks counted.  Returns 0 for a NULL coalition.
 */
uint64_t
coalition_get_page_count(coalition_t coal, int *ntasks)
{
	uint64_t bytes = 0;
	int num_tasks = 0;

	if (ntasks) {
		*ntasks = 0;
	}
	if (!coal) {
		return bytes;
	}

	coalition_lock(coal);

	switch (coal->type) {
	case COALITION_TYPE_RESOURCE:
		bytes += i_get_list_footprint(&coal->r.tasks, COALITION_TYPE_RESOURCE, &num_tasks);
		break;
	case COALITION_TYPE_JETSAM:
		/* the leader is tracked separately from the role lists */
		if (coal->j.leader) {
			bytes += get_task_phys_footprint(coal->j.leader);
			num_tasks = 1;
		}
		bytes += i_get_list_footprint(&coal->j.extensions, COALITION_TYPE_JETSAM, &num_tasks);
		bytes += i_get_list_footprint(&coal->j.services, COALITION_TYPE_JETSAM, &num_tasks);
		bytes += i_get_list_footprint(&coal->j.other, COALITION_TYPE_JETSAM, &num_tasks);
		break;
	default:
		break;
	}

	coalition_unlock(coal);

	if (ntasks) {
		*ntasks = num_tasks;
	}

	return bytes / PAGE_SIZE_64;
}
2294
/* per-task sort record used by coalition_get_pid_list() */
struct coal_sort_s {
	int pid;        /* pid of the member task */
	int usr_order;  /* caller-specified order (set to 0 everywhere in this file) */
	uint64_t bytes; /* physical memory footprint, in bytes */
};
2300
2301 /*
2302 * return < 0 for a < b
2303 * 0 for a == b
2304 * > 0 for a > b
2305 */
typedef int (*cmpfunc_t)(const void *a, const void *b);

/* kernel-provided qsort: sorts 'n' elements of size 'es' in place */
extern void
qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
2310
2311 static int
dflt_cmp(const void * a,const void * b)2312 dflt_cmp(const void *a, const void *b)
2313 {
2314 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2315 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2316
2317 /*
2318 * if both A and B are equal, use a memory descending sort
2319 */
2320 if (csA->usr_order == csB->usr_order) {
2321 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
2322 }
2323
2324 /* otherwise, return the relationship between user specified orders */
2325 return csA->usr_order - csB->usr_order;
2326 }
2327
2328 static int
mem_asc_cmp(const void * a,const void * b)2329 mem_asc_cmp(const void *a, const void *b)
2330 {
2331 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2332 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2333
2334 return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
2335 }
2336
2337 static int
mem_dec_cmp(const void * a,const void * b)2338 mem_dec_cmp(const void *a, const void *b)
2339 {
2340 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2341 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2342
2343 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
2344 }
2345
2346 static int
usr_asc_cmp(const void * a,const void * b)2347 usr_asc_cmp(const void *a, const void *b)
2348 {
2349 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2350 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2351
2352 return csA->usr_order - csB->usr_order;
2353 }
2354
2355 static int
usr_dec_cmp(const void * a,const void * b)2356 usr_dec_cmp(const void *a, const void *b)
2357 {
2358 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
2359 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
2360
2361 return csB->usr_order - csA->usr_order;
2362 }
2363
2364 /* avoid dynamic allocation in this path */
2365 #define MAX_SORTED_PIDS 80
2366
/*
 * coalition_get_sort_list
 * Collect (pid, sort-key) records for the tasks on 'list' into
 * 'sort_array', filling at most 'array_sz' slots.  Which key is
 * populated depends on 'sort_order'.  A NULL 'list' means "just the
 * jetsam leader" (see below).  Returns the number of entries written.
 * Condition: caller holds the coalition lock.
 */
static int
coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
	int ntasks = 0;
	task_t task;

	assert(sort_array != NULL);

	if (array_sz <= 0) {
		return 0;
	}

	if (!list) {
		/*
		 * this function will only be called with a NULL
		 * list for JETSAM-type coalitions, and is intended
		 * to investigate the leader process
		 */
		if (coal->type != COALITION_TYPE_JETSAM ||
		    coal->j.leader == TASK_NULL) {
			return 0;
		}
		sort_array[0].pid = task_pid(coal->j.leader);
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[0].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[0].usr_order = 0;
			break;
		default:
			break;
		}
		return 1;
	}

	qe_foreach_element(task, list, task_coalition[coal->type]) {
		if (ntasks >= array_sz) {
			/*
			 * NOTE(review): the message reports MAX_SORTED_PIDS
			 * (the overall cap across the caller's successive
			 * calls) rather than this call's 'array_sz'.
			 */
			printf("WARNING: more than %d pids in coalition %llu\n",
			    MAX_SORTED_PIDS, coal->id);
			break;
		}

		sort_array[ntasks].pid = task_pid(task);

		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[ntasks].usr_order = 0;
			OS_FALLTHROUGH;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[ntasks].bytes = get_task_phys_footprint(task);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[ntasks].usr_order = 0;
			break;
		default:
			break;
		}

		ntasks++;
	}

	return ntasks;
}
2439
/*
 * coalition_get_pid_list
 * Copy the pids of tasks in 'coal' whose role matches 'rolemask' into
 * 'pid_list' (up to 'list_sz' entries), ordered per 'sort_order'.
 * Returns the total number of matching tasks — which may exceed
 * 'list_sz' — or a negative errno-style value on bad parameters.
 * At most MAX_SORTED_PIDS tasks are gathered in total.
 */
int
coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz)
{
	struct i_jetsam_coalition *cj;
	int ntasks = 0;
	cmpfunc_t cmp_func = NULL;
	struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0, 0, 0} }; /* keep to < 2k */

	if (!coal ||
	    !(rolemask & COALITION_ROLEMASK_ALLROLES) ||
	    !pid_list || list_sz < 1) {
		coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
		    "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1,
		    rolemask, pid_list, list_sz);
		return -EINVAL;
	}

	/* map the requested sort order to its comparison function */
	switch (sort_order) {
	case COALITION_SORT_NOSORT:
		cmp_func = NULL;
		break;
	case COALITION_SORT_DEFAULT:
		cmp_func = dflt_cmp;
		break;
	case COALITION_SORT_MEM_ASC:
		cmp_func = mem_asc_cmp;
		break;
	case COALITION_SORT_MEM_DEC:
		cmp_func = mem_dec_cmp;
		break;
	case COALITION_SORT_USER_ASC:
		cmp_func = usr_asc_cmp;
		break;
	case COALITION_SORT_USER_DEC:
		cmp_func = usr_dec_cmp;
		break;
	default:
		return -ENOTSUP;
	}

	coalition_lock(coal);

	/* resource coalitions keep all member tasks on a single list */
	if (coal->type == COALITION_TYPE_RESOURCE) {
		ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks,
		    sort_array, MAX_SORTED_PIDS);
		goto unlock_coal;
	}

	cj = &coal->j;

	/* jetsam coalitions: gather each requested role bucket in turn */
	if (rolemask & COALITION_ROLEMASK_UNDEF) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->other,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_XPC) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->services,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	if (rolemask & COALITION_ROLEMASK_EXT) {
		ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

	/* NULL list selects the leader (see coalition_get_sort_list) */
	if (rolemask & COALITION_ROLEMASK_LEADER) {
		ntasks += coalition_get_sort_list(coal, sort_order, NULL,
		    sort_array + ntasks,
		    MAX_SORTED_PIDS - ntasks);
	}

unlock_coal:
	coalition_unlock(coal);

	/* sort based on the chosen criterion (no sense sorting 1 item) */
	if (cmp_func && ntasks > 1) {
		qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func);
	}

	/* copy out at most list_sz pids; return the full count regardless */
	for (int i = 0; i < ntasks; i++) {
		if (i >= list_sz) {
			break;
		}
		coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
		    i, sort_array[i].pid, sort_array[i].bytes,
		    sort_array[i].usr_order);
		pid_list[i] = sort_array[i].pid;
	}

	return ntasks;
}
2535