1 /*
2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 *
28 */
29
30 #ifndef _KERN_MEMORYSTATUS_INTERNAL_H_
31 #define _KERN_MEMORYSTATUS_INTERNAL_H_
32
33 /*
34 * Contains memorystatus subsystem definitions that are not
35 * exported outside of the memorystatus subsystem.
36 *
37 * For example, all of the mechanisms used by kern_memorystatus_policy.c
38 * should be defined in this header.
39 */
40
41 #if BSD_KERNEL_PRIVATE
42
43 #include <mach/boolean.h>
44 #include <stdbool.h>
45 #include <os/atomic_private.h>
46 #include <os/base.h>
47 #include <os/log.h>
48 #include <os/overflow.h>
49 #include <kern/locks.h>
50 #include <kern/sched_prim.h>
51 #include <sys/kern_memorystatus.h>
52 #include <sys/kernel_types.h>
53 #include <sys/proc.h>
54 #include <sys/proc_internal.h>
55
56 #if CONFIG_FREEZE
57 #include <sys/kern_memorystatus_freeze.h>
58 #endif /* CONFIG_FREEZE */
59
60 /*
61 * memorystatus subsystem globals
62 */
63 extern uint32_t memorystatus_available_pages;
64 #if CONFIG_JETSAM
65 extern bool jetsam_kill_on_low_swap;
66 #endif /* CONFIG_JETSAM */
67 extern bool kill_on_no_paging_space;
68 extern int block_corpses; /* counter to block new corpses if jetsam purges them */
69 extern int system_procs_aging_band;
70 extern int applications_aging_band;
71 /* the jetsam band which will contain P_MEMSTAT_FROZEN processes */
72 extern int memorystatus_freeze_jetsam_band;
73 #if CONFIG_FREEZE
74 extern unsigned int memorystatus_suspended_count;
75 #endif /* CONFIG_FREEZE */
76 extern uint64_t memorystatus_sysprocs_idle_delay_time;
77 extern uint64_t memorystatus_apps_idle_delay_time;
78
79 /*
80 * TODO(jason): This should really be calculated dynamically by the zalloc
81 * subsystem before we do a zone map exhaustion kill. But the zone_gc
82 * logic is non-trivial, so for now it just sets this global.
83 */
84 extern _Atomic bool memorystatus_zone_map_is_exhausted;
85 /*
86 * TODO(jason): We should get rid of this global
87 * and have the memorystatus thread check for compressor space shortages
88 * itself. However, there are 3 async call sites remaining that require more work to get us there:
89 * 2 of them are in vm_swap_defragment. When it's about to swap in a segment, it checks if that
90 * will cause a compressor space shortage & pre-emptively triggers jetsam. vm_compressor_backing_store
91 * needs to keep track of in-flight swapins due to defrag so we can perform those checks
92 * in the memorystatus thread.
93 * The other is in no_paging_space_action. This is only on macOS right now, but will
94 * be needed on iPad when we run out of swap space. This should be a new kill
95 * reason and we need to add a new health check for it.
96 * We need to maintain the macOS behavior though that we kill no more than 1 process
97 * every 5 seconds.
98 */
99 extern _Atomic bool memorystatus_compressor_space_shortage;
100 /*
101 * TODO(jason): We should also get rid of this global
102 * and check for phantom cache pressure from the memorystatus
 * thread. But first we need to fix the synchronization in
104 * vm_phantom_cache_check_pressure
105 */
106 extern _Atomic bool memorystatus_phantom_cache_pressure;
107
108 extern _Atomic bool memorystatus_pageout_starved;
109 /*
110 * The actions that the memorystatus thread can perform
111 * when we're low on memory.
112 * See memorystatus_pick_action to see when each action is deployed.
113 */
114 OS_CLOSED_ENUM(memorystatus_action, uint32_t,
115 MEMORYSTATUS_KILL_HIWATER, // Kill 1 highwatermark process
116 MEMORYSTATUS_KILL_AGGRESSIVE, // Do aggressive jetsam
117 MEMORYSTATUS_KILL_TOP_PROCESS, // Kill based on jetsam priority
118 MEMORYSTATUS_WAKE_SWAPPER, // Wake up the swap thread
119 MEMORYSTATUS_PROCESS_SWAPIN_QUEUE, // Compact the swapin queue and move segments to the swapout queue
120 MEMORYSTATUS_KILL_SUSPENDED_SWAPPABLE, // Kill a suspended swap-eligible processes based on jetsam priority
121 MEMORYSTATUS_KILL_SWAPPABLE, // Kill a swap-eligible process (even if it's running) based on jetsam priority
122 MEMORYSTATUS_KILL_IDLE, // Kill an idle process
123 MEMORYSTATUS_KILL_LONG_IDLE, // Kill a long-idle process (reaper)
124 MEMORYSTATUS_NO_PAGING_SPACE, // Perform a no-paging-space-action
125 MEMORYSTATUS_PURGE_CACHES, // Purge system memory caches (e.g. corpses, deferred reclaim memory)
126 MEMORYSTATUS_KILL_NONE, // Do nothing
127 );
128
/*
 * Options modifying how a jetsam kill selects its victim.
 * NB: "SWAPPABBLE" is a pre-existing misspelling of "SWAPPABLE"; the
 * identifier is kept as-is for source compatibility with existing callers.
 */
__options_closed_decl(memstat_kill_options_t, uint8_t, {
	MEMSTAT_ONLY_SWAPPABBLE = 0x01, /* only consider swap-eligible processes */
	MEMSTAT_ONLY_LONG_IDLE = 0x02, /* only consider long-idle (reapable) processes */
	MEMSTAT_SORT_BUCKET = 0x04, /* sort the priority bucket before killing */
});
134
135 /*
136 * Structure to hold state for a jetsam thread.
137 * Typically there should be a single jetsam thread
138 * unless parallel jetsam is enabled.
139 */
140 typedef struct jetsam_state_s {
141 bool inited; /* if the thread is initialized */
142 bool limit_to_low_bands; /* limit kills to < JETSAM_PRIORITY_ELEVATED_INACTIVE */
143 int index; /* jetsam thread index */
144 thread_t thread; /* jetsam thread pointer */
145 int jld_idle_kills; /* idle jetsam kill counter for this session */
146 uint32_t errors; /* Error accumulator */
147 bool errors_cleared; /* Have we tried clearing all errors this iteration? */
148 bool sort_flag; /* Sort the fg band (idle on macOS) before killing? */
149 bool corpse_list_purged; /* Has the corpse list been purged? */
150 bool post_snapshot; /* Do we need to post a jetsam snapshot after this session? */
151 uint64_t memory_reclaimed; /* Amount of memory that was just reclaimed */
152 uint32_t hwm_kills; /* hwm kill counter for this session */
153 sched_cond_atomic_t jt_wakeup_cond; /* condition var used to synchronize wake/sleep operations for this jetsam thread */
154 } *jetsam_state_t;
155
156 /*
157 * The memorystatus thread monitors these conditions
158 * and will continue to act until the system is considered
159 * healthy.
160 */
161 typedef struct memorystatus_system_health_s {
162 #if CONFIG_JETSAM
163 bool msh_available_pages_below_soft;
164 bool msh_available_pages_below_idle;
165 bool msh_available_pages_below_critical;
166 bool msh_available_pages_below_reaper;
167 bool msh_compressor_needs_to_swap;
168 bool msh_compressor_is_thrashing;
169 bool msh_filecache_is_thrashing;
170 bool msh_phantom_cache_pressure;
171 bool msh_swappable_compressor_segments_over_limit;
172 bool msh_swapin_queue_over_limit;
173 bool msh_pageout_starved;
174 #endif /* CONFIG_JETSAM */
175 bool msh_vm_pressure_warning;
176 bool msh_vm_pressure_critical;
177 bool msh_compressor_low_on_space;
178 bool msh_compressor_exhausted;
179 bool msh_swap_exhausted;
180 bool msh_swap_low_on_space;
181 bool msh_zone_map_is_exhausted;
182 } *memorystatus_system_health_t;
183
184 /*
185 * @func memstat_check_system_health
186 *
187 * @brief Evaluate system memory conditions and return if the system is healthy.
188 *
189 * @discussion
190 * Evaluates various system memory conditions, including compressor size and
191 * available page quantities. If conditions indicate a kill should be
192 * performed, the system is considered "unhealthy".
193 *
194 * @returns @c true if the system is healthy, @c false otherwise.
195 */
196 extern bool memstat_check_system_health(memorystatus_system_health_t status);
197
198 #pragma mark Locks
199
200 extern lck_mtx_t memorystatus_jetsam_broadcast_lock;
201
#pragma mark Aggressive jetsam tunables
203
204 extern boolean_t memorystatus_jld_enabled; /* Enable jetsam loop detection */
205 extern uint32_t memorystatus_jld_eval_period_msecs; /* Init pass sets this based on device memory size */
206 extern int memorystatus_jld_max_kill_loops; /* How many times should we try and kill up to the target band */
207 extern unsigned int memorystatus_sysproc_aging_aggr_pages; /* Aggressive jetsam pages threshold for sysproc aging policy */
208 extern unsigned int jld_eval_aggressive_count;
209 extern uint64_t jld_timestamp_msecs;
210 extern int jld_idle_kill_candidates;
211
212 #pragma mark No Paging Space Globals
213
214 extern _Atomic uint64_t last_no_space_action_ts;
215 extern uint64_t no_paging_space_action_throttle_delay_ns;
216
217 #pragma mark Pressure Response Globals
218 extern uint64_t memstat_last_cache_purge_ts;
219 extern uint64_t memstat_cache_purge_backoff_ns;
220
__options_decl(memstat_pressure_options_t, uint32_t, {
	/* Kill long idle processes at kVMPressureWarning */
	MEMSTAT_WARNING_KILL_LONG_IDLE = 0x01,
	/* Kill idle processes from the notify thread at kVMPressureWarning */
	MEMSTAT_WARNING_KILL_IDLE_THROTTLED = 0x02,
	/* Purge memory caches (e.g. corpses, deferred reclaim rings) at kVMPressureCritical */
	MEMSTAT_CRITICAL_PURGE_CACHES = 0x04,
	/* Kill all idle processes at kVMPressureCritical */
	MEMSTAT_CRITICAL_KILL_IDLE = 0x08,
	/* Kill when at kVMPressureWarning for a prolonged period */
	MEMSTAT_WARNING_KILL_SUSTAINED = 0x10,
});
/*
 * Maximum value accepted by the sysctl handler.
 * NOTE(review): 0x18 is less than the OR of all declared flags (0x1f);
 * confirm whether this was intentionally left behind when flags were added.
 */
#define MEMSTAT_PRESSURE_CONFIG_MAX (0x18U)
235
236 extern memstat_pressure_options_t memstat_pressure_config;
237
238 #pragma mark Config Globals
239 extern boolean_t memstat_reaper_enabled;
240
241 #pragma mark VM globals read by the memorystatus subsystem
242
243 extern unsigned int vm_page_free_count;
244 extern unsigned int vm_page_active_count;
245 extern unsigned int vm_page_inactive_count;
246 extern unsigned int vm_page_throttled_count;
247 extern unsigned int vm_page_purgeable_count;
248 extern unsigned int vm_page_wire_count;
249 extern unsigned int vm_page_speculative_count;
250 extern uint32_t c_late_swapout_count, c_late_swappedin_count;
251 extern uint32_t c_seg_allocsize;
252 extern bool vm_swapout_thread_running;
253 extern _Atomic bool vm_swapout_wake_pending;
254 #define VM_PAGE_DONATE_DISABLED 0
255 #define VM_PAGE_DONATE_ENABLED 1
256 extern uint32_t vm_page_donate_mode;
257
258 #if CONFIG_JETSAM
259 #define MEMORYSTATUS_LOG_AVAILABLE_PAGES os_atomic_load(&memorystatus_available_pages, relaxed)
260 #else /* CONFIG_JETSAM */
261 #define MEMORYSTATUS_LOG_AVAILABLE_PAGES (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
262 #endif /* CONFIG_JETSAM */
263
264 bool memorystatus_avail_pages_below_pressure(void);
265 bool memorystatus_avail_pages_below_critical(void);
266 #if CONFIG_JETSAM
267 bool memorystatus_swap_over_trigger(uint64_t adjustment_factor);
268 bool memorystatus_swapin_over_trigger(void);
269 #endif /* CONFIG_JETSAM */
270
271 /* Does cause indicate vm or fc thrashing? */
272 bool is_reason_thrashing(unsigned cause);
273 /* Is the zone map almost full? */
274 bool is_reason_zone_map_exhaustion(unsigned cause);
275
276 memorystatus_action_t memorystatus_pick_action(jetsam_state_t state,
277 uint32_t *kill_cause, bool highwater_remaining,
278 bool suspended_swappable_apps_remaining,
279 bool swappable_apps_remaining, int *jld_idle_kills);
280
/*
 * Convert a percentage `p` (0-100) of total physical memory into a page
 * count. `p` is parenthesized so compound arguments (e.g. `a + b`) expand
 * correctly; the multiplication happens in 64-bit before truncation.
 */
#define MEMSTAT_PERCENT_TOTAL_PAGES(p) ((uint32_t)((p) * atop_64(max_mem) / 100))
282
283 /*
284 * Take a (redacted) zprint snapshot along with the jetsam snapshot.
285 */
286 #define JETSAM_ZPRINT_SNAPSHOT (CONFIG_MEMORYSTATUS && (DEBUG || DEVELOPMENT))
287
288 #pragma mark Logging Utilities
289
/* Verbosity levels for memorystatus logging (compared against by the memorystatus_log_* macros below). */
__enum_decl(memorystatus_log_level_t, unsigned int, {
	MEMORYSTATUS_LOG_LEVEL_DEFAULT = 0, /* default/error/fault messages only */
	MEMORYSTATUS_LOG_LEVEL_INFO = 1, /* also emit info messages */
	MEMORYSTATUS_LOG_LEVEL_DEBUG = 2, /* also emit info and debug messages */
});
295
296 extern os_log_t memorystatus_log_handle;
297 extern memorystatus_log_level_t memorystatus_log_level;
298
299 /*
300 * NB: Critical memorystatus logs (e.g. jetsam kills) are load-bearing for OS
301 * performance testing infrastructure. Be careful when modifying the log-level for
302 * important system events.
303 *
304 * Memorystatus logs are interpreted by a wide audience. To avoid logging information
305 * that could lead to false diagnoses, INFO and DEBUG messages are only logged if the
306 * system has been configured to do so via `kern.memorystatus_log_level` (sysctl) or
307 * `memorystatus_log_level` (boot-arg).
308 *
309 * os_log supports a mechanism for configuring these properties dynamically; however,
310 * this mechanism is currently unsupported in XNU.
311 *
312 * TODO (JC) Deprecate sysctl/boot-arg and move to subsystem preferences pending:
313 * - rdar://27006343 (Custom kernel log handles)
314 * - rdar://80958044 (Kernel Logging Configuration)
315 */
/*
 * Logging macros for the memorystatus subsystem. All messages go through
 * the memorystatus log handle with startup-serial support; INFO and DEBUG
 * messages are additionally gated on memorystatus_log_level (see above).
 *
 * The level-gated variants are wrapped in do { } while (0) so each expands
 * to exactly one statement: the previous bare-if form would capture a
 * following `else` (dangling-else) and left a stray empty statement after
 * the caller's semicolon.
 */
#define _memorystatus_log_with_type(type, format, ...) os_log_with_startup_serial_and_type(memorystatus_log_handle, type, format, ##__VA_ARGS__)
#define memorystatus_log(format, ...) _memorystatus_log_with_type(OS_LOG_TYPE_DEFAULT, format, ##__VA_ARGS__)
#define memorystatus_log_info(format, ...) do { if (memorystatus_log_level >= MEMORYSTATUS_LOG_LEVEL_INFO) { _memorystatus_log_with_type(OS_LOG_TYPE_INFO, format, ##__VA_ARGS__); } } while (0)
#define memorystatus_log_debug(format, ...) do { if (memorystatus_log_level >= MEMORYSTATUS_LOG_LEVEL_DEBUG) { _memorystatus_log_with_type(OS_LOG_TYPE_DEBUG, format, ##__VA_ARGS__); } } while (0)
#define memorystatus_log_error(format, ...) _memorystatus_log_with_type(OS_LOG_TYPE_ERROR, format, ##__VA_ARGS__)
#define memorystatus_log_fault(format, ...) _memorystatus_log_with_type(OS_LOG_TYPE_FAULT, format, ##__VA_ARGS__)
322
323 #pragma mark Jetsam Priority Management
324
325 /*
326 * Cancel a process' idle aging
327 * Returns whether a reschedule of the idle demotion thread is needed.
328 */
329 void memstat_update_priority_locked(proc_t p, int priority,
330 memstat_priority_options_t options);
331
332 static inline bool
_memstat_proc_is_aging(proc_t p)333 _memstat_proc_is_aging(proc_t p)
334 {
335 return p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS;
336 }
337
338 static inline bool
_memstat_proc_is_tracked(proc_t p)339 _memstat_proc_is_tracked(proc_t p)
340 {
341 return p->p_memstat_dirty & P_DIRTY_TRACK;
342 }
343
344 static inline bool
_memstat_proc_is_dirty(proc_t p)345 _memstat_proc_is_dirty(proc_t p)
346 {
347 return p->p_memstat_dirty & P_DIRTY_IS_DIRTY;
348 }
349
350 /*
351 * Return true if this process is self-terminating via ActivityTracking.
352 */
353 static inline bool
_memstat_proc_is_terminating(proc_t p)354 _memstat_proc_is_terminating(proc_t p)
355 {
356 return p->p_memstat_dirty & P_DIRTY_TERMINATED;
357 }
358
359 /*
360 * Return true if this process has been killed and is in the process of exiting.
361 */
362 static inline bool
_memstat_proc_was_killed(proc_t p)363 _memstat_proc_was_killed(proc_t p)
364 {
365 return p->p_memstat_state & P_MEMSTAT_TERMINATED;
366 }
367
368 static inline bool
_memstat_proc_is_internal(proc_t p)369 _memstat_proc_is_internal(proc_t p)
370 {
371 return p->p_memstat_state & P_MEMSTAT_INTERNAL;
372 }
373
374 static inline bool
_memstat_proc_can_idle_exit(proc_t p)375 _memstat_proc_can_idle_exit(proc_t p)
376 {
377 return _memstat_proc_is_tracked(p) &&
378 (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT);
379 }
380
381 static inline bool
_memstat_proc_shutdown_on_clean(proc_t p)382 _memstat_proc_shutdown_on_clean(proc_t p)
383 {
384 return _memstat_proc_is_tracked(p) &&
385 (p->p_memstat_dirty & P_DIRTY_SHUTDOWN_ON_CLEAN);
386 }
387
388 static inline bool
_memstat_proc_has_priority_assertion(proc_t p)389 _memstat_proc_has_priority_assertion(proc_t p)
390 {
391 return p->p_memstat_state & P_MEMSTAT_PRIORITY_ASSERTION;
392 }
393
394 static inline bool
_memstat_proc_is_managed(proc_t p)395 _memstat_proc_is_managed(proc_t p)
396 {
397 return p->p_memstat_state & P_MEMSTAT_MANAGED;
398 }
399
400 static inline bool
_memstat_proc_is_frozen(proc_t p)401 _memstat_proc_is_frozen(proc_t p)
402 {
403 return p->p_memstat_state & P_MEMSTAT_FROZEN;
404 }
405
406 static inline bool
_memstat_proc_is_suspended(proc_t p)407 _memstat_proc_is_suspended(proc_t p)
408 {
409 return p->p_memstat_state & P_MEMSTAT_SUSPENDED;
410 }
411
412 static inline void
_memstat_proc_set_suspended(proc_t p)413 _memstat_proc_set_suspended(proc_t p)
414 {
415 LCK_MTX_ASSERT(&proc_list_mlock, LCK_ASSERT_OWNED);
416 if (!_memstat_proc_is_suspended(p)) {
417 p->p_memstat_state |= P_MEMSTAT_SUSPENDED;
418 #if CONFIG_FREEZE
419 if (os_inc_overflow(&memorystatus_suspended_count)) {
420 panic("Overflowed memorystatus_suspended_count");
421 }
422 #endif /* CONFIG_FREEZE */
423 }
424 }
425
426 static inline void
_memstat_proc_set_resumed(proc_t p)427 _memstat_proc_set_resumed(proc_t p)
428 {
429 LCK_MTX_ASSERT(&proc_list_mlock, LCK_ASSERT_OWNED);
430 if (_memstat_proc_is_suspended(p)) {
431 p->p_memstat_state &= ~P_MEMSTAT_SUSPENDED;
432 #if CONFIG_FREEZE
433 if (os_dec_overflow(&memorystatus_suspended_count)) {
434 panic("Underflowed memorystatus_suspended_count");
435 }
436 #endif /* CONFIG_FREEZE */
437 }
438 }
439
440 /*
441 * Return whether the process is to be placed in an elevated band while idle.
442 */
443 static inline bool
_memstat_proc_is_elevated(proc_t p)444 _memstat_proc_is_elevated(proc_t p)
445 {
446 return p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND;
447 }
448
449 /*
450 * Return whether p's ledger-enforced memlimit is fatal (as last cached by
451 * memorystatus)
452 */
453 static inline bool
_memstat_proc_cached_memlimit_is_fatal(proc_t p)454 _memstat_proc_cached_memlimit_is_fatal(proc_t p)
455 {
456 return p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT;
457 }
458
459 /*
460 * Return whether p's inactive/active memlimit is fatal
461 */
462 static inline bool
_memstat_proc_memlimit_is_fatal(proc_t p,bool is_active)463 _memstat_proc_memlimit_is_fatal(proc_t p, bool is_active)
464 {
465 const uint32_t flag = is_active ?
466 P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL : P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL;
467 return p->p_memstat_state & flag;
468 }
469
470 static inline bool
_memstat_proc_active_memlimit_is_fatal(proc_t p)471 _memstat_proc_active_memlimit_is_fatal(proc_t p)
472 {
473 return _memstat_proc_memlimit_is_fatal(p, true);
474 }
475
476 static inline bool
_memstat_proc_inactive_memlimit_is_fatal(proc_t p)477 _memstat_proc_inactive_memlimit_is_fatal(proc_t p)
478 {
479 return _memstat_proc_memlimit_is_fatal(p, false);
480 }
481
482 #pragma mark Jetsam
483
484 /*
485 * @func memstat_evaluate_page_shortage
486 *
487 * @brief
488 * Evaluate page shortage conditions. Returns true if the jetsam thread should be woken up.
489 *
490 * @param should_enforce_memlimits
491 * Set to true if soft memory limits should be enforced
492 *
493 * @param should_idle_exit
494 * Set to true if idle processes should begin exiting
495 *
496 * @param should_jetsam
497 * Set to true if non-idle processes should be jetsammed
498 *
499 * @param should_reap
500 * Set to true if long-idle processes should be jetsammed
501 */
502 bool memstat_evaluate_page_shortage(
503 bool *should_enforce_memlimits,
504 bool *should_idle_exit,
505 bool *should_jetsam,
506 bool *should_reap);
507
508 /*
509 * In nautical applications, ballast tanks are tanks on boats or submarines
510 * which can be filled with water. When flooded, they provide stability and
511 * reduce buoyancy. When drained (and filled with air), they provide buoyancy.
512 *
513 * In our analogy, the ballast tanks may be drained of unneeded weight (as
514 * occupied by idle processes or processes who have exceeded their memory
515 * limit) and filled with air (available memory). Userspace may toggle between
516 * these two states (filled/drained) depending on system requirements. For
 * example, drained ballast tanks (i.e. elevated available memory pools) may
518 * have benefits to power and latency. However, applications with large
519 * working sets may need to flood the ballast tanks (i.e. with
520 * anonymous/wired memory) to avoid issues like jetsam loops of daemons that it
521 * has IPC relationships with.
522 *
523 * Mechanically, "draining" the ballast tanks means applying a configurable
524 * offset to the idle and soft available page shortage thresholds. This offset
525 * is then removed when the policy is disengaged.
526 *
527 * The ballast mechanism is intended to be used over long time periods and the
528 * ballast_offset should be sustainable for general applications. If response to
529 * transient spikes in memory demand is desired, the clear-the-decks policy
530 * should be used instead.
531 *
532 * Clients may toggle this behavior via sysctl: kern.memorystatus.ballast_drained
533 */
534 int memorystatus_ballast_control(bool drain);
535
536 /* Synchronously kill a process due to sustained memory pressure */
537 bool memorystatus_kill_on_sustained_pressure(void);
538
539 /* Synchronously kill an idle process */
540 bool memstat_kill_idle_process(memorystatus_kill_cause_t cause,
541 uint64_t *footprint_out);
542
543 /*
544 * Attempt to kill the specified pid with the given reason.
545 * Consumes a reference on the jetsam_reason.
546 */
547 bool memstat_kill_with_jetsam_reason_sync(pid_t pid, os_reason_t jetsam_reason);
548
549 /* Count the number of processes at priority <= max_bucket_index */
550 uint32_t memstat_get_proccnt_upto_priority(uint32_t max_bucket_index);
551
552 /*
553 * @func memstat_get_idle_proccnt
554 * @brief Return the number of idle processes which may be terminated.
555 */
556 uint32_t memstat_get_idle_proccnt(void);
557
558 /*
559 * @func memstat_get_reapable_proccnt
560 * @brief Return the number of idle, reapable processes which may be terminated.
561 */
562 uint32_t memstat_get_long_idle_proccnt(void);
563
564 #pragma mark Freezer
565 #if CONFIG_FREEZE
566 /*
567 * Freezer data types
568 */
569
570 /* An ordered list of freeze or demotion candidates */
571 struct memorystatus_freezer_candidate_list {
572 memorystatus_properties_freeze_entry_v1 *mfcl_list;
573 size_t mfcl_length;
574 };
575
576 struct memorystatus_freeze_list_iterator {
577 bool refreeze_only;
578 proc_t last_p;
579 size_t global_freeze_list_index;
580 };
581
582 /*
583 * Freezer globals
584 */
585 extern struct memorystatus_freezer_stats_t memorystatus_freezer_stats;
586 extern int memorystatus_freezer_use_ordered_list;
587 extern struct memorystatus_freezer_candidate_list memorystatus_global_freeze_list;
588 extern struct memorystatus_freezer_candidate_list memorystatus_global_demote_list;
589 extern uint64_t memorystatus_freezer_thread_next_run_ts;
590 bool memorystatus_is_process_eligible_for_freeze(proc_t p);
591 bool memorystatus_freeze_proc_is_refreeze_eligible(proc_t p);
592
593 proc_t memorystatus_freezer_candidate_list_get_proc(
594 struct memorystatus_freezer_candidate_list *list,
595 size_t index,
596 uint64_t *pid_mismatch_counter);
597 /*
598 * Returns the leader of the p's jetsam coalition
599 * and the role of p in that coalition.
600 */
601 proc_t memorystatus_get_coalition_leader_and_role(proc_t p, int *role_in_coalition);
602 bool memorystatus_freeze_process_is_recommended(const proc_t p);
603
604 /*
605 * Ordered iterator over all freeze candidates.
606 * The iterator should initially be zeroed out by the caller and
607 * can be zeroed out whenever the caller wishes to start from the beginning
608 * of the list again.
609 * Returns PROC_NULL when all candidates have been iterated over.
610 */
611 proc_t memorystatus_freeze_pick_process(struct memorystatus_freeze_list_iterator *iterator);
612
613 /*
614 * Returns the number of processes that the freezer thread should try to freeze
615 * on this wakeup.
616 */
617 size_t memorystatus_pick_freeze_count_for_wakeup(void);
618
619 /*
620 * Configure the freezer for app-based swap mode.
621 * Should be called at boot.
622 */
623 void memorystatus_freeze_configure_for_swap(void);
624 /*
625 * Undo memorystatus_freeze_configure_for_swap
626 */
627 void memorystatus_freeze_disable_swap(void);
628 #endif /* CONFIG_FREEZE */
629
630 #endif /* BSD_KERNEL_PRIVATE */
631
632 #endif /* _KERN_MEMORYSTATUS_INTERNAL_H_ */
633