xref: /xnu-11215.41.3/bsd/kern/kern_memorystatus_policy.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
/*
 * Copyright (c) 2006-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */

#include <kern/task.h>
#include <libkern/libkern.h>
#include <machine/atomic.h>
#include <mach/coalition.h>
#include <os/log.h>
#include <sys/coalition.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/kern_memorystatus.h>
#include <vm/vm_protos.h>
#include <vm/vm_compressor_xnu.h>

#include <kern/kern_memorystatus_internal.h>

/*
 * All memory pressure policy decisions should live here, and there should be
 * as little mechanism as possible. This file prioritizes readability.
 */

#pragma mark Policy Function Declarations

#if CONFIG_JETSAM
static bool memorystatus_check_aggressive_jetsam_needed(int *jld_idle_kills);
#endif /* CONFIG_JETSAM */

#pragma mark Memorystatus Health Check

/*
 * Each subsystem that relies on the memorystatus thread
 * to respond to resource exhaustion should put a health check in this section.
 * The memorystatus thread runs all of the health checks
 * to determine if the system is healthy. If the system is unhealthy,
 * it picks an action based on the system health status. See the
 * Memorystatus Thread Actions section below.
 */


#if XNU_TARGET_OS_WATCH
#define FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED true
#define FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED_TIMEOUT_SECONDS (60 * 15) /* 15 minutes */
#else
#define FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED false
#endif
extern pid_t memorystatus_freeze_last_pid_thawed;
extern uint64_t memorystatus_freeze_last_pid_thawed_ts;

static void
memorystatus_health_check(memorystatus_system_health_t *status)
{
	memset(status, 0, sizeof(memorystatus_system_health_t));
#if CONFIG_JETSAM
	status->msh_available_pages_below_pressure = memorystatus_avail_pages_below_pressure();
	status->msh_available_pages_below_critical = memorystatus_avail_pages_below_critical();
	status->msh_compressor_is_low_on_space = (vm_compressor_low_on_space() == TRUE);
	status->msh_compressed_pages_nearing_limit = vm_compressor_compressed_pages_nearing_limit();
	status->msh_compressor_is_thrashing = !memorystatus_swap_all_apps && vm_compressor_is_thrashing();
#if CONFIG_PHANTOM_CACHE
	status->msh_phantom_cache_pressure = os_atomic_load(&memorystatus_phantom_cache_pressure, acquire);
#else
	status->msh_phantom_cache_pressure = false;
#endif /* CONFIG_PHANTOM_CACHE */
	if (!memorystatus_swap_all_apps &&
	    status->msh_phantom_cache_pressure &&
	    !(status->msh_compressor_is_thrashing && status->msh_compressor_is_low_on_space)) {
		status->msh_filecache_is_thrashing = true;
	}
	status->msh_compressor_is_low_on_space = os_atomic_load(&memorystatus_compressor_space_shortage, acquire);
	status->msh_pageout_starved = os_atomic_load(&memorystatus_pageout_starved, acquire);
	status->msh_swappable_compressor_segments_over_limit = memorystatus_swap_over_trigger(100);
	status->msh_swapin_queue_over_limit = memorystatus_swapin_over_trigger();
	status->msh_swap_low_on_space = vm_swap_low_on_space();
	status->msh_swap_out_of_space = vm_swap_out_of_space();
#endif /* CONFIG_JETSAM */
	status->msh_zone_map_is_exhausted = os_atomic_load(&memorystatus_zone_map_is_exhausted, relaxed);
}

bool
memorystatus_is_system_healthy(const memorystatus_system_health_t *status)
{
#if CONFIG_JETSAM
	return !(status->msh_available_pages_below_critical ||
	       status->msh_compressor_is_low_on_space ||
	       status->msh_compressor_is_thrashing ||
	       status->msh_filecache_is_thrashing ||
	       status->msh_zone_map_is_exhausted ||
	       status->msh_pageout_starved);
#else /* CONFIG_JETSAM */
	return !status->msh_zone_map_is_exhausted;
#endif /* CONFIG_JETSAM */
}
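
/*
 * Illustrative sketch (compiled out): how the two routines above combine on a
 * memorystatus thread wakeup. The helper name example_system_is_healthy() is
 * hypothetical and not part of this file.
 */
#if 0
static bool
example_system_is_healthy(void)
{
	memorystatus_system_health_t status;

	memorystatus_health_check(&status);             /* gather the raw signals */
	return memorystatus_is_system_healthy(&status); /* fold them into a verdict */
}
#endif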


#pragma mark Memorystatus Thread Actions

/*
 * This section picks the appropriate memorystatus_action & deploys it.
 */

/*
 * Inspects the state of various resources in the system to see if
 * the system is healthy. If the system is not healthy, picks a
 * memorystatus_action_t to recover the system.
 *
 * Every time the memorystatus thread wakes up, it calls into here
 * to pick an action. It will continue performing memorystatus actions until this
 * function returns MEMORYSTATUS_KILL_NONE. At that point the thread will block.
 */
memorystatus_action_t
memorystatus_pick_action(jetsam_state_t state,
    uint32_t *kill_cause,
    bool highwater_remaining,
    bool suspended_swappable_apps_remaining,
    bool swappable_apps_remaining,
    int *jld_idle_kills)
{
	memorystatus_system_health_t status;
	memorystatus_health_check(&status);
	memorystatus_log_system_health(&status);
	bool is_system_healthy = memorystatus_is_system_healthy(&status);

#if CONFIG_JETSAM
	if (status.msh_available_pages_below_pressure || !is_system_healthy) {
		/*
		 * If swap is enabled, first check if we're running low on or are out of swap space.
		 */
		if (memorystatus_swap_all_apps && jetsam_kill_on_low_swap) {
			if (swappable_apps_remaining && status.msh_swap_out_of_space) {
				*kill_cause = kMemorystatusKilledLowSwap;
				return MEMORYSTATUS_KILL_SWAPPABLE;
			} else if (suspended_swappable_apps_remaining && status.msh_swap_low_on_space) {
				*kill_cause = kMemorystatusKilledLowSwap;
				return MEMORYSTATUS_KILL_SUSPENDED_SWAPPABLE;
			}
		}

		/*
		 * We're below the pressure level or the system is unhealthy.
		 * Either way, check whether we should be swapping
		 * and whether there are high-watermark kills left to do.
		 */
		if (memorystatus_swap_all_apps) {
			if (status.msh_swappable_compressor_segments_over_limit && !vm_swapout_thread_running && !os_atomic_load(&vm_swapout_wake_pending, relaxed)) {
				/*
				 * TODO: The swapper will keep running until it has drained the entire early swapout queue.
				 * That might be overly aggressive & we should look into tuning it.
				 * See rdar://84102304.
				 */
				return MEMORYSTATUS_WAKE_SWAPPER;
			} else if (status.msh_swapin_queue_over_limit) {
				return MEMORYSTATUS_PROCESS_SWAPIN_QUEUE;
			} else if (status.msh_swappable_compressor_segments_over_limit) {
				memorystatus_log_info(
					"memorystatus: Skipping swap wakeup because the swap thread is already running. vm_swapout_thread_running=%d, vm_swapout_wake_pending=%d\n",
					vm_swapout_thread_running, os_atomic_load(&vm_swapout_wake_pending, relaxed));
			}
		}

		if (highwater_remaining) {
			*kill_cause = kMemorystatusKilledHiwat;
			memorystatus_log("memorystatus: Looking for highwatermark kills.\n");
			return MEMORYSTATUS_KILL_HIWATER;
		}
	}

	if (is_system_healthy) {
		*kill_cause = 0;
		return MEMORYSTATUS_KILL_NONE;
	}

	/*
	 * At this point the system is unhealthy and there are no
	 * more highwatermark processes to kill.
	 */

	if (!state->limit_to_low_bands) {
		if (memorystatus_check_aggressive_jetsam_needed(jld_idle_kills)) {
			memorystatus_log("memorystatus: Starting aggressive jetsam.\n");
			*kill_cause = kMemorystatusKilledProcThrashing;
			return MEMORYSTATUS_KILL_AGGRESSIVE;
		}
	}
	/*
	 * The system is unhealthy and we either don't need aggressive jetsam
	 * or are not allowed to deploy it.
	 * Kill in priority order. We'll use LRU within every band except the
	 * FG band (which will be sorted by coalition role).
	 */
	*kill_cause = memorystatus_pick_kill_cause(&status);
	return MEMORYSTATUS_KILL_TOP_PROCESS;
#else /* CONFIG_JETSAM */
	(void) state;
	(void) jld_idle_kills;
	(void) suspended_swappable_apps_remaining;
	(void) swappable_apps_remaining;
	/*
	 * Without CONFIG_JETSAM, we only kill if the system is unhealthy.
	 * There is no aggressive jetsam and no
	 * early highwatermark killing.
	 */
	if (is_system_healthy) {
		*kill_cause = 0;
		return MEMORYSTATUS_KILL_NONE;
	}
	if (highwater_remaining) {
		*kill_cause = kMemorystatusKilledHiwat;
		return MEMORYSTATUS_KILL_HIWATER;
	} else {
		*kill_cause = memorystatus_pick_kill_cause(&status);
		return MEMORYSTATUS_KILL_TOP_PROCESS;
	}
#endif /* CONFIG_JETSAM */
}
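
/*
 * Illustrative sketch (compiled out): the retry loop described above, roughly
 * as the memorystatus thread drives it. example_deploy_action() and the
 * hard-coded "remaining" arguments are hypothetical simplifications; the real
 * thread recomputes them on every iteration.
 */
#if 0
static void
example_memorystatus_thread_loop(jetsam_state_t state)
{
	uint32_t kill_cause = 0;
	int jld_idle_kills = 0;
	memorystatus_action_t action;

	for (;;) {
		action = memorystatus_pick_action(state, &kill_cause,
		    true,       /* highwater_remaining */
		    true,       /* suspended_swappable_apps_remaining */
		    true,       /* swappable_apps_remaining */
		    &jld_idle_kills);
		if (action == MEMORYSTATUS_KILL_NONE) {
			break;  /* system is healthy; block until the next wakeup */
		}
		example_deploy_action(action, kill_cause);  /* hypothetical */
	}
}
#endif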

#pragma mark Aggressive Jetsam
/*
 * This section defines when we deploy aggressive jetsam.
 * Aggressive jetsam kills everything up to the jld_priority_band_max band.
 */

#if CONFIG_JETSAM

static bool
memorystatus_aggressive_jetsam_needed_sysproc_aging(__unused int jld_eval_aggressive_count, __unused int *jld_idle_kills, __unused int jld_idle_kill_candidates, int *total_candidates);

/*
 * kJetsamHighRelaunchCandidatesThreshold defines the percentage of candidates
 * in the idle & deferred bands that need to be bad candidates in order to trigger
 * aggressive jetsam.
 */
TUNABLE_DEV_WRITEABLE(unsigned int, kJetsamHighRelaunchCandidatesThreshold, "jetsam_high_relaunch_candidates_threshold_percent", 100);
#if DEVELOPMENT || DEBUG
SYSCTL_UINT(_kern, OID_AUTO, jetsam_high_relaunch_candidates_threshold_percent, CTLFLAG_RW | CTLFLAG_LOCKED, &kJetsamHighRelaunchCandidatesThreshold, 100, "");
#endif /* DEVELOPMENT || DEBUG */
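
/*
 * Example (hypothetical tuning session, DEVELOPMENT/DEBUG kernels only):
 * allow aggressive jetsam once at least half of the idle/deferred candidates
 * are marked high-relaunch-probability, either via the boot-arg
 * jetsam_high_relaunch_candidates_threshold_percent=50 or at runtime with:
 *
 *     sysctl kern.jetsam_high_relaunch_candidates_threshold_percent=50
 */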

/*
 * kJetsamMinCandidatesThreshold defines the minimum number of candidates in the
 * idle/deferred bands required to trigger aggressive jetsam. This value effectively
 * decides how much memory the system is willing to hold in the lower bands without
 * triggering aggressive jetsam. This number should ideally be tuned based on the
 * memory config of the device.
 */
TUNABLE_DT_DEV_WRITEABLE(unsigned int, kJetsamMinCandidatesThreshold, "/defaults", "kern.jetsam_min_candidates_threshold", "jetsam_min_candidates_threshold", 5, TUNABLE_DT_CHECK_CHOSEN);
#if DEVELOPMENT || DEBUG
SYSCTL_UINT(_kern, OID_AUTO, jetsam_min_candidates_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &kJetsamMinCandidatesThreshold, 5, "");
#endif /* DEVELOPMENT || DEBUG */
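
/*
 * For example, with the default threshold of 5, aggressive jetsam never
 * triggers while fewer than 5 processes sit in the idle + deferred bands,
 * regardless of how many of them are high-relaunch-probability candidates.
 */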

static bool
memorystatus_check_aggressive_jetsam_needed(int *jld_idle_kills)
{
	bool aggressive_jetsam_needed = false;
	int total_candidates = 0;
	/*
	 * The aggressive jetsam logic looks at the number of times it has been in the
	 * aggressive loop to determine the max priority band it should kill up to. The
	 * static variables below are used to track that property.
	 *
	 * To reset those values, the implementation checks if it has been
	 * memorystatus_jld_eval_period_msecs since the parameters were reset.
	 */

	if (memorystatus_jld_enabled == FALSE) {
		/* If aggressive jetsam is disabled, nothing to do here */
		return false;
	}

	/* Get the current timestamp in msecs (whole-second granularity) */
	struct timeval  jld_now_tstamp = {0, 0};
	uint64_t        jld_now_msecs = 0;
	microuptime(&jld_now_tstamp);
	jld_now_msecs = (jld_now_tstamp.tv_sec * 1000);

	/*
	 * Look at the number of candidates in the idle and deferred bands and
	 * how many of them are marked as high relaunch probability.
	 */
	aggressive_jetsam_needed = memorystatus_aggressive_jetsam_needed_sysproc_aging(jld_eval_aggressive_count,
	    jld_idle_kills, jld_idle_kill_candidates, &total_candidates);

	/*
	 * It is also possible that the system is down to a very small number of processes in the candidate
	 * bands. In that case, the decisions made by the memorystatus_aggressive_jetsam_needed_* routines
	 * would not be useful, so do not trigger aggressive jetsam.
	 */
	if (total_candidates < kJetsamMinCandidatesThreshold) {
		memorystatus_log_debug(
			"memorystatus: aggressive: [FAILED] Low Candidate "
			"Count (current: %d, threshold: %d)\n",
			total_candidates, kJetsamMinCandidatesThreshold);
		aggressive_jetsam_needed = false;
	}

	/*
	 * Check if it's been a long time since the aggressive jetsam evaluation
	 * parameters were refreshed. This logic also resets the jld_eval_aggressive_count
	 * counter to make sure we reset the aggressive jetsam severity.
	 */
	if ((total_candidates == 0) ||
	    (jld_now_msecs > (jld_timestamp_msecs + memorystatus_jld_eval_period_msecs))) {
		jld_timestamp_msecs       = jld_now_msecs;
		jld_idle_kill_candidates  = total_candidates;
		*jld_idle_kills           = 0;
		jld_eval_aggressive_count = 0;
	}

	return aggressive_jetsam_needed;
}

static bool
memorystatus_aggressive_jetsam_needed_sysproc_aging(__unused int eval_aggressive_count, __unused int *idle_kills, __unused int idle_kill_candidates, int *total_candidates)
{
	bool aggressive_jetsam_needed = false;

	/*
	 * For the kJetsamAgingPolicySysProcsReclaimedFirst aging policy, we maintain the jetsam
	 * relaunch behavior for all daemons. Also, daemons and apps are aged in deferred bands on
	 * every dirty->clean transition. For this aging policy, the best way to determine if
	 * aggressive jetsam is needed is to see if the kill candidates are mostly bad candidates.
	 * If yes, then we need to go to higher bands to reclaim memory.
	 */
	proc_list_lock();
	/* Get total candidate counts for the idle and idle deferred bands */
	*total_candidates = memstat_bucket[JETSAM_PRIORITY_IDLE].count + memstat_bucket[system_procs_aging_band].count;
	/* Get counts of bad kill candidates in the idle and idle deferred bands */
	int bad_candidates = memstat_bucket[JETSAM_PRIORITY_IDLE].relaunch_high_count + memstat_bucket[system_procs_aging_band].relaunch_high_count;

	proc_list_unlock();

	/* Check if the percentage of bad candidates is at least kJetsamHighRelaunchCandidatesThreshold % */
	aggressive_jetsam_needed = (((bad_candidates * 100) / *total_candidates) >= kJetsamHighRelaunchCandidatesThreshold);

	/*
	 * Since the new aging policy bases the aggressive jetsam trigger on the percentage of
	 * bad candidates, it is prone to being overly aggressive. In order to mitigate that,
	 * make sure the system is really under memory pressure before triggering aggressive
	 * jetsam.
	 */
	if (memorystatus_available_pages > memorystatus_sysproc_aging_aggr_pages) {
		aggressive_jetsam_needed = false;
	}

#if DEVELOPMENT || DEBUG
	memorystatus_log_info(
		"memorystatus: aggressive%d: [%s] Bad Candidate Threshold Check (total: %d, bad: %d, threshold: %d %%); Memory Pressure Check (available_pgs: %llu, threshold_pgs: %llu)\n",
		eval_aggressive_count, aggressive_jetsam_needed ? "PASSED" : "FAILED", *total_candidates, bad_candidates,
		kJetsamHighRelaunchCandidatesThreshold, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, (uint64_t)memorystatus_sysproc_aging_aggr_pages);
#endif /* DEVELOPMENT || DEBUG */
	return aggressive_jetsam_needed;
}
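
/*
 * Worked example: with 10 total candidates, 9 of them marked high relaunch
 * probability, the bad-candidate ratio is 90%. Under the default threshold of
 * 100% that does not trigger aggressive jetsam; with the threshold lowered to
 * 50% it would, provided memorystatus_available_pages is at or below
 * memorystatus_sysproc_aging_aggr_pages.
 */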

#endif /* CONFIG_JETSAM */

#pragma mark Freezer
#if CONFIG_FREEZE
/*
 * Freezer policies
 */

/*
 * These functions determine what is eligible for the freezer
 * and the order in which we consider freezing candidates.
 */

/*
 * Checks if the given process is eligible for the freezer.
 * Processes can only be frozen if this returns true.
 */
bool
memorystatus_is_process_eligible_for_freeze(proc_t p)
{
	/*
	 * Called with the proc_list_lock held.
	 */

	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);

	bool should_freeze = false;
	uint32_t state = 0, pages = 0;
	bool first_consideration = true;
	task_t task;

	state = p->p_memstat_state;

	if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) {
		if (state & P_MEMSTAT_FREEZE_DISABLED) {
			p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonDisabled;
		}
		goto out;
	}

	task = proc_task(p);

	if (isSysProc(p)) {
		/*
		 * Daemon: we consider freezing it if:
		 * - it belongs to a coalition whose leader is frozen, and
		 * - its role in the coalition is XPC service.
		 *
		 * We skip memory size requirements in this case.
		 */
		int task_role_in_coalition = 0;
		proc_t leader_proc = memorystatus_get_coalition_leader_and_role(p, &task_role_in_coalition);
		if (leader_proc == PROC_NULL || leader_proc == p) {
			/*
			 * The jetsam coalition is leaderless or the leader is not an app.
			 * Either way, don't freeze this proc.
			 */
			goto out;
		}

		/* The leader must be frozen */
		if (!(leader_proc->p_memstat_state & P_MEMSTAT_FROZEN)) {
			goto out;
		}
		/* Only freeze XPC services */
		if (task_role_in_coalition == COALITION_TASKROLE_XPC) {
			should_freeze = true;
		}

		goto out;
	} else {
		/*
		 * Application. Only freeze it if it's suspended.
		 */
		if (!(state & P_MEMSTAT_SUSPENDED)) {
			goto out;
		}
	}

	/*
	 * We're interested in tracking what percentage of
	 * eligible apps actually get frozen.
	 * To avoid skewing the metrics towards processes that
	 * are considered more frequently, we only track failures once
	 * per process.
	 */
	first_consideration = !(state & P_MEMSTAT_FREEZE_CONSIDERED);

	if (first_consideration) {
		memorystatus_freezer_stats.mfs_process_considered_count++;
		p->p_memstat_state |= P_MEMSTAT_FREEZE_CONSIDERED;
	}

	/* Only freeze applications meeting our minimum resident page criteria */
	memorystatus_get_task_page_counts(proc_task(p), &pages, NULL, NULL);
	if (pages < memorystatus_freeze_pages_min) {
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_below_min_pages_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonBelowMinPages;
		goto out;
	}

	/* Don't freeze a process that is already exiting on core. It may have started exiting
	 * after we chose it for freezing, but before we obtained the proc_list_lock.
	 * NB: This is only possible if we're coming in from memorystatus_freeze_process_sync.
	 * memorystatus_freeze_top_process holds the proc_list_lock while it traverses the bands.
	 */
	if (proc_list_exited(p)) {
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_other_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOther;
		goto out;
	}

	if (!memorystatus_freezer_use_ordered_list) {
		/*
		 * We're not using the ordered list, so we need to check
		 * that dasd recommended the process. Note that the ordered-list
		 * algorithm only considers processes on the list in the first place,
		 * so there's no need to double-check here.
		 */
		if (!memorystatus_freeze_process_is_recommended(p)) {
			if (first_consideration) {
				memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++;
			}
			p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonLowProbOfUse;
			goto out;
		}
	}

	if (!(state & P_MEMSTAT_FROZEN) && p->p_memstat_effectivepriority > memorystatus_freeze_max_candidate_band) {
		/*
		 * The proc has been elevated by something else.
		 * Don't freeze it.
		 */
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_elevated_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonElevated;
		goto out;
	}

	should_freeze = true;
out:
	if (should_freeze && !(state & P_MEMSTAT_FROZEN)) {
		/*
		 * Reset the skip reason. If the process is killed before we manage to
		 * actually freeze it, we failed to consider it early enough.
		 */
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
		if (!first_consideration) {
			/*
			 * We're freezing this process for the first time and we previously
			 * considered it ineligible. Bump the considered count so that we
			 * track this as 1 failure and 1 success.
			 */
			memorystatus_freezer_stats.mfs_process_considered_count++;
		}
	}
	return should_freeze;
}
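
/*
 * Illustrative sketch (compiled out): eligibility must be evaluated with the
 * proc_list_lock held, as asserted above. The helper name and the assumption
 * that the caller already holds a reference on p are hypothetical.
 */
#if 0
static bool
example_check_freeze_eligibility(proc_t p)
{
	bool eligible;

	proc_list_lock();
	eligible = memorystatus_is_process_eligible_for_freeze(p);
	proc_list_unlock();
	return eligible;
}
#endif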

bool
memorystatus_freeze_proc_is_refreeze_eligible(proc_t p)
{
	return (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) != 0;
}


static proc_t
memorystatus_freeze_pick_refreeze_process(proc_t last_p)
{
	proc_t p = PROC_NULL, next_p = PROC_NULL;
	unsigned int band = (unsigned int) memorystatus_freeze_jetsam_band;
	if (last_p == PROC_NULL) {
		next_p = memorystatus_get_first_proc_locked(&band, FALSE);
	} else {
		next_p = memorystatus_get_next_proc_locked(&band, last_p, FALSE);
	}
	while (next_p) {
		p = next_p;
		next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
		if ((p->p_memstat_state & P_MEMSTAT_FROZEN) && !memorystatus_freeze_proc_is_refreeze_eligible(p)) {
			/* Process is already frozen & hasn't been thawed. */
			continue;
		}
		/*
		 * Has to have been frozen once before.
		 */
		if (!(p->p_memstat_state & P_MEMSTAT_FROZEN)) {
			continue;
		}

		/*
		 * Skip processes that are currently locked, i.e. being
		 * looked at for something else.
		 */
		if (p->p_memstat_state & P_MEMSTAT_LOCKED) {
			continue;
		}

#if FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED
		/*
		 * Don't refreeze the last process we thawed if we're still
		 * within the timeout window.
		 */
		if (p->p_pid == memorystatus_freeze_last_pid_thawed) {
			uint64_t timeout_delta_abs;
			nanoseconds_to_absolutetime(FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED_TIMEOUT_SECONDS * NSEC_PER_SEC, &timeout_delta_abs);
			if (mach_absolute_time() < (memorystatus_freeze_last_pid_thawed_ts + timeout_delta_abs)) {
				continue;
			}
		}
#endif

		/*
		 * Found it.
		 */
		return p;
	}
	return PROC_NULL;
}

proc_t
memorystatus_freeze_pick_process(struct memorystatus_freeze_list_iterator *iterator)
{
	proc_t p = PROC_NULL, next_p = PROC_NULL;
	unsigned int band = JETSAM_PRIORITY_IDLE;

	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
	/*
	 * If the freezer is full, only consider refreezes.
	 */
	if (iterator->refreeze_only || memorystatus_frozen_count >= memorystatus_frozen_processes_max) {
		if (!iterator->refreeze_only) {
			/*
			 * The first time the iterator starts to return refreeze
			 * candidates, we need to reset the last pointer because it's
			 * pointing into the wrong band.
			 */
			iterator->last_p = PROC_NULL;
			iterator->refreeze_only = true;
		}
		iterator->last_p = memorystatus_freeze_pick_refreeze_process(iterator->last_p);
		return iterator->last_p;
	}

	/*
	 * Search for the next freezer candidate.
	 */
	if (memorystatus_freezer_use_ordered_list) {
		while (iterator->global_freeze_list_index < memorystatus_global_freeze_list.mfcl_length) {
			p = memorystatus_freezer_candidate_list_get_proc(
				&memorystatus_global_freeze_list,
				(iterator->global_freeze_list_index)++,
				&memorystatus_freezer_stats.mfs_freeze_pid_mismatches);

			if (p != PROC_NULL && memorystatus_is_process_eligible_for_freeze(p)) {
#if FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED
				/*
				 * Don't refreeze the last process we thawed if we're still
				 * within the timeout window.
				 */
				if (p->p_pid == memorystatus_freeze_last_pid_thawed) {
					uint64_t timeout_delta_abs;
					nanoseconds_to_absolutetime(FREEZE_PREVENT_REFREEZE_OF_LAST_THAWED_TIMEOUT_SECONDS * NSEC_PER_SEC, &timeout_delta_abs);
					if (mach_absolute_time() < (memorystatus_freeze_last_pid_thawed_ts + timeout_delta_abs)) {
						continue;
					}
				}
#endif
				iterator->last_p = p;
				return iterator->last_p;
			}
		}
	} else {
		if (iterator->last_p == PROC_NULL) {
			next_p = memorystatus_get_first_proc_locked(&band, FALSE);
		} else {
			next_p = memorystatus_get_next_proc_locked(&band, iterator->last_p, FALSE);
		}
		while (next_p) {
			p = next_p;
			if (memorystatus_is_process_eligible_for_freeze(p)) {
				iterator->last_p = p;
				return iterator->last_p;
			} else {
				next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
			}
		}
	}

	/*
	 * Failed to find a new freezer candidate.
	 * Try to re-freeze.
	 */
	if (memorystatus_refreeze_eligible_count >= memorystatus_min_thaw_refreeze_threshold) {
		assert(!iterator->refreeze_only);
		iterator->refreeze_only = true;
		iterator->last_p = memorystatus_freeze_pick_refreeze_process(PROC_NULL);
		return iterator->last_p;
	}
	return PROC_NULL;
}
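
/*
 * Illustrative sketch (compiled out): draining freeze candidates with the
 * iterator above. The helper name and the bare "freeze p" placeholder are
 * hypothetical; the actual callers live elsewhere in the freezer code.
 */
#if 0
static void
example_walk_freeze_candidates(void)
{
	struct memorystatus_freeze_list_iterator iterator = {0};

	proc_list_lock();
	for (proc_t p = memorystatus_freeze_pick_process(&iterator);
	    p != PROC_NULL;
	    p = memorystatus_freeze_pick_process(&iterator)) {
		/* ... freeze p here ... */
	}
	proc_list_unlock();
}
#endif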

/*
 * memorystatus_pages_update calls this function whenever the number
 * of available pages changes. It wakes the freezer thread iff this function
 * returns true. The freezer thread will try to freeze (or refreeze) up to 1
 * process before blocking again.
 *
 * Note that the freezer thread is also woken up by memorystatus_on_inactivity.
 */

bool
memorystatus_freeze_thread_should_run()
{
	/*
	 * The freezer_mutex is not held here; see the comment near the
	 * call site in memorystatus_pages_update().
	 */

	if (memorystatus_freeze_enabled == false) {
		return false;
	}

	if (memorystatus_available_pages > memorystatus_freeze_threshold) {
		return false;
	}

	memorystatus_freezer_stats.mfs_below_threshold_count++;

	if (memorystatus_frozen_count >= memorystatus_frozen_processes_max) {
		/*
		 * Count this as a skip even if we wake up to refreeze, because
		 * we won't freeze any new procs.
		 */
		memorystatus_freezer_stats.mfs_skipped_full_count++;
		if (memorystatus_refreeze_eligible_count < memorystatus_min_thaw_refreeze_threshold) {
			return false;
		}
	}

	if (memorystatus_frozen_shared_mb_max && (memorystatus_frozen_shared_mb >= memorystatus_frozen_shared_mb_max)) {
		memorystatus_freezer_stats.mfs_skipped_shared_mb_high_count++;
		return false;
	}

	uint64_t curr_time = mach_absolute_time();

	if (curr_time < memorystatus_freezer_thread_next_run_ts) {
		return false;
	}

	return true;
}

size_t
memorystatus_pick_freeze_count_for_wakeup()
{
	size_t num_to_freeze = 0;
	if (!memorystatus_swap_all_apps) {
		num_to_freeze = 1;
	} else {
		/*
		 * When app swap is enabled, we want the freezer thread to aggressively
		 * freeze all candidates so we clear out space for the foreground working
		 * set. But we still cap it at the current size of the candidate bands to
		 * avoid consuming excessive CPU if there's a lot of churn in the
		 * candidate bands.
		 */
		proc_list_lock();
		for (unsigned int band = JETSAM_PRIORITY_IDLE; band <= memorystatus_freeze_max_candidate_band; band++) {
			num_to_freeze += memstat_bucket[band].count;
		}
		proc_list_unlock();
	}

	return num_to_freeze;
}
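
/*
 * For example, with app swap enabled and 3 procs in the idle band plus 2 more
 * spread across the remaining candidate bands, the freezer thread will attempt
 * at most 5 freezes on this wakeup; with app swap disabled it attempts at
 * most 1.
 */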

#endif /* CONFIG_FREEZE */
759