/*
 * Copyright (c) 2006-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */

#include <kern/task.h>
#include <libkern/libkern.h>
#include <machine/atomic.h>
#include <mach/coalition.h>
#include <os/log.h>
#include <sys/coalition.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kdebug.h>
#include <sys/kern_memorystatus.h>
#include <vm/vm_protos.h>

#include <kern/kern_memorystatus_internal.h>

/*
 * All memory pressure policy decisions should live here, and there should be
 * as little mechanism as possible. This file prioritizes readability.
 */

#pragma mark Policy Function Declarations

#if CONFIG_JETSAM
static bool memorystatus_check_aggressive_jetsam_needed(int *jld_idle_kills);
#endif /* CONFIG_JETSAM */

#pragma mark Memorystatus Health Check

/*
 * Each subsystem that relies on the memorystatus thread to respond to
 * resource exhaustion should put a health check in this section.
 * The memorystatus thread runs all of the health checks
 * to determine if the system is healthy. If the system is unhealthy,
 * it picks an action based on the system health status. See the
 * Memorystatus Thread Actions section below.
 */

extern bool vm_compressor_needs_to_swap(bool wake_memorystatus_thread);
extern boolean_t vm_compressor_low_on_space(void);
extern bool vm_compressor_compressed_pages_nearing_limit(void);
extern bool vm_compressor_is_thrashing(void);
extern bool vm_compressor_swapout_is_ripe(void);

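/*
 * Builds a fresh snapshot of system health on every evaluation: the struct is
 * zeroed and each field recomputed, so no verdict persists across wakeups
 * except through the os_atomic_* flags, which are latched elsewhere
 * (presumably by the code that detects the condition and wakes this thread).
 */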
static void
memorystatus_health_check(memorystatus_system_health_t *status)
{
	memset(status, 0, sizeof(memorystatus_system_health_t));
#if CONFIG_JETSAM
	status->msh_available_pages_below_pressure = memorystatus_avail_pages_below_pressure();
	status->msh_available_pages_below_critical = memorystatus_avail_pages_below_critical();
	status->msh_compressor_is_low_on_space = (vm_compressor_low_on_space() == TRUE);
	status->msh_compressed_pages_nearing_limit = vm_compressor_compressed_pages_nearing_limit();
	status->msh_compressor_is_thrashing = !memorystatus_swap_all_apps && vm_compressor_is_thrashing();
#if CONFIG_PHANTOM_CACHE
	status->msh_phantom_cache_pressure = os_atomic_load(&memorystatus_phantom_cache_pressure, acquire);
#else
	status->msh_phantom_cache_pressure = false;
#endif /* CONFIG_PHANTOM_CACHE */
	if (!memorystatus_swap_all_apps &&
	    status->msh_phantom_cache_pressure &&
	    !(status->msh_compressor_is_thrashing && status->msh_compressor_is_low_on_space)) {
		status->msh_filecache_is_thrashing = true;
	}
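	/*
	 * NB: the store below overwrites the point-in-time
	 * vm_compressor_low_on_space() snapshot taken above; the latched
	 * memorystatus_compressor_space_shortage flag takes precedence for
	 * the final health verdict.
	 */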
	status->msh_compressor_is_low_on_space = os_atomic_load(&memorystatus_compressor_space_shortage, acquire);
	status->msh_pageout_starved = os_atomic_load(&memorystatus_pageout_starved, acquire);
	status->msh_swappable_compressor_segments_over_limit = memorystatus_swap_over_trigger(100);
	status->msh_swapin_queue_over_limit = memorystatus_swapin_over_trigger();
	status->msh_swap_low_on_space = vm_swap_low_on_space();
	status->msh_swap_out_of_space = vm_swap_out_of_space();
#endif /* CONFIG_JETSAM */
	status->msh_zone_map_is_exhausted = os_atomic_load(&memorystatus_zone_map_is_exhausted, relaxed);
}

bool
memorystatus_is_system_healthy(const memorystatus_system_health_t *status)
{
#if CONFIG_JETSAM
	return !(status->msh_available_pages_below_critical ||
	       status->msh_compressor_is_low_on_space ||
	       status->msh_compressor_is_thrashing ||
	       status->msh_filecache_is_thrashing ||
	       status->msh_zone_map_is_exhausted ||
	       status->msh_pageout_starved);
#else /* CONFIG_JETSAM */
	return !status->msh_zone_map_is_exhausted;
#endif /* CONFIG_JETSAM */
}


#pragma mark Memorystatus Thread Actions

/*
 * This section picks the appropriate memorystatus_action & deploys it.
 */

/*
 * Inspects the state of various resources in the system to see if
 * the system is healthy. If the system is not healthy, picks a
 * memorystatus_action_t to recover the system.
 *
 * Every time the memorystatus thread wakes up it calls into here
 * to pick an action. It will continue performing memorystatus actions until this
 * function returns MEMORYSTATUS_KILL_NONE. At that point the thread will block.
 */
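/*
 * Decision order under CONFIG_JETSAM, as implemented below: low-swap kills
 * (swap space exhausted), swapper/swapin-queue wakeups, highwater kills,
 * then either no action (system healthy), aggressive jetsam, or a single
 * top-process kill.
 */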
memorystatus_action_t
memorystatus_pick_action(struct jetsam_thread_state *jetsam_thread,
    uint32_t *kill_cause,
    bool highwater_remaining,
    bool suspended_swappable_apps_remaining,
    bool swappable_apps_remaining,
    int *jld_idle_kills)
{
	memorystatus_system_health_t status;
	memorystatus_health_check(&status);
	memorystatus_log_system_health(&status);
	bool is_system_healthy = memorystatus_is_system_healthy(&status);

#if CONFIG_JETSAM
	if (status.msh_available_pages_below_pressure || !is_system_healthy) {
		/*
		 * If swap is enabled, first check if we're running low or are out of swap space.
		 */
		if (memorystatus_swap_all_apps && jetsam_kill_on_low_swap) {
			if (swappable_apps_remaining && status.msh_swap_out_of_space) {
				*kill_cause = kMemorystatusKilledLowSwap;
				return MEMORYSTATUS_KILL_SWAPPABLE;
			} else if (suspended_swappable_apps_remaining && status.msh_swap_low_on_space) {
				*kill_cause = kMemorystatusKilledLowSwap;
				return MEMORYSTATUS_KILL_SUSPENDED_SWAPPABLE;
			}
		}

		/*
		 * We're below the pressure level or the system is unhealthy.
		 * Either way, check whether we should be swapping and whether
		 * there are high watermark kills left to do.
		 */
		if (memorystatus_swap_all_apps) {
			if (status.msh_swappable_compressor_segments_over_limit && !vm_swapout_thread_running && !os_atomic_load(&vm_swapout_wake_pending, relaxed)) {
				/*
				 * TODO: The swapper will keep running until it has drained the entire early swapout queue.
				 * That might be overly aggressive & we should look into tuning it.
				 * See rdar://84102304.
				 */
				return MEMORYSTATUS_WAKE_SWAPPER;
			} else if (status.msh_swapin_queue_over_limit) {
				return MEMORYSTATUS_PROCESS_SWAPIN_QUEUE;
			} else if (status.msh_swappable_compressor_segments_over_limit) {
				memorystatus_log_info(
					"memorystatus: Skipping swap wakeup because the swap thread is already running. vm_swapout_thread_running=%d, vm_swapout_wake_pending=%d\n",
					vm_swapout_thread_running, os_atomic_load(&vm_swapout_wake_pending, relaxed));
			}
		}

		if (highwater_remaining) {
			*kill_cause = kMemorystatusKilledHiwat;
			memorystatus_log("memorystatus: Looking for highwatermark kills.\n");
			return MEMORYSTATUS_KILL_HIWATER;
		}
	}

	if (is_system_healthy) {
		*kill_cause = 0;
		return MEMORYSTATUS_KILL_NONE;
	}

	/*
	 * At this point the system is unhealthy and there are no
	 * more highwatermark processes to kill.
	 */

	if (!jetsam_thread->limit_to_low_bands) {
		if (memorystatus_check_aggressive_jetsam_needed(jld_idle_kills)) {
			memorystatus_log("memorystatus: Starting aggressive jetsam.\n");
			*kill_cause = kMemorystatusKilledProcThrashing;
			return MEMORYSTATUS_KILL_AGGRESSIVE;
		}
	}
	/*
	 * The system is unhealthy and we either don't need aggressive jetsam
	 * or are not allowed to deploy it.
	 * Kill in priority order. We'll use LRU within every band except the
	 * FG (which will be sorted by coalition role).
	 */
	*kill_cause = memorystatus_pick_kill_cause(&status);
	return MEMORYSTATUS_KILL_TOP_PROCESS;
#else /* CONFIG_JETSAM */
	(void) jetsam_thread;
	(void) jld_idle_kills;
	(void) suspended_swappable_apps_remaining;
	(void) swappable_apps_remaining;
	/*
	 * Without CONFIG_JETSAM, we only kill if the system is unhealthy.
	 * There is no aggressive jetsam and no
	 * early highwatermark killing.
	 */
	if (is_system_healthy) {
		*kill_cause = 0;
		return MEMORYSTATUS_KILL_NONE;
	}
	if (highwater_remaining) {
		*kill_cause = kMemorystatusKilledHiwat;
		return MEMORYSTATUS_KILL_HIWATER;
	} else {
		*kill_cause = memorystatus_pick_kill_cause(&status);
		return MEMORYSTATUS_KILL_TOP_PROCESS;
	}
#endif /* CONFIG_JETSAM */
}

#pragma mark Aggressive Jetsam
/*
 * This section defines when we deploy aggressive jetsam.
 * Aggressive jetsam kills everything up to the jld_priority_band_max band.
 */

#if CONFIG_JETSAM

static bool
memorystatus_aggressive_jetsam_needed_sysproc_aging(__unused int jld_eval_aggressive_count, __unused int *jld_idle_kills, __unused int jld_idle_kill_candidates, int *total_candidates);

/*
 * kJetsamHighRelaunchCandidatesThreshold defines the percentage of candidates
 * in the idle & deferred bands that need to be bad candidates in order to trigger
 * aggressive jetsam.
 */
#define kJetsamHighRelaunchCandidatesThreshold  (100)

/* kJetsamMinCandidatesThreshold defines the minimum number of candidates in the
 * idle/deferred bands to trigger aggressive jetsam. This value basically decides
 * how much memory the system is ready to hold in the lower bands without triggering
 * aggressive jetsam. This number should ideally be tuned based on the memory config
 * of the device.
 */
#define kJetsamMinCandidatesThreshold           (5)
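/*
 * Worked example with the defaults above: with 8 candidates in the idle and
 * deferred bands, all 8 must be marked high-relaunch-probability to reach the
 * 100% threshold; and with only 4 candidates, aggressive jetsam is never
 * triggered regardless of their relaunch markings (4 < 5).
 */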

static bool
memorystatus_check_aggressive_jetsam_needed(int *jld_idle_kills)
{
	bool aggressive_jetsam_needed = false;
	int total_candidates = 0;
	/*
	 * The aggressive jetsam logic looks at the number of times it has been in the
	 * aggressive loop to determine the max priority band it should kill up to. The
	 * static variables below are used to track that property.
	 *
	 * To reset those values, the implementation checks if it has been
	 * memorystatus_jld_eval_period_msecs since the parameters were reset.
	 */

	if (memorystatus_jld_enabled == FALSE) {
		/* If aggressive jetsam is disabled, nothing to do here */
		return FALSE;
	}

	/* Get current timestamp (msecs only) */
	struct timeval  jld_now_tstamp = {0, 0};
	uint64_t        jld_now_msecs = 0;
	microuptime(&jld_now_tstamp);
	jld_now_msecs = (jld_now_tstamp.tv_sec * 1000);

	/*
	 * Look at the number of candidates in the idle and deferred band and
	 * how many out of them are marked as high relaunch probability.
	 */
	aggressive_jetsam_needed = memorystatus_aggressive_jetsam_needed_sysproc_aging(jld_eval_aggressive_count,
	    jld_idle_kills, jld_idle_kill_candidates, &total_candidates);

	/*
	 * Check if it's been a really long time since the aggressive jetsam evaluation
	 * parameters were refreshed. This logic also resets the jld_eval_aggressive_count
	 * counter to make sure we reset the aggressive jetsam severity.
	 */
	boolean_t param_reval = false;

	if ((total_candidates == 0) ||
	    (jld_now_msecs > (jld_timestamp_msecs + memorystatus_jld_eval_period_msecs))) {
		jld_timestamp_msecs      = jld_now_msecs;
		jld_idle_kill_candidates = total_candidates;
		*jld_idle_kills          = 0;
		jld_eval_aggressive_count = 0;
		jld_priority_band_max   = JETSAM_PRIORITY_UI_SUPPORT;
		param_reval = true;
	}

	/*
	 * It is also possible that the system is down to a very small number of processes in the candidate
	 * bands. In that case, the decisions made by the memorystatus_aggressive_jetsam_needed_* routines
	 * would not be useful, so do not trigger aggressive jetsam.
	 */
	if (total_candidates < kJetsamMinCandidatesThreshold) {
#if DEVELOPMENT || DEBUG
		memorystatus_log_info(
			"memorystatus: aggressive: [FAILED] Low Candidate Count (current: %d, threshold: %d)\n", total_candidates, kJetsamMinCandidatesThreshold);
#endif /* DEVELOPMENT || DEBUG */
		aggressive_jetsam_needed = false;
	}
	return aggressive_jetsam_needed;
}

static bool
memorystatus_aggressive_jetsam_needed_sysproc_aging(__unused int eval_aggressive_count, __unused int *idle_kills, __unused int idle_kill_candidates, int *total_candidates)
{
	bool aggressive_jetsam_needed = false;

	/*
	 * For the kJetsamAgingPolicySysProcsReclaimedFirst aging policy, we maintain the jetsam
	 * relaunch behavior for all daemons. Also, daemons and apps are aged in deferred bands on
	 * every dirty->clean transition. For this aging policy, the best way to determine if
	 * aggressive jetsam is needed is to see if the kill candidates are mostly bad candidates.
	 * If yes, then we need to go to higher bands to reclaim memory.
	 */
	proc_list_lock();
	/* Get total candidate counts for idle and idle deferred bands */
	*total_candidates = memstat_bucket[JETSAM_PRIORITY_IDLE].count + memstat_bucket[system_procs_aging_band].count;
	/* Get counts of bad kill candidates in idle and idle deferred bands */
	int bad_candidates = memstat_bucket[JETSAM_PRIORITY_IDLE].relaunch_high_count + memstat_bucket[system_procs_aging_band].relaunch_high_count;

	proc_list_unlock();

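	/*
	 * NB: the division below assumes *total_candidates != 0; the caller
	 * only treats total_candidates == 0 as a parameter-reset case after
	 * this function has already returned.
	 */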
	/* Check if the number of bad candidates is greater than kJetsamHighRelaunchCandidatesThreshold % */
	aggressive_jetsam_needed = (((bad_candidates * 100) / *total_candidates) >= kJetsamHighRelaunchCandidatesThreshold);

	/*
	 * Since the new aging policy bases the aggressive jetsam trigger on percentage of
	 * bad candidates, it is prone to being overly aggressive. In order to mitigate that,
	 * make sure the system is really under memory pressure before triggering aggressive
	 * jetsam.
	 */
	if (memorystatus_available_pages > memorystatus_sysproc_aging_aggr_pages) {
		aggressive_jetsam_needed = false;
	}

#if DEVELOPMENT || DEBUG
	memorystatus_log_info(
		"memorystatus: aggressive%d: [%s] Bad Candidate Threshold Check (total: %d, bad: %d, threshold: %d %%); Memory Pressure Check (available_pgs: %llu, threshold_pgs: %llu)\n",
		eval_aggressive_count, aggressive_jetsam_needed ? "PASSED" : "FAILED", *total_candidates, bad_candidates,
		kJetsamHighRelaunchCandidatesThreshold, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, (uint64_t)memorystatus_sysproc_aging_aggr_pages);
#endif /* DEVELOPMENT || DEBUG */
	return aggressive_jetsam_needed;
}

#endif /* CONFIG_JETSAM */

#pragma mark Freezer
#if CONFIG_FREEZE
/*
 * Freezer policies
 */

/*
 * These functions determine what is eligible for the freezer
 * and the order in which we consider freezing processes.
 */

/*
 * Checks if the given process is eligible for the freezer.
 * Processes can only be frozen if this returns true.
 */
bool
memorystatus_is_process_eligible_for_freeze(proc_t p)
{
	/*
	 * Called with proc_list_lock held.
	 */

	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);

	bool should_freeze = false;
	uint32_t state = 0, pages = 0;
	bool first_consideration = true;
	task_t task;

	state = p->p_memstat_state;

	if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) {
		if (state & P_MEMSTAT_FREEZE_DISABLED) {
			p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonDisabled;
		}
		goto out;
	}

	task = proc_task(p);

	if (isSysProc(p)) {
		/*
		 * Daemon: we consider freezing it if:
		 * - it belongs to a coalition and the leader is frozen, and,
		 * - its role in the coalition is XPC service.
		 *
		 * We skip memory size requirements in this case.
		 */
		int task_role_in_coalition = 0;
		proc_t leader_proc = memorystatus_get_coalition_leader_and_role(p, &task_role_in_coalition);
		if (leader_proc == PROC_NULL || leader_proc == p) {
			/*
			 * Jetsam coalition is leaderless or the leader is not an app.
			 * Either way, don't freeze this proc.
			 */
			goto out;
		}

		/* Leader must be frozen */
		if (!(leader_proc->p_memstat_state & P_MEMSTAT_FROZEN)) {
			goto out;
		}
		/* Only freeze XPC services */
		if (task_role_in_coalition == COALITION_TASKROLE_XPC) {
			should_freeze = true;
		}

		goto out;
	} else {
		/*
		 * Application. Only freeze if it's suspended.
		 */
		if (!(state & P_MEMSTAT_SUSPENDED)) {
			goto out;
		}
	}

	/*
	 * We're interested in tracking what percentage of
	 * eligible apps actually get frozen.
	 * To avoid skewing the metrics towards processes which
	 * are considered more frequently, we only track failures once
	 * per process.
	 */
	first_consideration = !(state & P_MEMSTAT_FREEZE_CONSIDERED);

	if (first_consideration) {
		memorystatus_freezer_stats.mfs_process_considered_count++;
		p->p_memstat_state |= P_MEMSTAT_FREEZE_CONSIDERED;
	}

	/* Only freeze applications meeting our minimum resident page criteria */
	memorystatus_get_task_page_counts(proc_task(p), &pages, NULL, NULL);
	if (pages < memorystatus_freeze_pages_min) {
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_below_min_pages_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonBelowMinPages;
		goto out;
	}

	/* Don't freeze a process that is already exiting on core. It may have started exiting
	 * after we chose it for freeze, but before we obtained the proc_list_lock.
	 * NB: This is only possible if we're coming in from memorystatus_freeze_process_sync.
	 * memorystatus_freeze_top_process holds the proc_list_lock while it traverses the bands.
	 */
	if (proc_list_exited(p)) {
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_other_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOther;
		goto out;
	}

	if (!memorystatus_freezer_use_ordered_list) {
		/*
		 * We're not using the ordered list so we need to check
		 * that dasd recommended the process. Note that the ordered list
		 * algorithm only considers processes on the list in the first place,
		 * so there's no need to double check here.
		 */
		if (!memorystatus_freeze_process_is_recommended(p)) {
			if (first_consideration) {
				memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++;
			}
			p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonLowProbOfUse;
			goto out;
		}
	}

	if (!(state & P_MEMSTAT_FROZEN) && p->p_memstat_effectivepriority > memorystatus_freeze_max_candidate_band) {
		/*
		 * Proc has been elevated by something else.
		 * Don't freeze it.
		 */
		if (first_consideration) {
			memorystatus_freezer_stats.mfs_error_elevated_count++;
		}
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonElevated;
		goto out;
	}

	should_freeze = true;
out:
	if (should_freeze && !(state & P_MEMSTAT_FROZEN)) {
		/*
		 * Reset the skip reason. If it's killed before we manage to actually freeze it,
		 * we failed to consider it early enough.
		 */
		p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
		if (!first_consideration) {
			/*
			 * We're freezing this for the first time and we previously considered it ineligible.
			 * Bump the considered count so that we track this as 1 failure
			 * and 1 success.
			 */
			memorystatus_freezer_stats.mfs_process_considered_count++;
		}
	}
	return should_freeze;
}

bool
memorystatus_freeze_proc_is_refreeze_eligible(proc_t p)
{
	return (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) != 0;
}


static proc_t
memorystatus_freeze_pick_refreeze_process(proc_t last_p)
{
	proc_t p = PROC_NULL, next_p = PROC_NULL;
	unsigned int band = (unsigned int) memorystatus_freeze_jetsam_band;
	if (last_p == PROC_NULL) {
		next_p = memorystatus_get_first_proc_locked(&band, FALSE);
	} else {
		next_p = memorystatus_get_next_proc_locked(&band, last_p, FALSE);
	}
	while (next_p) {
		p = next_p;
		next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
		if ((p->p_memstat_state & P_MEMSTAT_FROZEN) && !memorystatus_freeze_proc_is_refreeze_eligible(p)) {
			/* Process is already frozen & hasn't been thawed. */
			continue;
		}
		/*
		 * Has to have been frozen once before.
		 */
		if (!(p->p_memstat_state & P_MEMSTAT_FROZEN)) {
			continue;
		}

		/*
		 * Must not currently be locked for inspection by someone else.
		 */
		if (p->p_memstat_state & P_MEMSTAT_LOCKED) {
			continue;
		}
		/*
		 * Found it.
		 */
		break;
	}
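	/*
	 * NB: if the loop exhausts the band without hitting the break, p still
	 * points at the last process examined rather than PROC_NULL. Callers
	 * appear to tolerate this because the refreeze path is only entered
	 * while refreeze-eligible processes remain.
	 */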
	return p;
}

proc_t
memorystatus_freeze_pick_process(struct memorystatus_freeze_list_iterator *iterator)
{
	proc_t p = PROC_NULL, next_p = PROC_NULL;
	unsigned int band = JETSAM_PRIORITY_IDLE;

	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
	/*
	 * If the freezer is full, only consider refreezes.
	 */
	if (iterator->refreeze_only || memorystatus_frozen_count >= memorystatus_frozen_processes_max) {
		if (!iterator->refreeze_only) {
			/*
			 * The first time the iterator starts to return refreeze
			 * candidates, we need to reset the last pointer b/c it's pointing into the wrong band.
			 */
			iterator->last_p = PROC_NULL;
			iterator->refreeze_only = true;
		}
		iterator->last_p = memorystatus_freeze_pick_refreeze_process(iterator->last_p);
		return iterator->last_p;
	}

	/*
	 * Search for the next freezer candidate.
	 */
	if (memorystatus_freezer_use_ordered_list) {
		next_p = memorystatus_freezer_candidate_list_get_proc(
			&memorystatus_global_freeze_list,
			(iterator->global_freeze_list_index)++,
			&memorystatus_freezer_stats.mfs_freeze_pid_mismatches);
	} else if (iterator->last_p == PROC_NULL) {
		next_p = memorystatus_get_first_proc_locked(&band, FALSE);
	} else {
		next_p = memorystatus_get_next_proc_locked(&band, iterator->last_p, FALSE);
	}
	while (next_p) {
		p = next_p;
		if (memorystatus_is_process_eligible_for_freeze(p)) {
			iterator->last_p = p;
			return iterator->last_p;
		} else {
			if (memorystatus_freezer_use_ordered_list) {
				next_p = memorystatus_freezer_candidate_list_get_proc(
					&memorystatus_global_freeze_list,
					(iterator->global_freeze_list_index)++,
					&memorystatus_freezer_stats.mfs_freeze_pid_mismatches);
			} else {
				next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
			}
		}
	}

	/*
	 * Failed to find a new freezer candidate.
	 * Try to re-freeze.
	 */
	if (memorystatus_refreeze_eligible_count >= memorystatus_min_thaw_refreeze_threshold) {
		assert(!iterator->refreeze_only);
		iterator->refreeze_only = true;
		iterator->last_p = memorystatus_freeze_pick_refreeze_process(PROC_NULL);
		return iterator->last_p;
	}
	return PROC_NULL;
}
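/*
 * Typical usage (a sketch, not lifted from an actual call site):
 * zero-initialize the iterator, then call repeatedly under the
 * proc_list_lock until PROC_NULL is returned:
 *
 *	struct memorystatus_freeze_list_iterator iterator;
 *	proc_t p;
 *	bzero(&iterator, sizeof(iterator));
 *	proc_list_lock();
 *	while ((p = memorystatus_freeze_pick_process(&iterator)) != PROC_NULL) {
 *		// consider freezing or refreezing p
 *	}
 *	proc_list_unlock();
 */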

/*
 * memorystatus_pages_update calls this function whenever the number
 * of available pages changes. It wakes the freezer thread iff the function returns
 * true. The freezer thread will try to freeze (or refreeze) up to 1 process
 * before blocking again.
 *
 * Note the freezer thread is also woken up by memorystatus_on_inactivity.
 */

bool
memorystatus_freeze_thread_should_run()
{
	/*
	 * No freezer_mutex held here...see why near call-site
	 * within memorystatus_pages_update().
	 */

	if (memorystatus_freeze_enabled == FALSE) {
		return false;
	}

	if (memorystatus_available_pages > memorystatus_freeze_threshold) {
		return false;
	}

	memorystatus_freezer_stats.mfs_below_threshold_count++;

	if (memorystatus_frozen_count >= memorystatus_frozen_processes_max) {
		/*
		 * Consider this as a skip even if we wake up to refreeze because
		 * we won't freeze any new procs.
		 */
		memorystatus_freezer_stats.mfs_skipped_full_count++;
		if (memorystatus_refreeze_eligible_count < memorystatus_min_thaw_refreeze_threshold) {
			return false;
		}
	}

	if (memorystatus_frozen_shared_mb_max && (memorystatus_frozen_shared_mb >= memorystatus_frozen_shared_mb_max)) {
		memorystatus_freezer_stats.mfs_skipped_shared_mb_high_count++;
		return false;
	}

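	/*
	 * Backoff check: memorystatus_freezer_thread_next_run_ts is presumably
	 * advanced after each freeze pass, so wakeups that arrive before it
	 * elapses are declined rather than re-running the freezer immediately.
	 */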
	uint64_t curr_time = mach_absolute_time();

	if (curr_time < memorystatus_freezer_thread_next_run_ts) {
		return false;
	}

	return true;
}

size_t
memorystatus_pick_freeze_count_for_wakeup()
{
	size_t num_to_freeze = 0;
	if (!memorystatus_swap_all_apps) {
		num_to_freeze = 1;
	} else {
		/*
		 * When app swap is enabled, we want the freezer thread to aggressively freeze
		 * all candidates so we clear out space for the fg working set.
		 * But we still cap it to the current size of the candidate bands to avoid
		 * consuming excessive CPU if there's a lot of churn in the candidate band.
		 */
		proc_list_lock();
		for (unsigned int band = JETSAM_PRIORITY_IDLE; band <= memorystatus_freeze_max_candidate_band; band++) {
			num_to_freeze += memstat_bucket[band].count;
		}
		proc_list_unlock();
	}

	return num_to_freeze;
}

#endif /* CONFIG_FREEZE */