xref: /xnu-8019.80.24/osfmk/vm/vm_pageout.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_pageout.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *	Date:	1985
62  *
63  *	The proverbial page-out daemon.
64  */
65 
66 #include <stdint.h>
67 #include <ptrauth.h>
68 
69 #include <debug.h>
70 #include <mach_pagemap.h>
71 #include <mach_cluster_stats.h>
72 
73 #include <mach/mach_types.h>
74 #include <mach/memory_object.h>
75 #include <mach/memory_object_default.h>
76 #include <mach/memory_object_control_server.h>
77 #include <mach/mach_host_server.h>
78 #include <mach/upl.h>
79 #include <mach/vm_map.h>
80 #include <mach/vm_param.h>
81 #include <mach/vm_statistics.h>
82 #include <mach/sdt.h>
83 
84 #include <kern/kern_types.h>
85 #include <kern/counter.h>
86 #include <kern/host_statistics.h>
87 #include <kern/machine.h>
88 #include <kern/misc_protos.h>
89 #include <kern/sched.h>
90 #include <kern/thread.h>
91 #include <kern/kalloc.h>
92 #include <kern/zalloc_internal.h>
93 #include <kern/policy_internal.h>
94 #include <kern/thread_group.h>
95 
96 #include <machine/vm_tuning.h>
97 #include <machine/commpage.h>
98 
99 #include <vm/pmap.h>
100 #include <vm/vm_compressor_pager.h>
101 #include <vm/vm_fault.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_protos.h> /* must be last */
107 #include <vm/memory_object.h>
108 #include <vm/vm_purgeable_internal.h>
109 #include <vm/vm_shared_region.h>
110 #include <vm/vm_compressor.h>
111 
112 #include <san/kasan.h>
113 
114 #if CONFIG_PHANTOM_CACHE
115 #include <vm/vm_phantom_cache.h>
116 #endif
117 
118 #if UPL_DEBUG
119 #include <libkern/OSDebug.h>
120 #endif
121 
122 extern int cs_debug;
123 
124 extern void mbuf_drain(boolean_t);
125 
126 #if VM_PRESSURE_EVENTS
127 #if CONFIG_JETSAM
128 extern unsigned int memorystatus_available_pages;
129 extern unsigned int memorystatus_available_pages_pressure;
130 extern unsigned int memorystatus_available_pages_critical;
131 #else /* CONFIG_JETSAM */
132 extern uint64_t memorystatus_available_pages;
133 extern uint64_t memorystatus_available_pages_pressure;
134 extern uint64_t memorystatus_available_pages_critical;
135 #endif /* CONFIG_JETSAM */
136 
137 extern unsigned int memorystatus_frozen_count;
138 extern unsigned int memorystatus_suspended_count;
139 extern vm_pressure_level_t memorystatus_vm_pressure_level;
140 
141 extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
142 extern uint32_t memorystatus_jetsam_fg_band_waiters;
143 
144 void vm_pressure_response(void);
145 extern void consider_vm_pressure_events(void);
146 
147 #define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
148 #endif /* VM_PRESSURE_EVENTS */
149 
150 SECURITY_READ_ONLY_LATE(thread_t) vm_pageout_scan_thread;
151 boolean_t vps_dynamic_priority_enabled = FALSE;
152 
153 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
154 #if !XNU_TARGET_OS_OSX
155 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
156 #else /* !XNU_TARGET_OS_OSX */
157 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
158 #endif /* !XNU_TARGET_OS_OSX */
159 #endif
160 
161 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
162 #define VM_PAGEOUT_DEADLOCK_RELIEF 100  /* number of pages to move to break deadlock */
163 #endif
164 
165 #ifndef VM_PAGE_LAUNDRY_MAX
166 #define VM_PAGE_LAUNDRY_MAX     128UL   /* maximum pageouts on a given pageout queue */
167 #endif  /* VM_PAGE_LAUNDRY_MAX */
168 
169 #ifndef VM_PAGEOUT_BURST_WAIT
170 #define VM_PAGEOUT_BURST_WAIT   1       /* milliseconds */
171 #endif  /* VM_PAGEOUT_BURST_WAIT */
172 
173 #ifndef VM_PAGEOUT_EMPTY_WAIT
174 #define VM_PAGEOUT_EMPTY_WAIT   50      /* milliseconds */
175 #endif  /* VM_PAGEOUT_EMPTY_WAIT */
176 
177 #ifndef VM_PAGEOUT_DEADLOCK_WAIT
178 #define VM_PAGEOUT_DEADLOCK_WAIT 100    /* milliseconds */
179 #endif  /* VM_PAGEOUT_DEADLOCK_WAIT */
180 
181 #ifndef VM_PAGEOUT_IDLE_WAIT
182 #define VM_PAGEOUT_IDLE_WAIT    10      /* milliseconds */
183 #endif  /* VM_PAGEOUT_IDLE_WAIT */
184 
185 #ifndef VM_PAGEOUT_SWAP_WAIT
186 #define VM_PAGEOUT_SWAP_WAIT    10      /* milliseconds */
187 #endif  /* VM_PAGEOUT_SWAP_WAIT */
188 
189 
190 #ifndef VM_PAGE_SPECULATIVE_TARGET
191 #define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
192 #endif /* VM_PAGE_SPECULATIVE_TARGET */
193 
194 
195 /*
196  *	To obtain a reasonable LRU approximation, the inactive queue
197  *	needs to be large enough to give pages on it a chance to be
198  *	referenced a second time.  This macro defines the fraction
199  *	of active+inactive pages that should be inactive.
200  *	The pageout daemon uses it to update vm_page_inactive_target.
201  *
202  *	If vm_page_free_count falls below vm_page_free_target and
203  *	vm_page_inactive_count is below vm_page_inactive_target,
204  *	then the pageout daemon starts running.
205  */
206 
207 #ifndef VM_PAGE_INACTIVE_TARGET
208 #define VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 2)
209 #endif  /* VM_PAGE_INACTIVE_TARGET */
210 
211 /*
212  *	Once the pageout daemon starts running, it keeps going
213  *	until vm_page_free_count meets or exceeds vm_page_free_target.
214  */
215 
216 #ifndef VM_PAGE_FREE_TARGET
217 #if !XNU_TARGET_OS_OSX
218 #define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
219 #else /* !XNU_TARGET_OS_OSX */
220 #define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
221 #endif /* !XNU_TARGET_OS_OSX */
222 #endif  /* VM_PAGE_FREE_TARGET */
223 
224 
225 /*
226  *	The pageout daemon always starts running once vm_page_free_count
227  *	falls below vm_page_free_min.
228  */
229 
230 #ifndef VM_PAGE_FREE_MIN
231 #if !XNU_TARGET_OS_OSX
232 #define VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
233 #else /* !XNU_TARGET_OS_OSX */
234 #define VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
235 #endif /* !XNU_TARGET_OS_OSX */
236 #endif  /* VM_PAGE_FREE_MIN */
237 
238 #if !XNU_TARGET_OS_OSX
239 #define VM_PAGE_FREE_RESERVED_LIMIT     100
240 #define VM_PAGE_FREE_MIN_LIMIT          1500
241 #define VM_PAGE_FREE_TARGET_LIMIT       2000
242 #else /* !XNU_TARGET_OS_OSX */
243 #define VM_PAGE_FREE_RESERVED_LIMIT     1700
244 #define VM_PAGE_FREE_MIN_LIMIT          3500
245 #define VM_PAGE_FREE_TARGET_LIMIT       4000
246 #endif /* !XNU_TARGET_OS_OSX */
247 
248 /*
249  *	When vm_page_free_count falls below vm_page_free_reserved,
250  *	only vm-privileged threads can allocate pages.  vm-privilege
251  *	allows the pageout daemon and default pager (and any other
252  *	associated threads needed for default pageout) to continue
253  *	operation by dipping into the reserved pool of pages.
254  */
255 
256 #ifndef VM_PAGE_FREE_RESERVED
257 #define VM_PAGE_FREE_RESERVED(n)        \
258 	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
259 #endif  /* VM_PAGE_FREE_RESERVED */
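/*
 * Worked example (illustrative figures only, not taken from the original
 * source): assuming a hypothetical macOS configuration with 1,000,000
 * pageable pages and the default VM_PAGE_LAUNDRY_MAX of 128:
 *
 *	VM_PAGE_INACTIVE_TARGET(1000000) = 1000000 / 2        = 500000 pages
 *	VM_PAGE_FREE_TARGET(1000000)     = 15 + 1000000 / 80  =  12515 pages
 *	VM_PAGE_FREE_MIN(1000000)        = 10 + 1000000 / 100 =  10010 pages
 *	VM_PAGE_FREE_RESERVED(4)         = 6 * 128 + 4        =    772 pages
 *
 * The VM_PAGE_FREE_*_LIMIT constants above serve as upper bounds that are
 * applied when these targets are actually computed at pageout startup, so
 * the effective free_min/free_target values stay clamped to them.
 */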
260 
261 /*
262  *	When we dequeue pages from the inactive list, they are
263  *	reactivated (ie, put back on the active queue) if referenced.
264  *	However, it is possible to starve the free list if other
265  *	processors are referencing pages faster than we can turn off
266  *	the referenced bit.  So we limit the number of reactivations
267  *	we will make per call of vm_pageout_scan().
268  */
269 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
270 
271 #ifndef VM_PAGE_REACTIVATE_LIMIT
272 #if !XNU_TARGET_OS_OSX
273 #define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
274 #else /* !XNU_TARGET_OS_OSX */
275 #define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
276 #endif /* !XNU_TARGET_OS_OSX */
277 #endif  /* VM_PAGE_REACTIVATE_LIMIT */
278 #define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM       1000
279 
280 extern boolean_t hibernate_cleaning_in_progress;
281 
282 /*
283  * Forward declarations for internal routines.
284  */
285 struct cq {
286 	struct vm_pageout_queue *q;
287 	void                    *current_chead;
288 	char                    *scratch_buf;
289 	int                     id;
290 };
291 
292 struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
293 
294 
295 #if VM_PRESSURE_EVENTS
296 void vm_pressure_thread(void);
297 
298 boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
299 boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
300 
301 boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
302 boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
303 #endif
304 
305 void vm_pageout_garbage_collect(int);
306 static void vm_pageout_iothread_external(void);
307 static void vm_pageout_iothread_internal(struct cq *cq);
308 static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);
309 
310 extern void vm_pageout_continue(void);
311 extern void vm_pageout_scan(void);
312 
313 boolean_t vm_pageout_running = FALSE;
314 
315 uint32_t vm_page_upl_tainted = 0;
316 uint32_t vm_page_iopl_tainted = 0;
317 
318 #if XNU_TARGET_OS_OSX
319 static boolean_t vm_pageout_waiter  = FALSE;
320 #endif /* XNU_TARGET_OS_OSX */
321 
322 
323 #if DEVELOPMENT || DEBUG
324 struct vm_pageout_debug vm_pageout_debug;
325 #endif
326 struct vm_pageout_vminfo vm_pageout_vminfo;
327 struct vm_pageout_state  vm_pageout_state;
328 struct vm_config         vm_config;
329 
330 struct  vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED;
331 struct  vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED;
332 
333 int         vm_upl_wait_for_pages = 0;
334 vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
335 
336 boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;
337 
338 int     vm_debug_events = 0;
339 
340 LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout");
341 
342 #if CONFIG_MEMORYSTATUS
343 extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
344 
345 uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
346 uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
347 
348 #endif
349 
350 #if __AMP__
351 int vm_compressor_ebound = 1;
352 int vm_pgo_pbound = 0;
353 extern void thread_bind_cluster_type(thread_t, char, bool);
354 #endif /* __AMP__ */
355 
356 
357 /*
358  *	Routine:	vm_pageout_object_terminate
359  *	Purpose:
360  *		Destroy the pageout_object, and perform all of the
361  *		required cleanup actions.
362  *
363  *	In/Out conditions:
364  *		The object must be locked, and will be returned locked.
365  */
366 void
367 vm_pageout_object_terminate(
368 	vm_object_t     object)
369 {
370 	vm_object_t     shadow_object;
371 
372 	/*
373 	 * Deal with the deallocation (last reference) of a pageout object
374 	 * (used for cleaning-in-place) by dropping the paging references/
375 	 * freeing pages in the original object.
376 	 */
377 
378 	assert(object->pageout);
379 	shadow_object = object->shadow;
380 	vm_object_lock(shadow_object);
381 
382 	while (!vm_page_queue_empty(&object->memq)) {
383 		vm_page_t               p, m;
384 		vm_object_offset_t      offset;
385 
386 		p = (vm_page_t) vm_page_queue_first(&object->memq);
387 
388 		assert(p->vmp_private);
389 		assert(p->vmp_free_when_done);
390 		p->vmp_free_when_done = FALSE;
391 		assert(!p->vmp_cleaning);
392 		assert(!p->vmp_laundry);
393 
394 		offset = p->vmp_offset;
395 		VM_PAGE_FREE(p);
396 		p = VM_PAGE_NULL;
397 
398 		m = vm_page_lookup(shadow_object,
399 		    offset + object->vo_shadow_offset);
400 
401 		if (m == VM_PAGE_NULL) {
402 			continue;
403 		}
404 
405 		assert((m->vmp_dirty) || (m->vmp_precious) ||
406 		    (m->vmp_busy && m->vmp_cleaning));
407 
408 		/*
409 		 * Handle the trusted pager throttle.
410 		 * Also decrement the burst throttle (if external).
411 		 */
412 		vm_page_lock_queues();
413 		if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
414 			vm_pageout_throttle_up(m);
415 		}
416 
417 		/*
418 		 * Handle the "target" page(s). These pages are to be freed if
419 		 * successfully cleaned. Target pages are always busy, and are
420 		 * wired exactly once. The initial target pages are not mapped,
421 		 * (so cannot be referenced or modified) but converted target
422 		 * pages may have been modified between the selection as an
423 		 * adjacent page and conversion to a target.
424 		 */
425 		if (m->vmp_free_when_done) {
426 			assert(m->vmp_busy);
427 			assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
428 			assert(m->vmp_wire_count == 1);
429 			m->vmp_cleaning = FALSE;
430 			m->vmp_free_when_done = FALSE;
431 			/*
432 			 * Revoke all access to the page. Since the object is
433 			 * locked, and the page is busy, this prevents the page
434 			 * from being dirtied after the pmap_disconnect() call
435 			 * returns.
436 			 *
437 			 * Since the page is left "dirty" but "not modified", we
438 			 * can detect whether the page was redirtied during
439 			 * pageout by checking the modify state.
440 			 */
441 			if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
442 				SET_PAGE_DIRTY(m, FALSE);
443 			} else {
444 				m->vmp_dirty = FALSE;
445 			}
446 
447 			if (m->vmp_dirty) {
448 				vm_page_unwire(m, TRUE);        /* reactivates */
449 				counter_inc(&vm_statistics_reactivations);
450 				PAGE_WAKEUP_DONE(m);
451 			} else {
452 				vm_page_free(m);  /* clears busy, etc. */
453 			}
454 			vm_page_unlock_queues();
455 			continue;
456 		}
457 		/*
458 		 * Handle the "adjacent" pages. These pages were cleaned in
459 		 * place, and should be left alone.
460 		 * If prep_pin_count is nonzero, then someone is using the
461 		 * page, so make it active.
462 		 */
463 		if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
464 			if (m->vmp_reference) {
465 				vm_page_activate(m);
466 			} else {
467 				vm_page_deactivate(m);
468 			}
469 		}
470 		if (m->vmp_overwriting) {
471 			/*
472 			 * the (COPY_OUT_FROM == FALSE) request_page_list case
473 			 */
474 			if (m->vmp_busy) {
475 				/*
476 				 * We do not re-set m->vmp_dirty !
477 				 * The page was busy so no extraneous activity
478 				 * could have occurred. COPY_INTO is a read into the
479 				 * new pages. CLEAN_IN_PLACE does actually write
480 				 * out the pages but handling outside of this code
481 				 * will take care of resetting dirty. We clear the
482 				 * modify however for the Programmed I/O case.
483 				 */
484 				pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
485 
486 				m->vmp_busy = FALSE;
487 				m->vmp_absent = FALSE;
488 			} else {
489 				/*
490 				 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
491 				 * Occurs when the original page was wired
492 				 * at the time of the list request
493 				 */
494 				assert(VM_PAGE_WIRED(m));
495 				vm_page_unwire(m, TRUE);        /* reactivates */
496 			}
497 			m->vmp_overwriting = FALSE;
498 		} else {
499 			m->vmp_dirty = FALSE;
500 		}
501 		m->vmp_cleaning = FALSE;
502 
503 		/*
504 		 * Wakeup any thread waiting for the page to be un-cleaning.
505 		 */
506 		PAGE_WAKEUP(m);
507 		vm_page_unlock_queues();
508 	}
509 	/*
510 	 * Account for the paging reference taken in vm_paging_object_allocate.
511 	 */
512 	vm_object_activity_end(shadow_object);
513 	vm_object_unlock(shadow_object);
514 
515 	assert(object->ref_count == 0);
516 	assert(object->paging_in_progress == 0);
517 	assert(object->activity_in_progress == 0);
518 	assert(object->resident_page_count == 0);
519 	return;
520 }
521 
522 /*
523  * Routine:	vm_pageclean_setup
524  *
525  * Purpose:	setup a page to be cleaned (made non-dirty), but not
526  *		necessarily flushed from the VM page cache.
527  *		This is accomplished by cleaning in place.
528  *
529  *		The page must not be busy, and new_object
530  *		must be locked.
531  *
532  */
533 static void
534 vm_pageclean_setup(
535 	vm_page_t               m,
536 	vm_page_t               new_m,
537 	vm_object_t             new_object,
538 	vm_object_offset_t      new_offset)
539 {
540 	assert(!m->vmp_busy);
541 #if 0
542 	assert(!m->vmp_cleaning);
543 #endif
544 
545 	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
546 
547 	/*
548 	 * Mark original page as cleaning in place.
549 	 */
550 	m->vmp_cleaning = TRUE;
551 	SET_PAGE_DIRTY(m, FALSE);
552 	m->vmp_precious = FALSE;
553 
554 	/*
555 	 * Convert the fictitious page to a private shadow of
556 	 * the real page.
557 	 */
558 	assert(new_m->vmp_fictitious);
559 	assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
560 	new_m->vmp_fictitious = FALSE;
561 	new_m->vmp_private = TRUE;
562 	new_m->vmp_free_when_done = TRUE;
563 	VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
564 
565 	vm_page_lockspin_queues();
566 	vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
567 	vm_page_unlock_queues();
568 
569 	vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
570 	assert(!new_m->vmp_wanted);
571 	new_m->vmp_busy = FALSE;
572 }
573 
574 /*
575  *	Routine:	vm_pageout_initialize_page
576  *	Purpose:
577  *		Causes the specified page to be initialized in
578  *		the appropriate memory object. This routine is used to push
579  *		pages into a copy-object when they are modified in the
580  *		permanent object.
581  *
582  *		The page is moved to a temporary object and paged out.
583  *
584  *	In/out conditions:
585  *		The page in question must not be on any pageout queues.
586  *		The object to which it belongs must be locked.
587  *		The page must be busy, but not hold a paging reference.
588  *
589  *	Implementation:
590  *		Move this page to a completely new object.
591  */
592 void
593 vm_pageout_initialize_page(
594 	vm_page_t       m)
595 {
596 	vm_object_t             object;
597 	vm_object_offset_t      paging_offset;
598 	memory_object_t         pager;
599 
600 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
601 
602 	object = VM_PAGE_OBJECT(m);
603 
604 	assert(m->vmp_busy);
605 	assert(object->internal);
606 
607 	/*
608 	 *	Verify that we really want to clean this page
609 	 */
610 	assert(!m->vmp_absent);
611 	assert(!m->vmp_error);
612 	assert(m->vmp_dirty);
613 
614 	/*
615 	 *	Create a paging reference to let us play with the object.
616 	 */
617 	paging_offset = m->vmp_offset + object->paging_offset;
618 
619 	if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
620 		panic("reservation without pageout?"); /* alan */
621 
622 		VM_PAGE_FREE(m);
623 		vm_object_unlock(object);
624 
625 		return;
626 	}
627 
628 	/*
629 	 * If there's no pager, then we can't clean the page.  This should
630 	 * never happen since this should be a copy object and therefore not
631 	 * an external object, so the pager should always be there.
632 	 */
633 
634 	pager = object->pager;
635 
636 	if (pager == MEMORY_OBJECT_NULL) {
637 		panic("missing pager for copy object");
638 
639 		VM_PAGE_FREE(m);
640 		return;
641 	}
642 
643 	/*
644 	 * set the page for future call to vm_fault_list_request
645 	 */
646 	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
647 	SET_PAGE_DIRTY(m, FALSE);
648 
649 	/*
650 	 * keep the object from collapsing or terminating
651 	 */
652 	vm_object_paging_begin(object);
653 	vm_object_unlock(object);
654 
655 	/*
656 	 *	Write the data to its pager.
657 	 *	Note that the data is passed by naming the new object,
658 	 *	not a virtual address; the pager interface has been
659 	 *	manipulated to use the "internal memory" data type.
660 	 *	[The object reference from its allocation is donated
661 	 *	to the eventual recipient.]
662 	 */
663 	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
664 
665 	vm_object_lock(object);
666 	vm_object_paging_end(object);
667 }
668 
669 
670 /*
671  * vm_pageout_cluster:
672  *
673  * Given a page, queue it to the appropriate I/O thread,
674  * which will page it out and attempt to clean adjacent pages
675  * in the same operation.
676  *
677  * The object and queues must be locked. We will take a
678  * paging reference to prevent deallocation or collapse when we
679  * release the object lock back at the call site.  The I/O thread
680  * is responsible for consuming this reference.
681  *
682  * The page must not be on any pageout queue.
683  */
684 #if DEVELOPMENT || DEBUG
685 vmct_stats_t vmct_stats;
686 
687 int32_t vmct_active = 0;
688 uint64_t vm_compressor_epoch_start = 0;
689 uint64_t vm_compressor_epoch_stop = 0;
690 
691 typedef enum vmct_state_t {
692 	VMCT_IDLE,
693 	VMCT_AWAKENED,
694 	VMCT_ACTIVE,
695 } vmct_state_t;
696 vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
697 #endif
698 
699 
700 void
701 vm_pageout_cluster(vm_page_t m)
702 {
703 	vm_object_t     object = VM_PAGE_OBJECT(m);
704 	struct          vm_pageout_queue *q;
705 
706 	VM_PAGE_CHECK(m);
707 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
708 	vm_object_lock_assert_exclusive(object);
709 
710 	/*
711 	 * Only a certain kind of page is appreciated here.
712 	 */
713 	assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
714 	assert(!m->vmp_cleaning && !m->vmp_laundry);
715 	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
716 
717 	/*
718 	 * protect the object from collapse or termination
719 	 */
720 	vm_object_activity_begin(object);
721 
722 	if (object->internal == TRUE) {
723 		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
724 
725 		m->vmp_busy = TRUE;
726 
727 		q = &vm_pageout_queue_internal;
728 	} else {
729 		q = &vm_pageout_queue_external;
730 	}
731 
732 	/*
733 	 * pgo_laundry count is tied to the laundry bit
734 	 */
735 	m->vmp_laundry = TRUE;
736 	q->pgo_laundry++;
737 
738 	m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
739 	vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);
740 
741 	if (q->pgo_idle == TRUE) {
742 		q->pgo_idle = FALSE;
743 		thread_wakeup((event_t) &q->pgo_pending);
744 	}
745 	VM_PAGE_CHECK(m);
746 }
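
#if 0
/*
 * Illustrative caller sketch (hypothetical, not from the original source):
 * per the locking requirements documented above, a caller is expected to
 * hold the object lock exclusively plus the page queue lock, and the page
 * must be dirty or precious, unwired, and not already on a pageout queue.
 * The paging reference taken by vm_pageout_cluster() is later consumed by
 * the I/O thread that services the queue.
 */
static void
example_queue_page_for_pageout(vm_page_t m)
{
	vm_object_t object = VM_PAGE_OBJECT(m);

	vm_object_lock(object);         /* exclusive object lock */
	vm_page_lock_queues();          /* page queue lock       */

	if ((m->vmp_dirty || m->vmp_precious) &&
	    !VM_PAGE_WIRED(m) &&
	    m->vmp_q_state == VM_PAGE_NOT_ON_Q) {
		vm_pageout_cluster(m);  /* hand the page to the I/O thread */
	}

	vm_page_unlock_queues();
	vm_object_unlock(object);
}
#endif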
747 
748 
749 /*
750  * A page is back from laundry or we are stealing it back from
751  * the laundering state.  See if there are some pages waiting to
752  * go to laundry and if we can let some of them go now.
753  *
754  * Object and page queues must be locked.
755  */
756 void
757 vm_pageout_throttle_up(
758 	vm_page_t       m)
759 {
760 	struct vm_pageout_queue *q;
761 	vm_object_t      m_object;
762 
763 	m_object = VM_PAGE_OBJECT(m);
764 
765 	assert(m_object != VM_OBJECT_NULL);
766 	assert(m_object != kernel_object);
767 
768 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
769 	vm_object_lock_assert_exclusive(m_object);
770 
771 	if (m_object->internal == TRUE) {
772 		q = &vm_pageout_queue_internal;
773 	} else {
774 		q = &vm_pageout_queue_external;
775 	}
776 
777 	if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
778 		vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
779 		m->vmp_q_state = VM_PAGE_NOT_ON_Q;
780 
781 		VM_PAGE_ZERO_PAGEQ_ENTRY(m);
782 
783 		vm_object_activity_end(m_object);
784 
785 		VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
786 	}
787 	if (m->vmp_laundry == TRUE) {
788 		m->vmp_laundry = FALSE;
789 		q->pgo_laundry--;
790 
791 		if (q->pgo_throttled == TRUE) {
792 			q->pgo_throttled = FALSE;
793 			thread_wakeup((event_t) &q->pgo_laundry);
794 		}
795 		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
796 			q->pgo_draining = FALSE;
797 			thread_wakeup((event_t) (&q->pgo_laundry + 1));
798 		}
799 		VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
800 	}
801 }
802 
803 
804 static void
805 vm_pageout_throttle_up_batch(
806 	struct vm_pageout_queue *q,
807 	int             batch_cnt)
808 {
809 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
810 
811 	VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);
812 
813 	q->pgo_laundry -= batch_cnt;
814 
815 	if (q->pgo_throttled == TRUE) {
816 		q->pgo_throttled = FALSE;
817 		thread_wakeup((event_t) &q->pgo_laundry);
818 	}
819 	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
820 		q->pgo_draining = FALSE;
821 		thread_wakeup((event_t) (&q->pgo_laundry + 1));
822 	}
823 }
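
/*
 * Note on the wait-event convention used by the two routines above:
 * threads throttled because a pageout queue's laundry is full block on
 * &q->pgo_laundry and are woken as soon as pgo_throttled is cleared,
 * while a thread draining the queue (for example vm_pageout_page_queue()
 * below) blocks on the distinct event &q->pgo_laundry + 1 and is only
 * woken once pgo_laundry drops to zero.
 */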
824 
825 
826 
827 /*
828  * VM memory pressure monitoring.
829  *
830  * vm_pageout_scan() keeps track of the number of pages it considers and
831  * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
832  *
833  * compute_memory_pressure() is called every second from compute_averages()
834  * and moves "vm_pageout_stat_now" forward, to start accumulating the number
835  * of reclaimed pages in a new vm_pageout_stat[] bucket.
836  *
837  * mach_vm_pressure_monitor() collects past statistics about memory pressure.
838  * The caller provides the number of seconds ("nsecs") worth of statistics
839  * it wants, up to 30 seconds.
840  * It computes the number of pages reclaimed in the past "nsecs" seconds and
841  * also returns the number of pages the system still needs to reclaim at this
842  * moment in time.
843  */
844 #if DEVELOPMENT || DEBUG
845 #define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
846 #else
847 #define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
848 #endif
849 struct vm_pageout_stat {
850 	unsigned long vm_page_active_count;
851 	unsigned long vm_page_speculative_count;
852 	unsigned long vm_page_inactive_count;
853 	unsigned long vm_page_anonymous_count;
854 
855 	unsigned long vm_page_free_count;
856 	unsigned long vm_page_wire_count;
857 	unsigned long vm_page_compressor_count;
858 
859 	unsigned long vm_page_pages_compressed;
860 	unsigned long vm_page_pageable_internal_count;
861 	unsigned long vm_page_pageable_external_count;
862 	unsigned long vm_page_xpmapped_external_count;
863 
864 	unsigned int pages_grabbed;
865 	unsigned int pages_freed;
866 
867 	unsigned int pages_compressed;
868 	unsigned int pages_grabbed_by_compressor;
869 	unsigned int failed_compressions;
870 
871 	unsigned int pages_evicted;
872 	unsigned int pages_purged;
873 
874 	unsigned int considered;
875 	unsigned int considered_bq_internal;
876 	unsigned int considered_bq_external;
877 
878 	unsigned int skipped_external;
879 	unsigned int skipped_internal;
880 	unsigned int filecache_min_reactivations;
881 
882 	unsigned int freed_speculative;
883 	unsigned int freed_cleaned;
884 	unsigned int freed_internal;
885 	unsigned int freed_external;
886 
887 	unsigned int cleaned_dirty_external;
888 	unsigned int cleaned_dirty_internal;
889 
890 	unsigned int inactive_referenced;
891 	unsigned int inactive_nolock;
892 	unsigned int reactivation_limit_exceeded;
893 	unsigned int forced_inactive_reclaim;
894 
895 	unsigned int throttled_internal_q;
896 	unsigned int throttled_external_q;
897 
898 	unsigned int phantom_ghosts_found;
899 	unsigned int phantom_ghosts_added;
900 } vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
901 
902 unsigned int vm_pageout_stat_now = 0;
903 
904 #define VM_PAGEOUT_STAT_BEFORE(i) \
905 	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
906 #define VM_PAGEOUT_STAT_AFTER(i) \
907 	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
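
/*
 * Example of the ring-buffer arithmetic (illustration only): with the
 * non-DEVELOPMENT VM_PAGEOUT_STAT_SIZE of (1 * 8) + 1 == 9 slots,
 *
 *	VM_PAGEOUT_STAT_BEFORE(0) == 8	(wraps to the last slot)
 *	VM_PAGEOUT_STAT_AFTER(8)  == 0	(wraps back to the first slot)
 *	VM_PAGEOUT_STAT_AFTER(3)  == 4
 *
 * mach_vm_pressure_monitor() below walks backwards through the ring with
 * VM_PAGEOUT_STAT_BEFORE(), treating 8 buckets as one monitored second
 * (units_of_monitor = 8 * nsecs_monitored).
 */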
908 
909 #if VM_PAGE_BUCKETS_CHECK
910 int vm_page_buckets_check_interval = 80; /* in eighths of a second */
911 #endif /* VM_PAGE_BUCKETS_CHECK */
912 
913 
914 void
915 record_memory_pressure(void);
916 void
917 record_memory_pressure(void)
918 {
919 	unsigned int vm_pageout_next;
920 
921 #if VM_PAGE_BUCKETS_CHECK
922 	/* check the consistency of VM page buckets at regular interval */
923 	static int counter = 0;
924 	if ((++counter % vm_page_buckets_check_interval) == 0) {
925 		vm_page_buckets_check();
926 	}
927 #endif /* VM_PAGE_BUCKETS_CHECK */
928 
929 	vm_pageout_state.vm_memory_pressure =
930 	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
931 	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
932 	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
933 	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;
934 
935 	commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure );
936 
937 	/* move "now" forward */
938 	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
939 
940 	bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));
941 
942 	vm_pageout_stat_now = vm_pageout_next;
943 }
944 
945 
946 /*
947  * IMPORTANT
948  * mach_vm_ctl_page_free_wanted() is called indirectly, via
949  * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
950  * it must be safe in the restricted stackshot context. Locks and/or
951  * blocking are not allowable.
952  */
953 unsigned int
954 mach_vm_ctl_page_free_wanted(void)
955 {
956 	unsigned int page_free_target, page_free_count, page_free_wanted;
957 
958 	page_free_target = vm_page_free_target;
959 	page_free_count = vm_page_free_count;
960 	if (page_free_target > page_free_count) {
961 		page_free_wanted = page_free_target - page_free_count;
962 	} else {
963 		page_free_wanted = 0;
964 	}
965 
966 	return page_free_wanted;
967 }
968 
969 
970 /*
971  * IMPORTANT:
972  * mach_vm_pressure_monitor() is called when taking a stackshot, with
973  * wait_for_pressure FALSE, so that code path must remain safe in the
974  * restricted stackshot context. No blocking or locks are allowable
975  * on that code path.
976  */
977 
978 kern_return_t
979 mach_vm_pressure_monitor(
980 	boolean_t       wait_for_pressure,
981 	unsigned int    nsecs_monitored,
982 	unsigned int    *pages_reclaimed_p,
983 	unsigned int    *pages_wanted_p)
984 {
985 	wait_result_t   wr;
986 	unsigned int    vm_pageout_then, vm_pageout_now;
987 	unsigned int    pages_reclaimed;
988 	unsigned int    units_of_monitor;
989 
990 	units_of_monitor = 8 * nsecs_monitored;
991 	/*
992 	 * We don't take the vm_page_queue_lock here because we don't want
993 	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
994 	 * thread when it's trying to reclaim memory.  We don't need fully
995 	 * accurate monitoring anyway...
996 	 */
997 
998 	if (wait_for_pressure) {
999 		/* wait until there's memory pressure */
1000 		while (vm_page_free_count >= vm_page_free_target) {
1001 			wr = assert_wait((event_t) &vm_page_free_wanted,
1002 			    THREAD_INTERRUPTIBLE);
1003 			if (wr == THREAD_WAITING) {
1004 				wr = thread_block(THREAD_CONTINUE_NULL);
1005 			}
1006 			if (wr == THREAD_INTERRUPTED) {
1007 				return KERN_ABORTED;
1008 			}
1009 			if (wr == THREAD_AWAKENED) {
1010 				/*
1011 				 * The memory pressure might have already
1012 				 * been relieved but let's not block again
1013 				 * and let's report that there was memory
1014 				 * pressure at some point.
1015 				 */
1016 				break;
1017 			}
1018 		}
1019 	}
1020 
1021 	/* provide the number of pages the system wants to reclaim */
1022 	if (pages_wanted_p != NULL) {
1023 		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
1024 	}
1025 
1026 	if (pages_reclaimed_p == NULL) {
1027 		return KERN_SUCCESS;
1028 	}
1029 
1030 	/* provide number of pages reclaimed in the last "nsecs_monitored" */
1031 	vm_pageout_now = vm_pageout_stat_now;
1032 	pages_reclaimed = 0;
1033 	for (vm_pageout_then =
1034 	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1035 	    vm_pageout_then != vm_pageout_now &&
1036 	    units_of_monitor-- != 0;
1037 	    vm_pageout_then =
1038 	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1039 		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
1040 		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
1041 		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
1042 		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
1043 	}
1044 	*pages_reclaimed_p = pages_reclaimed;
1045 
1046 	return KERN_SUCCESS;
1047 }
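
#if 0
/*
 * Illustrative (hypothetical) kernel-side usage sketch, not from the
 * original source: poll the recent reclaim activity without blocking,
 * the way a stackshot-style caller would.  Note that on release kernels
 * the stats ring only retains about one second of history (30 seconds
 * on DEVELOPMENT builds), so nsecs_monitored is effectively capped by
 * VM_PAGEOUT_STAT_SIZE.
 */
static void
example_poll_vm_pressure(void)
{
	unsigned int  pages_reclaimed = 0;
	unsigned int  pages_wanted = 0;
	kern_return_t kr;

	kr = mach_vm_pressure_monitor(FALSE, /* wait_for_pressure: don't block */
	    10,                              /* nsecs_monitored                */
	    &pages_reclaimed,                /* pages reclaimed in that window */
	    &pages_wanted);                  /* pages still wanted right now   */

	if (kr == KERN_SUCCESS && pages_wanted > 0) {
		/* the system is still below its free-page target */
	}
}
#endif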
1048 
1049 
1050 
1051 #if DEVELOPMENT || DEBUG
1052 
1053 static void
1054 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
1055 
1056 /*
1057  * condition variable used to make sure there is
1058  * only a single sweep going on at a time
1059  */
1060 boolean_t       vm_pageout_disconnect_all_pages_active = FALSE;
1061 
1062 
1063 void
1064 vm_pageout_disconnect_all_pages()
1065 {
1066 	vm_page_lock_queues();
1067 
1068 	if (vm_pageout_disconnect_all_pages_active == TRUE) {
1069 		vm_page_unlock_queues();
1070 		return;
1071 	}
1072 	vm_pageout_disconnect_all_pages_active = TRUE;
1073 	vm_page_unlock_queues();
1074 
1075 	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1076 	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1077 	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
1078 
1079 	vm_pageout_disconnect_all_pages_active = FALSE;
1080 }
1081 
1082 
1083 void
1084 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
1085 {
1086 	vm_page_t       m;
1087 	vm_object_t     t_object = NULL;
1088 	vm_object_t     l_object = NULL;
1089 	vm_object_t     m_object = NULL;
1090 	int             delayed_unlock = 0;
1091 	int             try_failed_count = 0;
1092 	int             disconnected_count = 0;
1093 	int             paused_count = 0;
1094 	int             object_locked_count = 0;
1095 
1096 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
1097 	    q, qcount, 0, 0, 0);
1098 
1099 	vm_page_lock_queues();
1100 
1101 	while (qcount && !vm_page_queue_empty(q)) {
1102 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1103 
1104 		m = (vm_page_t) vm_page_queue_first(q);
1105 		m_object = VM_PAGE_OBJECT(m);
1106 
1107 		/*
1108 		 * check to see if we currently are working
1109 		 * with the same object... if so, we've
1110 		 * already got the lock
1111 		 */
1112 		if (m_object != l_object) {
1113 			/*
1114 			 * the object associated with candidate page is
1115 			 * different from the one we were just working
1116 			 * with... dump the lock if we still own it
1117 			 */
1118 			if (l_object != NULL) {
1119 				vm_object_unlock(l_object);
1120 				l_object = NULL;
1121 			}
1122 			if (m_object != t_object) {
1123 				try_failed_count = 0;
1124 			}
1125 
1126 			/*
1127 			 * Try to lock object; since we've already got the
1128 			 * page queues lock, we can only 'try' for this one.
1129 			 * if the 'try' fails, we need to do a mutex_pause
1130 			 * to allow the owner of the object lock a chance to
1131 			 * run...
1132 			 */
1133 			if (!vm_object_lock_try_scan(m_object)) {
1134 				if (try_failed_count > 20) {
1135 					goto reenter_pg_on_q;
1136 				}
1137 				vm_page_unlock_queues();
1138 				mutex_pause(try_failed_count++);
1139 				vm_page_lock_queues();
1140 				delayed_unlock = 0;
1141 
1142 				paused_count++;
1143 
1144 				t_object = m_object;
1145 				continue;
1146 			}
1147 			object_locked_count++;
1148 
1149 			l_object = m_object;
1150 		}
1151 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1152 			/*
1153 			 * put it back on the head of its queue
1154 			 */
1155 			goto reenter_pg_on_q;
1156 		}
1157 		if (m->vmp_pmapped == TRUE) {
1158 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1159 
1160 			disconnected_count++;
1161 		}
1162 reenter_pg_on_q:
1163 		vm_page_queue_remove(q, m, vmp_pageq);
1164 		vm_page_queue_enter(q, m, vmp_pageq);
1165 
1166 		qcount--;
1167 		try_failed_count = 0;
1168 
1169 		if (delayed_unlock++ > 128) {
1170 			if (l_object != NULL) {
1171 				vm_object_unlock(l_object);
1172 				l_object = NULL;
1173 			}
1174 			lck_mtx_yield(&vm_page_queue_lock);
1175 			delayed_unlock = 0;
1176 		}
1177 	}
1178 	if (l_object != NULL) {
1179 		vm_object_unlock(l_object);
1180 		l_object = NULL;
1181 	}
1182 	vm_page_unlock_queues();
1183 
1184 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
1185 	    q, disconnected_count, object_locked_count, paused_count, 0);
1186 }
1187 
1188 #endif
1189 
1190 
1191 static void
1192 vm_pageout_page_queue(vm_page_queue_head_t *, int);
1193 
1194 /*
1195  * condition variable used to make sure there is
1196  * only a single sweep going on at a time
1197  */
1198 boolean_t       vm_pageout_anonymous_pages_active = FALSE;
1199 
1200 
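/*
 * Summary of the routine below, derived from its body: when the
 * compressor is configured, vm_pageout_anonymous_pages() runs
 * vm_pageout_page_queue() over the throttled, anonymous and active
 * queues so that eligible internal (anonymous) pages are pushed to the
 * compressor, and then calls vm_consider_swapping() if a swap device
 * is present.  The *_active flag guards against concurrent sweeps.
 */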
1201 void
1202 vm_pageout_anonymous_pages()
1203 {
1204 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
1205 		vm_page_lock_queues();
1206 
1207 		if (vm_pageout_anonymous_pages_active == TRUE) {
1208 			vm_page_unlock_queues();
1209 			return;
1210 		}
1211 		vm_pageout_anonymous_pages_active = TRUE;
1212 		vm_page_unlock_queues();
1213 
1214 		vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1215 		vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1216 		vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
1217 
1218 		if (VM_CONFIG_SWAP_IS_PRESENT) {
1219 			vm_consider_swapping();
1220 		}
1221 
1222 		vm_page_lock_queues();
1223 		vm_pageout_anonymous_pages_active = FALSE;
1224 		vm_page_unlock_queues();
1225 	}
1226 }
1227 
1228 
1229 void
1230 vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
1231 {
1232 	vm_page_t       m;
1233 	vm_object_t     t_object = NULL;
1234 	vm_object_t     l_object = NULL;
1235 	vm_object_t     m_object = NULL;
1236 	int             delayed_unlock = 0;
1237 	int             try_failed_count = 0;
1238 	int             refmod_state;
1239 	int             pmap_options;
1240 	struct          vm_pageout_queue *iq;
1241 	ppnum_t         phys_page;
1242 
1243 
1244 	iq = &vm_pageout_queue_internal;
1245 
1246 	vm_page_lock_queues();
1247 
1248 	while (qcount && !vm_page_queue_empty(q)) {
1249 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1250 
1251 		if (VM_PAGE_Q_THROTTLED(iq)) {
1252 			if (l_object != NULL) {
1253 				vm_object_unlock(l_object);
1254 				l_object = NULL;
1255 			}
1256 			iq->pgo_draining = TRUE;
1257 
1258 			assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
1259 			vm_page_unlock_queues();
1260 
1261 			thread_block(THREAD_CONTINUE_NULL);
1262 
1263 			vm_page_lock_queues();
1264 			delayed_unlock = 0;
1265 			continue;
1266 		}
1267 		m = (vm_page_t) vm_page_queue_first(q);
1268 		m_object = VM_PAGE_OBJECT(m);
1269 
1270 		/*
1271 		 * check to see if we currently are working
1272 		 * with the same object... if so, we've
1273 		 * already got the lock
1274 		 */
1275 		if (m_object != l_object) {
1276 			if (!m_object->internal) {
1277 				goto reenter_pg_on_q;
1278 			}
1279 
1280 			/*
1281 			 * the object associated with candidate page is
1282 			 * different from the one we were just working
1283 			 * with... dump the lock if we still own it
1284 			 */
1285 			if (l_object != NULL) {
1286 				vm_object_unlock(l_object);
1287 				l_object = NULL;
1288 			}
1289 			if (m_object != t_object) {
1290 				try_failed_count = 0;
1291 			}
1292 
1293 			/*
1294 			 * Try to lock object; since we've already got the
1295 			 * page queues lock, we can only 'try' for this one.
1296 			 * if the 'try' fails, we need to do a mutex_pause
1297 			 * to allow the owner of the object lock a chance to
1298 			 * run...
1299 			 */
1300 			if (!vm_object_lock_try_scan(m_object)) {
1301 				if (try_failed_count > 20) {
1302 					goto reenter_pg_on_q;
1303 				}
1304 				vm_page_unlock_queues();
1305 				mutex_pause(try_failed_count++);
1306 				vm_page_lock_queues();
1307 				delayed_unlock = 0;
1308 
1309 				t_object = m_object;
1310 				continue;
1311 			}
1312 			l_object = m_object;
1313 		}
1314 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1315 			/*
1316 			 * page is not to be cleaned
1317 			 * put it back on the head of its queue
1318 			 */
1319 			goto reenter_pg_on_q;
1320 		}
1321 		phys_page = VM_PAGE_GET_PHYS_PAGE(m);
1322 
1323 		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
1324 			refmod_state = pmap_get_refmod(phys_page);
1325 
1326 			if (refmod_state & VM_MEM_REFERENCED) {
1327 				m->vmp_reference = TRUE;
1328 			}
1329 			if (refmod_state & VM_MEM_MODIFIED) {
1330 				SET_PAGE_DIRTY(m, FALSE);
1331 			}
1332 		}
1333 		if (m->vmp_reference == TRUE) {
1334 			m->vmp_reference = FALSE;
1335 			pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1336 			goto reenter_pg_on_q;
1337 		}
1338 		if (m->vmp_pmapped == TRUE) {
1339 			if (m->vmp_dirty || m->vmp_precious) {
1340 				pmap_options = PMAP_OPTIONS_COMPRESSOR;
1341 			} else {
1342 				pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1343 			}
1344 			refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
1345 			if (refmod_state & VM_MEM_MODIFIED) {
1346 				SET_PAGE_DIRTY(m, FALSE);
1347 			}
1348 		}
1349 
1350 		if (!m->vmp_dirty && !m->vmp_precious) {
1351 			vm_page_unlock_queues();
1352 			VM_PAGE_FREE(m);
1353 			vm_page_lock_queues();
1354 			delayed_unlock = 0;
1355 
1356 			goto next_pg;
1357 		}
1358 		if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1359 			if (!m_object->pager_initialized) {
1360 				vm_page_unlock_queues();
1361 
1362 				vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
1363 
1364 				if (!m_object->pager_initialized) {
1365 					vm_object_compressor_pager_create(m_object);
1366 				}
1367 
1368 				vm_page_lock_queues();
1369 				delayed_unlock = 0;
1370 			}
1371 			if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1372 				goto reenter_pg_on_q;
1373 			}
1374 			/*
1375 			 * vm_object_compressor_pager_create will drop the object lock
1376 			 * which means 'm' may no longer be valid to use
1377 			 */
1378 			continue;
1379 		}
1380 		/*
1381 		 * we've already factored out pages in the laundry which
1382 		 * means this page can't be on the pageout queue so it's
1383 		 * safe to do the vm_page_queues_remove
1384 		 */
1385 		vm_page_queues_remove(m, TRUE);
1386 
1387 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1388 
1389 		vm_pageout_cluster(m);
1390 
1391 		goto next_pg;
1392 
1393 reenter_pg_on_q:
1394 		vm_page_queue_remove(q, m, vmp_pageq);
1395 		vm_page_queue_enter(q, m, vmp_pageq);
1396 next_pg:
1397 		qcount--;
1398 		try_failed_count = 0;
1399 
1400 		if (delayed_unlock++ > 128) {
1401 			if (l_object != NULL) {
1402 				vm_object_unlock(l_object);
1403 				l_object = NULL;
1404 			}
1405 			lck_mtx_yield(&vm_page_queue_lock);
1406 			delayed_unlock = 0;
1407 		}
1408 	}
1409 	if (l_object != NULL) {
1410 		vm_object_unlock(l_object);
1411 		l_object = NULL;
1412 	}
1413 	vm_page_unlock_queues();
1414 }
1415 
1416 
1417 
1418 /*
1419  * function in BSD to apply I/O throttle to the pageout thread
1420  */
1421 extern void vm_pageout_io_throttle(void);
1422 
1423 #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)                    \
1424 	MACRO_BEGIN                                                     \
1425 	/* \
1426 	 * If a "reusable" page somehow made it back into \
1427 	 * the active queue, it's been re-used and is not \
1428 	 * quite re-usable. \
1429 	 * If the VM object was "all_reusable", consider it \
1430 	 * as "all re-used" instead of converting it to \
1431 	 * "partially re-used", which could be expensive. \
1432 	 */                                                             \
1433 	assert(VM_PAGE_OBJECT((m)) == (obj));                           \
1434 	if ((m)->vmp_reusable ||                                        \
1435 	    (obj)->all_reusable) {                                      \
1436 	        vm_object_reuse_pages((obj),                            \
1437 	                              (m)->vmp_offset,                  \
1438 	                              (m)->vmp_offset + PAGE_SIZE_64,   \
1439 	                              FALSE);                           \
1440 	}                                                               \
1441 	MACRO_END
1442 
1443 
1444 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT         64
1445 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX     1024
1446 
1447 #define FCS_IDLE                0
1448 #define FCS_DELAYED             1
1449 #define FCS_DEADLOCK_DETECTED   2
1450 
1451 struct flow_control {
1452 	int             state;
1453 	mach_timespec_t ts;
1454 };
1455 
1456 
1457 #if CONFIG_BACKGROUND_QUEUE
1458 uint64_t vm_pageout_rejected_bq_internal = 0;
1459 uint64_t vm_pageout_rejected_bq_external = 0;
1460 uint64_t vm_pageout_skipped_bq_internal = 0;
1461 #endif
1462 
1463 #define ANONS_GRABBED_LIMIT     2
1464 
1465 
1466 #if 0
1467 static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
1468 #endif
1469 static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
1470 
1471 #define VM_PAGEOUT_PB_NO_ACTION                         0
1472 #define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
1473 #define VM_PAGEOUT_PB_THREAD_YIELD                      2
1474 
1475 
1476 #if 0
1477 static void
1478 vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
1479 {
1480 	if (*local_freeq) {
1481 		vm_page_unlock_queues();
1482 
1483 		VM_DEBUG_CONSTANT_EVENT(
1484 			vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1485 			vm_page_free_count, 0, 0, 1);
1486 
1487 		vm_page_free_list(*local_freeq, TRUE);
1488 
1489 		VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1490 		    vm_page_free_count, *local_freed, 0, 1);
1491 
1492 		*local_freeq = NULL;
1493 		*local_freed = 0;
1494 
1495 		vm_page_lock_queues();
1496 	} else {
1497 		lck_mtx_yield(&vm_page_queue_lock);
1498 	}
1499 	*delayed_unlock = 1;
1500 }
1501 #endif
1502 
1503 
1504 static void
1505 vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
1506     vm_page_t *local_freeq, int *local_freed, int action)
1507 {
1508 	vm_page_unlock_queues();
1509 
1510 	if (*object != NULL) {
1511 		vm_object_unlock(*object);
1512 		*object = NULL;
1513 	}
1514 	if (*local_freeq) {
1515 		vm_page_free_list(*local_freeq, TRUE);
1516 
1517 		*local_freeq = NULL;
1518 		*local_freed = 0;
1519 	}
1520 	*delayed_unlock = 1;
1521 
1522 	switch (action) {
1523 	case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
1524 		vm_consider_waking_compactor_swapper();
1525 		break;
1526 	case VM_PAGEOUT_PB_THREAD_YIELD:
1527 		thread_yield_internal(1);
1528 		break;
1529 	case VM_PAGEOUT_PB_NO_ACTION:
1530 	default:
1531 		break;
1532 	}
1533 	vm_page_lock_queues();
1534 }
1535 
1536 
1537 static struct vm_pageout_vminfo last;
1538 
1539 uint64_t last_vm_page_pages_grabbed = 0;
1540 
1541 extern  uint32_t c_segment_pages_compressed;
1542 
1543 extern uint64_t shared_region_pager_reclaimed;
1544 extern struct memory_object_pager_ops shared_region_pager_ops;
1545 
1546 void
1547 update_vm_info(void)
1548 {
1549 	unsigned long tmp;
1550 	uint64_t tmp64;
1551 
1552 	vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
1553 	vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
1554 	vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
1555 	vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;
1556 
1557 	vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
1558 	vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
1559 	vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;
1560 
1561 	vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
1562 	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
1563 	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
1564 	vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
1565 
1566 
1567 	tmp = vm_pageout_vminfo.vm_pageout_considered_page;
1568 	vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
1569 	last.vm_pageout_considered_page = tmp;
1570 
1571 	tmp64 = vm_pageout_vminfo.vm_pageout_compressions;
1572 	vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions);
1573 	last.vm_pageout_compressions = tmp64;
1574 
1575 	tmp = vm_pageout_vminfo.vm_compressor_failed;
1576 	vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
1577 	last.vm_compressor_failed = tmp;
1578 
1579 	tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed;
1580 	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed);
1581 	last.vm_compressor_pages_grabbed = tmp64;
1582 
1583 	tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
1584 	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
1585 	last.vm_phantom_cache_found_ghost = tmp;
1586 
1587 	tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
1588 	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
1589 	last.vm_phantom_cache_added_ghost = tmp;
1590 
1591 	tmp64 = counter_load(&vm_page_grab_count);
1592 	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed);
1593 	last_vm_page_pages_grabbed = tmp64;
1594 
1595 	tmp = vm_pageout_vminfo.vm_page_pages_freed;
1596 	vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
1597 	last.vm_page_pages_freed = tmp;
1598 
1599 
1600 	if (vm_pageout_stats[vm_pageout_stat_now].considered) {
1601 		tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
1602 		vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
1603 		last.vm_pageout_pages_evicted = tmp;
1604 
1605 		tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
1606 		vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
1607 		last.vm_pageout_pages_purged = tmp;
1608 
1609 		tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
1610 		vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
1611 		last.vm_pageout_freed_speculative = tmp;
1612 
1613 		tmp = vm_pageout_vminfo.vm_pageout_freed_external;
1614 		vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
1615 		last.vm_pageout_freed_external = tmp;
1616 
1617 		tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
1618 		vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
1619 		last.vm_pageout_inactive_referenced = tmp;
1620 
1621 		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
1622 		vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
1623 		last.vm_pageout_scan_inactive_throttled_external = tmp;
1624 
1625 		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
1626 		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
1627 		last.vm_pageout_inactive_dirty_external = tmp;
1628 
1629 		tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
1630 		vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
1631 		last.vm_pageout_freed_cleaned = tmp;
1632 
1633 		tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
1634 		vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
1635 		last.vm_pageout_inactive_nolock = tmp;
1636 
1637 		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
1638 		vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
1639 		last.vm_pageout_scan_inactive_throttled_internal = tmp;
1640 
1641 		tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
1642 		vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
1643 		last.vm_pageout_skipped_external = tmp;
1644 
1645 		tmp = vm_pageout_vminfo.vm_pageout_skipped_internal;
1646 		vm_pageout_stats[vm_pageout_stat_now].skipped_internal = (unsigned int)(tmp - last.vm_pageout_skipped_internal);
1647 		last.vm_pageout_skipped_internal = tmp;
1648 
1649 		tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
1650 		vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
1651 		last.vm_pageout_reactivation_limit_exceeded = tmp;
1652 
1653 		tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
1654 		vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
1655 		last.vm_pageout_inactive_force_reclaim = tmp;
1656 
1657 		tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
1658 		vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
1659 		last.vm_pageout_freed_internal = tmp;
1660 
1661 		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
1662 		vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
1663 		last.vm_pageout_considered_bq_internal = tmp;
1664 
1665 		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
1666 		vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
1667 		last.vm_pageout_considered_bq_external = tmp;
1668 
1669 		tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
1670 		vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
1671 		last.vm_pageout_filecache_min_reactivated = tmp;
1672 
1673 		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
1674 		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
1675 		last.vm_pageout_inactive_dirty_internal = tmp;
1676 	}
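
	/*
	 * Each field above follows the same per-sample delta pattern, e.g.
	 *
	 *	tmp = vm_pageout_vminfo.counter;          // cumulative value
	 *	stats[now].field = tmp - last.counter;    // delta for this sample
	 *	last.counter = tmp;                       // snapshot for next time
	 */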
1677 
1678 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
1679 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
1680 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
1681 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
1682 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
1683 	    0);
1684 
1685 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
1686 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
1687 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
1688 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
1689 	    0,
1690 	    0);
1691 
1692 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
1693 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
1694 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
1695 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
1696 	    vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
1697 	    0);
1698 
1699 	if (vm_pageout_stats[vm_pageout_stat_now].considered ||
1700 	    vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
1701 	    vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
1702 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
1703 		    vm_pageout_stats[vm_pageout_stat_now].considered,
1704 		    vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
1705 		    vm_pageout_stats[vm_pageout_stat_now].freed_external,
1706 		    vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
1707 		    0);
1708 
1709 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
1710 		    vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
1711 		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
1712 		    vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
1713 		    vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
1714 		    0);
1715 
1716 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
1717 		    vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
1718 		    vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
1719 		    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
1720 		    vm_pageout_stats[vm_pageout_stat_now].skipped_external,
1721 		    0);
1722 
1723 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
1724 		    vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
1725 		    vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
1726 		    vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
1727 		    vm_pageout_stats[vm_pageout_stat_now].freed_internal,
1728 		    0);
1729 
1730 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
1731 		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
1732 		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
1733 		    vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
1734 		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
1735 		    0);
1736 	}
1737 	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
1738 	    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
1739 	    vm_pageout_stats[vm_pageout_stat_now].pages_freed,
1740 	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
1741 	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
1742 	    0);
1743 
1744 	record_memory_pressure();
1745 }
1746 
1747 extern boolean_t hibernation_vmqueues_inspection;
1748 
1749 /*
1750  * Return values for functions called by vm_pageout_scan
1751  * that control its flow.
1752  *
1753  * PROCEED -- vm_pageout_scan will keep making forward progress.
1754  * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
1755  * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
1756  */
1757 
1758 #define VM_PAGEOUT_SCAN_PROCEED                 (0)
1759 #define VM_PAGEOUT_SCAN_DONE_RETURN             (1)
1760 #define VM_PAGEOUT_SCAN_NEXT_ITERATION          (2)
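
/*
 * For illustration, vm_pageout_scan() dispatches on these codes roughly as
 * follows (simplified from its main for(;;) loop further below):
 *
 *	retval = vps_flow_control(...);
 *	if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
 *		continue;                        // restart the main loop
 *	} else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
 *		goto return_from_scan;           // page demand satisfied
 *	}
 *	// VM_PAGEOUT_SCAN_PROCEED: fall through and keep scanning
 */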
1761 
1762 /*
1763  * This function is called only from vm_pageout_scan and
1764  * it moves overflow secluded pages (one-at-a-time) to the
1765  * batched 'local' free Q or active Q.
1766  */
1767 static void
1768 vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
1769 {
1770 #if CONFIG_SECLUDED_MEMORY
1771 	/*
1772 	 * Deal with secluded_q overflow.
1773 	 */
1774 	if (vm_page_secluded_count > vm_page_secluded_target) {
1775 		vm_page_t secluded_page;
1776 
1777 		/*
1778 		 * SECLUDED_AGING_BEFORE_ACTIVE:
1779 		 * Excess secluded pages go to the active queue and
1780 		 * will later go to the inactive queue.
1781 		 */
1782 		assert((vm_page_secluded_count_free +
1783 		    vm_page_secluded_count_inuse) ==
1784 		    vm_page_secluded_count);
1785 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
1786 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
1787 
1788 		vm_page_queues_remove(secluded_page, FALSE);
1789 		assert(!secluded_page->vmp_fictitious);
1790 		assert(!VM_PAGE_WIRED(secluded_page));
1791 
1792 		if (secluded_page->vmp_object == 0) {
1793 			/* transfer to free queue */
1794 			assert(secluded_page->vmp_busy);
1795 			secluded_page->vmp_snext = *local_freeq;
1796 			*local_freeq = secluded_page;
1797 			*local_freed += 1;
1798 		} else {
1799 			/* transfer to head of active queue */
1800 			vm_page_enqueue_active(secluded_page, FALSE);
1801 			secluded_page = VM_PAGE_NULL;
1802 		}
1803 	}
1804 #else /* CONFIG_SECLUDED_MEMORY */
1805 
1806 #pragma unused(local_freeq)
1807 #pragma unused(local_freed)
1808 
1809 	return;
1810 
1811 #endif /* CONFIG_SECLUDED_MEMORY */
1812 }
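
/*
 * A short sketch of the 'local' free queue used above: it is a singly-linked
 * chain threaded through vmp_snext,
 *
 *	page->vmp_snext = *local_freeq;
 *	*local_freeq    = page;
 *	*local_freed   += 1;
 *
 * and is handed back to the global free list in one batched operation when
 * vm_pageout_prepare_to_block() later drains it.
 */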
1813 
1814 /*
1815  * This function is called only from vm_pageout_scan and
1816  * it initializes the loop targets for vm_pageout_scan().
1817  */
1818 static void
1819 vps_init_page_targets(void)
1820 {
1821 	/*
1822 	 * LD TODO: Other page targets should be calculated here too.
1823 	 */
1824 	vm_page_anonymous_min = vm_page_inactive_target / 20;
1825 
1826 	if (vm_pageout_state.vm_page_speculative_percentage > 50) {
1827 		vm_pageout_state.vm_page_speculative_percentage = 50;
1828 	} else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
1829 		vm_pageout_state.vm_page_speculative_percentage = 1;
1830 	}
1831 
1832 	vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1833 	    vm_page_inactive_count);
1834 }
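
/*
 * Rough illustration of the clamp above: with the percentage bounded to
 * [1, 50], the speculative target works out to a bounded fraction of the
 * active + inactive population, assuming VM_PAGE_SPECULATIVE_TARGET keeps
 * its usual percentage form:
 *
 *	target ~= (vm_page_active_count + vm_page_inactive_count) *
 *	          vm_page_speculative_percentage / 100;
 */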
1835 
1836 /*
1837  * This function is called only from vm_pageout_scan and
1838  * it purges a single VM object at a time and will either
1839  * make vm_pageout_scan() restart the loop or keep moving forward.
1840  */
1841 static int
1842 vps_purge_object()
1843 {
1844 	int             force_purge;
1845 
1846 	assert(available_for_purge >= 0);
1847 	force_purge = 0; /* no force-purging */
1848 
1849 #if VM_PRESSURE_EVENTS
1850 	vm_pressure_level_t pressure_level;
1851 
1852 	pressure_level = memorystatus_vm_pressure_level;
1853 
1854 	if (pressure_level > kVMPressureNormal) {
1855 		if (pressure_level >= kVMPressureCritical) {
1856 			force_purge = vm_pageout_state.memorystatus_purge_on_critical;
1857 		} else if (pressure_level >= kVMPressureUrgent) {
1858 			force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
1859 		} else if (pressure_level >= kVMPressureWarning) {
1860 			force_purge = vm_pageout_state.memorystatus_purge_on_warning;
1861 		}
1862 	}
1863 #endif /* VM_PRESSURE_EVENTS */
1864 
1865 	if (available_for_purge || force_purge) {
1866 		memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
1867 
1868 		VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1869 		if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
1870 			VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
1871 			VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1872 			memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1873 
1874 			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1875 		}
1876 		VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
1877 		memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1878 	}
1879 
1880 	return VM_PAGEOUT_SCAN_PROCEED;
1881 }
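
/*
 * The force_purge budget above maps directly to the memorystatus tunables:
 * warning pressure uses memorystatus_purge_on_warning, urgent uses
 * memorystatus_purge_on_urgent, and critical uses
 * memorystatus_purge_on_critical; with normal pressure (or VM_PRESSURE_EVENTS
 * compiled out) only already-ripe volatile objects are purged.
 */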
1882 
1883 /*
1884  * This function is called only from vm_pageout_scan and
1885  * it will try to age the next speculative Q if the oldest
1886  * one is empty.
1887  */
1888 static int
1889 vps_age_speculative_queue(boolean_t force_speculative_aging)
1890 {
1891 #define DELAY_SPECULATIVE_AGE   1000
1892 
1893 	/*
1894 	 * try to pull pages from the aging bins...
1895 	 * see vm_page.h for an explanation of how
1896 	 * this mechanism works
1897 	 */
1898 	boolean_t                       can_steal = FALSE;
1899 	int                             num_scanned_queues;
1900 	static int                      delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop. */
1901 	mach_timespec_t                 ts;
1902 	struct vm_speculative_age_q     *aq;
1903 	struct vm_speculative_age_q     *sq;
1904 
1905 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1906 
1907 	aq = &vm_page_queue_speculative[speculative_steal_index];
1908 
1909 	num_scanned_queues = 0;
1910 	while (vm_page_queue_empty(&aq->age_q) &&
1911 	    num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1912 		speculative_steal_index++;
1913 
1914 		if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1915 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1916 		}
1917 
1918 		aq = &vm_page_queue_speculative[speculative_steal_index];
1919 	}
1920 
1921 	if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
1922 		/*
1923 		 * XXX We've scanned all the speculative
1924 		 * queues but still haven't found one
1925 		 * that is not empty, even though
1926 		 * vm_page_speculative_count is not 0.
1927 		 */
1928 		if (!vm_page_queue_empty(&sq->age_q)) {
1929 			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1930 		}
1931 #if DEVELOPMENT || DEBUG
1932 		panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
1933 #endif
1934 		/* readjust... */
1935 		vm_page_speculative_count = 0;
1936 		/* ... and continue */
1937 		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1938 	}
1939 
1940 	if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
1941 		can_steal = TRUE;
1942 	} else {
1943 		if (!delay_speculative_age) {
1944 			mach_timespec_t ts_fully_aged;
1945 
1946 			ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
1947 			ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
1948 			    * 1000 * NSEC_PER_USEC;
1949 
1950 			ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
1951 
1952 			clock_sec_t sec;
1953 			clock_nsec_t nsec;
1954 			clock_get_system_nanotime(&sec, &nsec);
1955 			ts.tv_sec = (unsigned int) sec;
1956 			ts.tv_nsec = nsec;
1957 
1958 			if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
1959 				can_steal = TRUE;
1960 			} else {
1961 				delay_speculative_age++;
1962 			}
1963 		} else {
1964 			delay_speculative_age++;
1965 			if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
1966 				delay_speculative_age = 0;
1967 			}
1968 		}
1969 	}
1970 	if (can_steal == TRUE) {
1971 		vm_page_speculate_ageit(aq);
1972 	}
1973 
1974 	return VM_PAGEOUT_SCAN_PROCEED;
1975 }
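
/*
 * Worked example for the 'fully aged' deadline above, using hypothetical
 * values of VM_PAGE_MAX_SPECULATIVE_AGE_Q = 10 bins and
 * vm_page_speculative_q_age_ms = 333 ms per bin:
 *
 *	total      = 10 * 333 = 3330 ms
 *	ts.tv_sec  = 3330 / 1000 = 3
 *	ts.tv_nsec = (3330 % 1000) * 1000 * NSEC_PER_USEC = 330,000,000
 *
 * i.e. a queue becomes stealable once the current time is ~3.33 s past its
 * age_ts stamp, even if the speculative target hasn't been exceeded.
 */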
1976 
1977 /*
1978  * This function is called only from vm_pageout_scan and
1979  * it evicts a single VM object from the cache.
1980  */
1981 static int inline
1982 vps_object_cache_evict(vm_object_t *object_to_unlock)
1983 {
1984 	static int                      cache_evict_throttle = 0;
1985 	struct vm_speculative_age_q     *sq;
1986 
1987 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1988 
1989 	if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
1990 		int     pages_evicted;
1991 
1992 		if (*object_to_unlock != NULL) {
1993 			vm_object_unlock(*object_to_unlock);
1994 			*object_to_unlock = NULL;
1995 		}
1996 		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1997 
1998 		pages_evicted = vm_object_cache_evict(100, 10);
1999 
2000 		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
2001 
2002 		if (pages_evicted) {
2003 			vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
2004 
2005 			VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
2006 			    vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
2007 			memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
2008 
2009 			/*
2010 			 * we just freed up to 100 pages,
2011 			 * so go back to the top of the main loop
2012 			 * and re-evaulate the memory situation
2013 			 * and re-evaluate the memory situation
2014 			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2015 		} else {
2016 			cache_evict_throttle = 1000;
2017 		}
2018 	}
2019 	if (cache_evict_throttle) {
2020 		cache_evict_throttle--;
2021 	}
2022 
2023 	return VM_PAGEOUT_SCAN_PROCEED;
2024 }
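
/*
 * Note on the throttle above: an eviction attempt that frees nothing arms
 * cache_evict_throttle at 1000, so vm_object_cache_evict() is not called
 * again until roughly 1000 subsequent passes through this function have
 * decremented the counter back to zero.
 */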
2025 
2026 
2027 /*
2028  * This function is called only from vm_pageout_scan and
2029  * it calculates the filecache minimum that needs to be maintained
2030  * as we start to steal pages.
2031  */
2032 static void
2033 vps_calculate_filecache_min(void)
2034 {
2035 	int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
2036 
2037 #if CONFIG_JETSAM
2038 	/*
2039 	 * don't let the filecache_min fall below 15% of available memory
2040 	 * on systems with an active compressor that isn't nearing its
2041 	 * limits w/r to accepting new data
2042 	 *
2043 	 * on systems w/o the compressor/swapper, the filecache is always
2044 	 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2045 	 * since most (if not all) of the anonymous pages are in the
2046 	 * throttled queue (which isn't counted as available) which
2047 	 * effectively disables this filter
2048 	 */
2049 	if (vm_compressor_low_on_space() || divisor == 0) {
2050 		vm_pageout_state.vm_page_filecache_min = 0;
2051 	} else {
2052 		vm_pageout_state.vm_page_filecache_min =
2053 		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2054 	}
2055 #else
2056 	if (vm_compressor_out_of_space() || divisor == 0) {
2057 		vm_pageout_state.vm_page_filecache_min = 0;
2058 	} else {
2059 		/*
2060 		 * don't let the filecache_min fall below the specified critical level
2061 		 */
2062 		vm_pageout_state.vm_page_filecache_min =
2063 		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2064 	}
2065 #endif
2066 	if (vm_page_free_count < (vm_page_free_reserved / 4)) {
2067 		vm_pageout_state.vm_page_filecache_min = 0;
2068 	}
2069 }
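
/*
 * Worked example for the computation above, assuming a hypothetical default
 * divisor of 66:
 *
 *	vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY * 10) / 66
 *	                      ~= 15% of AVAILABLE_NON_COMPRESSED_MEMORY
 *
 * which lines up with the 15% floor described for the CONFIG_JETSAM case;
 * the filter is dropped entirely once free memory dips below a quarter of
 * vm_page_free_reserved.
 */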
2070 
2071 /*
2072  * This function is called only from vm_pageout_scan and
2073  * it updates the flow control time to detect if VM pageout scan
2074  * isn't making progress.
2075  */
2076 static void
2077 vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
2078 {
2079 	mach_timespec_t ts;
2080 	clock_sec_t sec;
2081 	clock_nsec_t nsec;
2082 
2083 	ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
2084 	ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
2085 	clock_get_system_nanotime(&sec, &nsec);
2086 	flow_control->ts.tv_sec = (unsigned int) sec;
2087 	flow_control->ts.tv_nsec = nsec;
2088 	ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
2089 
2090 	flow_control->state = FCS_DELAYED;
2091 
2092 	vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
2093 }
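
/*
 * For illustration, with a hypothetical vm_pageout_deadlock_wait of 300 ms
 * the deadline above becomes:
 *
 *	ts.tv_sec  = 300 / 1000 = 0
 *	ts.tv_nsec = (300 % 1000) * 1000 * NSEC_PER_USEC = 300,000,000
 *
 * added to the current system time, i.e. the internal queue has ~300 ms to
 * show progress before FCS_DELAYED escalates to FCS_DEADLOCK_DETECTED.
 */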
2094 
2095 /*
2096  * This function is called only from vm_pageout_scan and
2097  * it is the flow control logic of VM pageout scan which
2098  * controls if it should block and for how long.
2099  * Any blocking of vm_pageout_scan happens ONLY in this function.
2100  */
2101 static int
2102 vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
2103     vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
2104 {
2105 	boolean_t       exceeded_burst_throttle = FALSE;
2106 	unsigned int    msecs = 0;
2107 	uint32_t        inactive_external_count;
2108 	mach_timespec_t ts;
2109 	struct  vm_pageout_queue *iq;
2110 	struct  vm_pageout_queue *eq;
2111 	struct  vm_speculative_age_q *sq;
2112 
2113 	iq = &vm_pageout_queue_internal;
2114 	eq = &vm_pageout_queue_external;
2115 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2116 
2117 	/*
2118 	 * Sometimes we have to pause:
2119 	 *	1) No inactive pages - nothing to do.
2120 	 *	2) Loop control - no acceptable pages found on the inactive queue
2121 	 *         within the last vm_pageout_burst_inactive_throttle iterations
2122 	 *	3) Flow control - default pageout queue is full
2123 	 */
2124 	if (vm_page_queue_empty(&vm_page_queue_inactive) &&
2125 	    vm_page_queue_empty(&vm_page_queue_anonymous) &&
2126 	    vm_page_queue_empty(&vm_page_queue_cleaned) &&
2127 	    vm_page_queue_empty(&sq->age_q)) {
2128 		VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
2129 		msecs = vm_pageout_state.vm_pageout_empty_wait;
2130 	} else if (inactive_burst_count >=
2131 	    MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
2132 	    (vm_page_inactive_count +
2133 	    vm_page_speculative_count))) {
2134 		VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
2135 		msecs = vm_pageout_state.vm_pageout_burst_wait;
2136 
2137 		exceeded_burst_throttle = TRUE;
2138 	} else if (VM_PAGE_Q_THROTTLED(iq) &&
2139 	    VM_DYNAMIC_PAGING_ENABLED()) {
2140 		clock_sec_t sec;
2141 		clock_nsec_t nsec;
2142 
2143 		switch (flow_control->state) {
2144 		case FCS_IDLE:
2145 			if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
2146 			    vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2147 				/*
2148 				 * since the compressor is running independently of vm_pageout_scan
2149 				 * let's not wait for it just yet... as long as we have a healthy supply
2150 				 * of filecache pages to work with, let's keep stealing those.
2151 				 */
2152 				inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2153 
2154 				if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
2155 				    (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2156 					*anons_grabbed = ANONS_GRABBED_LIMIT;
2157 					VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
2158 					return VM_PAGEOUT_SCAN_PROCEED;
2159 				}
2160 			}
2161 
2162 			vps_flow_control_reset_deadlock_timer(flow_control);
2163 			msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2164 
2165 			break;
2166 
2167 		case FCS_DELAYED:
2168 			clock_get_system_nanotime(&sec, &nsec);
2169 			ts.tv_sec = (unsigned int) sec;
2170 			ts.tv_nsec = nsec;
2171 
2172 			if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
2173 				/*
2174 				 * the pageout thread for the default pager is potentially
2175 				 * deadlocked since the
2176 				 * default pager queue has been throttled for more than the
2177 				 * allowable time... we need to move some clean pages or dirty
2178 				 * pages belonging to the external pagers if they aren't throttled
2179 				 * vm_page_free_wanted represents the number of threads currently
2180 				 * blocked waiting for pages... we'll move one page for each of
2181 				 * these plus a fixed amount to break the logjam... once we're done
2182 				 * moving this number of pages, we'll re-enter the FCS_DELAYED state
2183 				 * with a new timeout target since we have no way of knowing
2184 				 * whether we've broken the deadlock except through observation
2185 				 * of the queue associated with the default pager... we need to
2186 				 * stop moving pages and allow the system to run to see what
2187 				 * state it settles into.
2188 				 */
2189 
2190 				*vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
2191 				    vm_page_free_wanted + vm_page_free_wanted_privileged;
2192 				VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
2193 				flow_control->state = FCS_DEADLOCK_DETECTED;
2194 				thread_wakeup((event_t) &vm_pageout_garbage_collect);
2195 				return VM_PAGEOUT_SCAN_PROCEED;
2196 			}
2197 			/*
2198 			 * just resniff instead of trying
2199 			 * to compute a new delay time... we're going to be
2200 			 * awakened immediately upon a laundry completion,
2201 			 * so we won't wait any longer than necessary
2202 			 */
2203 			msecs = vm_pageout_state.vm_pageout_idle_wait;
2204 			break;
2205 
2206 		case FCS_DEADLOCK_DETECTED:
2207 			if (*vm_pageout_deadlock_target) {
2208 				return VM_PAGEOUT_SCAN_PROCEED;
2209 			}
2210 
2211 			vps_flow_control_reset_deadlock_timer(flow_control);
2212 			msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2213 
2214 			break;
2215 		}
2216 	} else {
2217 		/*
2218 		 * No need to pause...
2219 		 */
2220 		return VM_PAGEOUT_SCAN_PROCEED;
2221 	}
2222 
2223 	vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2224 
2225 	vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
2226 	    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2227 
2228 	if (vm_page_free_count >= vm_page_free_target) {
2229 		/*
2230 		 * we're here because
2231 		 *  1) someone else freed up some pages while we had
2232 		 *     the queues unlocked above
2233 		 * and we've hit one of the 3 conditions that
2234 		 * cause us to pause the pageout scan thread
2235 		 *
2236 		 * since we already have enough free pages,
2237 		 * let's avoid stalling and return normally
2238 		 *
2239 		 * before we return, make sure the pageout I/O threads
2240 		 * are running throttled in case there are still requests
2241 		 * in the laundry... since we have enough free pages
2242 		 * we don't need the laundry to be cleaned in a timely
2243 		 * fashion... so let's avoid interfering with foreground
2244 		 * activity
2245 		 *
2246 		 * we don't want to hold vm_page_queue_free_lock when
2247 		 * calling vm_pageout_adjust_eq_iothrottle (since it
2248 		 * may cause other locks to be taken), we do the initial
2249 		 * check outside of the lock.  Once we take the lock,
2250 		 * we recheck the condition since it may have changed.
2251 		 * if it has, no problem, we will make the threads
2252 		 * non-throttled before actually blocking
2253 		 */
2254 		vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2255 	}
2256 	lck_mtx_lock(&vm_page_queue_free_lock);
2257 
2258 	if (vm_page_free_count >= vm_page_free_target &&
2259 	    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2260 		return VM_PAGEOUT_SCAN_DONE_RETURN;
2261 	}
2262 	lck_mtx_unlock(&vm_page_queue_free_lock);
2263 
2264 	if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
2265 		/*
2266 		 * we're most likely about to block due to one of
2267 		 * the 3 conditions that cause vm_pageout_scan to
2268 		 * not be able to make forward progress w/r
2269 		 * to providing new pages to the free queue,
2270 		 * so unthrottle the I/O threads in case we
2271 		 * have laundry to be cleaned... it needs
2272 		 * to be completed ASAP.
2273 		 *
2274 		 * even if we don't block, we want the io threads
2275 		 * running unthrottled since the sum of free +
2276 		 * clean pages is still under our free target
2277 		 */
2278 		vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2279 	}
2280 	if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
2281 		/*
2282 		 * if we get here we're below our free target and
2283 		 * we're stalling due to a full laundry queue or
2284 		 * we don't have any inactive pages other than
2285 		 * those in the clean queue...
2286 		 * however, we have pages on the clean queue that
2287 		 * can be moved to the free queue, so let's not
2288 		 * stall the pageout scan
2289 		 */
2290 		flow_control->state = FCS_IDLE;
2291 		return VM_PAGEOUT_SCAN_PROCEED;
2292 	}
2293 	if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
2294 		flow_control->state = FCS_IDLE;
2295 		return VM_PAGEOUT_SCAN_PROCEED;
2296 	}
2297 
2298 	VM_CHECK_MEMORYSTATUS;
2299 
2300 	if (flow_control->state != FCS_IDLE) {
2301 		VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
2302 	}
2303 
2304 	iq->pgo_throttled = TRUE;
2305 	assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
2306 
2307 	vm_page_unlock_queues();
2308 
2309 	assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2310 
2311 	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
2312 	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2313 	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
2314 
2315 	thread_block(THREAD_CONTINUE_NULL);
2316 
2317 	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
2318 	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2319 	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
2320 
2321 	vm_page_lock_queues();
2322 
2323 	iq->pgo_throttled = FALSE;
2324 
2325 	vps_init_page_targets();
2326 
2327 	return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2328 }
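
/*
 * A condensed view of the flow-control state machine driven above:
 *
 *	FCS_IDLE              -- queues healthy; arm the deadlock timer when
 *	                         the internal queue is throttled
 *	FCS_DELAYED           -- waiting on the deadlock timer; on expiry,
 *	                         compute a relief target and escalate
 *	FCS_DEADLOCK_DETECTED -- keep proceeding until the relief target is
 *	                         consumed, then re-arm the timer
 *
 * Any actual blocking happens at the bottom of vps_flow_control() via
 * assert_wait_timeout()/thread_block() on the internal queue's laundry.
 */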
2329 
2330 /*
2331  * This function is called only from vm_pageout_scan and
2332  * it will find and return the most appropriate page to be
2333  * reclaimed.
2334  */
2335 static int
2336 vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
2337     boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
2338 {
2339 	vm_page_t                       m = NULL;
2340 	vm_object_t                     m_object = VM_OBJECT_NULL;
2341 	uint32_t                        inactive_external_count;
2342 	struct vm_speculative_age_q     *sq;
2343 	struct vm_pageout_queue         *iq;
2344 	int                             retval = VM_PAGEOUT_SCAN_PROCEED;
2345 
2346 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2347 	iq = &vm_pageout_queue_internal;
2348 
2349 	*is_page_from_bg_q = FALSE;
2350 
2351 	m = NULL;
2352 	m_object = VM_OBJECT_NULL;
2353 
2354 	if (VM_DYNAMIC_PAGING_ENABLED()) {
2355 		assert(vm_page_throttled_count == 0);
2356 		assert(vm_page_queue_empty(&vm_page_queue_throttled));
2357 	}
2358 
2359 	/*
2360 	 * Try for a clean-queue inactive page.
2361 	 * These are pages that vm_pageout_scan tried to steal earlier, but
2362 	 * were dirty and had to be cleaned.  Pick them up now that they are clean.
2363 	 */
2364 	if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2365 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2366 
2367 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
2368 
2369 		goto found_page;
2370 	}
2371 
2372 	/*
2373 	 * The next most eligible pages are ones we paged in speculatively,
2374 	 * but which have not yet been touched and have been aged out.
2375 	 */
2376 	if (!vm_page_queue_empty(&sq->age_q)) {
2377 		m = (vm_page_t) vm_page_queue_first(&sq->age_q);
2378 
2379 		assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
2380 
2381 		if (!m->vmp_dirty || force_anonymous == FALSE) {
2382 			goto found_page;
2383 		} else {
2384 			m = NULL;
2385 		}
2386 	}
2387 
2388 #if CONFIG_BACKGROUND_QUEUE
2389 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
2390 		vm_object_t     bg_m_object = NULL;
2391 
2392 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2393 
2394 		bg_m_object = VM_PAGE_OBJECT(m);
2395 
2396 		if (!VM_PAGE_PAGEABLE(m)) {
2397 			/*
2398 			 * This page is on the background queue
2399 			 * but not on a pageable queue.  This is
2400 			 * likely a transient state and whoever
2401 			 * took it out of its pageable queue
2402 			 * will likely put it back on a pageable
2403 			 * queue soon but we can't deal with it
2404 			 * at this point, so let's ignore this
2405 			 * page.
2406 			 */
2407 		} else if (force_anonymous == FALSE || bg_m_object->internal) {
2408 			if (bg_m_object->internal &&
2409 			    (VM_PAGE_Q_THROTTLED(iq) ||
2410 			    vm_compressor_out_of_space() == TRUE ||
2411 			    vm_page_free_count < (vm_page_free_reserved / 4))) {
2412 				vm_pageout_skipped_bq_internal++;
2413 			} else {
2414 				*is_page_from_bg_q = TRUE;
2415 
2416 				if (bg_m_object->internal) {
2417 					vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
2418 				} else {
2419 					vm_pageout_vminfo.vm_pageout_considered_bq_external++;
2420 				}
2421 				goto found_page;
2422 			}
2423 		}
2424 	}
2425 #endif /* CONFIG_BACKGROUND_QUEUE */
2426 
2427 	inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2428 
2429 	if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
2430 	    (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2431 		*grab_anonymous = TRUE;
2432 		*anons_grabbed = 0;
2433 
2434 		if (VM_CONFIG_SWAP_IS_ACTIVE) {
2435 			vm_pageout_vminfo.vm_pageout_skipped_external++;
2436 		} else {
2437 			if (vm_page_free_count < (COMPRESSOR_FREE_RESERVED_LIMIT * 2)) {
2438 				/*
2439 				 * No swap and we are in dangerously low levels of free memory.
2440 				 * If we keep going ahead with anonymous pages, we are going to run into a situation
2441 				 * where the compressor will be stuck waiting for free pages (if it isn't already).
2442 				 *
2443 				 * So, pick a file backed page...
2444 				 */
2445 				*grab_anonymous = FALSE;
2446 				*anons_grabbed = ANONS_GRABBED_LIMIT;
2447 				vm_pageout_vminfo.vm_pageout_skipped_internal++;
2448 			}
2449 		}
2450 		goto want_anonymous;
2451 	}
2452 	*grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
2453 
2454 #if CONFIG_JETSAM
2455 	/* If the file-backed pool has accumulated
2456 	 * significantly more pages than the jetsam
2457 	 * threshold, prefer to reclaim those
2458 	 * inline to minimise compute overhead of reclaiming
2459 	 * anonymous pages.
2460 	 * This calculation does not account for the CPU local
2461 	 * external page queues, as those are expected to be
2462 	 * much smaller relative to the global pools.
2463 	 */
2464 
2465 	struct vm_pageout_queue *eq = &vm_pageout_queue_external;
2466 
2467 	if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
2468 		if (vm_page_pageable_external_count >
2469 		    vm_pageout_state.vm_page_filecache_min) {
2470 			if ((vm_page_pageable_external_count *
2471 			    vm_pageout_memorystatus_fb_factor_dr) >
2472 			    (memorystatus_available_pages_critical *
2473 			    vm_pageout_memorystatus_fb_factor_nr)) {
2474 				*grab_anonymous = FALSE;
2475 
2476 				VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
2477 			}
2478 		}
2479 		if (*grab_anonymous) {
2480 			VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
2481 		}
2482 	}
2483 #endif /* CONFIG_JETSAM */
2484 
2485 want_anonymous:
2486 	if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
2487 		if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
2488 			m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2489 
2490 			assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
2491 			*anons_grabbed = 0;
2492 
2493 			if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
2494 				if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2495 					if ((++(*reactivated_this_call) % 100)) {
2496 						vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
2497 
2498 						vm_page_activate(m);
2499 						counter_inc(&vm_statistics_reactivations);
2500 #if CONFIG_BACKGROUND_QUEUE
2501 #if DEVELOPMENT || DEBUG
2502 						if (*is_page_from_bg_q == TRUE) {
2503 							if (m_object->internal) {
2504 								vm_pageout_rejected_bq_internal++;
2505 							} else {
2506 								vm_pageout_rejected_bq_external++;
2507 							}
2508 						}
2509 #endif /* DEVELOPMENT || DEBUG */
2510 #endif /* CONFIG_BACKGROUND_QUEUE */
2511 						vm_pageout_state.vm_pageout_inactive_used++;
2512 
2513 						m = NULL;
2514 						retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
2515 
2516 						goto found_page;
2517 					}
2518 
2519 					/*
2520 					 * steal 1 of the file backed pages even if
2521 					 * we are under the limit that has been set
2522 					 * for a healthy filecache
2523 					 */
2524 				}
2525 			}
2526 			goto found_page;
2527 		}
2528 	}
2529 	if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2530 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2531 
2532 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
2533 		*anons_grabbed += 1;
2534 
2535 		goto found_page;
2536 	}
2537 
2538 	m = NULL;
2539 
2540 found_page:
2541 	*victim_page = m;
2542 
2543 	return retval;
2544 }
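
/*
 * In summary, the victim search above prefers, in order:
 *	1) the cleaned queue (pages stolen earlier, now clean)
 *	2) the aged speculative queue
 *	3) the background queue, when it is over its target
 *	4) file-backed inactive pages, unless the filecache is at or below
 *	   vm_page_filecache_min or force_anonymous is set
 *	5) anonymous pages, limited to ANONS_GRABBED_LIMIT in a row
 */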
2545 
2546 /*
2547  * This function is called only from vm_pageout_scan and
2548  * it will put a page back on the active/inactive queue
2549  * if we can't reclaim it for some reason.
2550  */
2551 static void
2552 vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
2553 {
2554 	if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2555 		vm_page_enqueue_inactive(m, FALSE);
2556 	} else {
2557 		vm_page_activate(m);
2558 	}
2559 
2560 #if CONFIG_BACKGROUND_QUEUE
2561 #if DEVELOPMENT || DEBUG
2562 	vm_object_t m_object = VM_PAGE_OBJECT(m);
2563 
2564 	if (page_from_bg_q == TRUE) {
2565 		if (m_object->internal) {
2566 			vm_pageout_rejected_bq_internal++;
2567 		} else {
2568 			vm_pageout_rejected_bq_external++;
2569 		}
2570 	}
2571 #endif /* DEVELOPMENT || DEBUG */
2572 #endif /* CONFIG_BACKGROUND_QUEUE */
2573 }
2574 
2575 /*
2576  * This function is called only from vm_pageout_scan and
2577  * it will try to grab the victim page's VM object (m_object)
2578  * which differs from the previous victim page's object (object).
2579  */
2580 static int
2581 vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
2582 {
2583 	struct vm_speculative_age_q *sq;
2584 
2585 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2586 
2587 	/*
2588 	 * the object associated with candidate page is
2589 	 * different from the one we were just working
2590 	 * with... dump the lock if we still own it
2591 	 */
2592 	if (*object != NULL) {
2593 		vm_object_unlock(*object);
2594 		*object = NULL;
2595 	}
2596 	/*
2597 	 * Try to lock object; since we've already got the
2598 	 * page queues lock, we can only 'try' for this one.
2599 	 * if the 'try' fails, we need to do a mutex_pause
2600 	 * to allow the owner of the object lock a chance to
2601 	 * run... otherwise, we're likely to trip over this
2602 	 * object in the same state as we work our way through
2603 	 * the queue... clumps of pages associated with the same
2604 	 * object are fairly typical on the inactive and active queues
2605 	 */
2606 	if (!vm_object_lock_try_scan(m_object)) {
2607 		vm_page_t m_want = NULL;
2608 
2609 		vm_pageout_vminfo.vm_pageout_inactive_nolock++;
2610 
2611 		if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2612 			VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
2613 		}
2614 
2615 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
2616 
2617 		m->vmp_reference = FALSE;
2618 
2619 		if (!m_object->object_is_shared_cache) {
2620 			/*
2621 			 * don't apply this optimization if this is the shared cache
2622 			 * object, it's too easy to get rid of very hot and important
2623 			 * pages...
2624 			 * m->vmp_object must be stable since we hold the page queues lock...
2625 			 * we can update the scan_collisions field sans the object lock
2626 			 * since it is a separate field and this is the only spot that does
2627 			 * a read-modify-write operation and it is never executed concurrently...
2628 			 * we can asynchronously set this field to 0 when creating a UPL, so it
2629 	 * is possible for the value to be a bit non-deterministic, but that's ok
2630 			 * since it's only used as a hint
2631 			 */
2632 			m_object->scan_collisions = 1;
2633 		}
2634 		if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2635 			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2636 		} else if (!vm_page_queue_empty(&sq->age_q)) {
2637 			m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
2638 		} else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
2639 		    !vm_page_queue_empty(&vm_page_queue_inactive)) {
2640 			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2641 		} else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2642 			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2643 		}
2644 
2645 		/*
2646 		 * this is the next object we're going to be interested in
2647 		 * try to make sure its available after the mutex_pause
2648 		 * try to make sure it's available after the mutex_pause
2649 		 */
2650 		if (m_want) {
2651 			vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
2652 		}
2653 
2654 		vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
2655 
2656 		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2657 	} else {
2658 		*object = m_object;
2659 		vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2660 	}
2661 
2662 	return VM_PAGEOUT_SCAN_PROCEED;
2663 }
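
/*
 * The try-lock above is necessary because the page queues lock is already
 * held and the usual ordering takes the object lock first; on a miss the
 * page is requeued, scan_collisions is set purely as a hint, and
 * vm_pageout_scan_wants_object records the object the scan would like next
 * so that contending threads can give way after the mutex_pause.
 */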
2664 
2665 /*
2666  * This function is called only from vm_pageout_scan and
2667  * it notices that pageout scan may be rendered ineffective
2668  * due to a FS deadlock and will jetsam a process if possible.
2669  * If jetsam isn't supported, it'll move the page to the active
2670  * queue to try and get some different pages pushed onwards so
2671  * we can try to get out of this scenario.
2672  */
2673 static void
2674 vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
2675     int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
2676 {
2677 	struct  vm_pageout_queue *eq;
2678 	vm_object_t cur_object = VM_OBJECT_NULL;
2679 
2680 	cur_object = *object;
2681 
2682 	eq = &vm_pageout_queue_external;
2683 
2684 	if (cur_object->internal == FALSE) {
2685 		/*
2686 		 * we need to break up the following potential deadlock case...
2687 		 *  a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2688 		 *  b) The thread doing the writing is waiting for pages while holding the truncate lock
2689 		 *  c) Most of the pages in the inactive queue belong to this file.
2690 		 *
2691 		 * we are potentially in this deadlock because...
2692 		 *  a) the external pageout queue is throttled
2693 		 *  b) we're done with the active queue and moved on to the inactive queue
2694 		 *  c) we've got a dirty external page
2695 		 *
2696 		 * since we don't know the reason for the external pageout queue being throttled we
2697 		 * must suspect that we are deadlocked, so move the current page onto the active queue
2698 		 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2699 		 *
2700 		 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2701 		 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2702 		 * pool the next time we select a victim page... if we can make enough new free pages,
2703 		 * the deadlock will break, the external pageout queue will empty and it will no longer
2704 		 * be throttled
2705 		 *
2706 		 * if we have jetsam configured, keep a count of the pages reactivated this way so
2707 		 * that we can try to find clean pages in the active/inactive queues before
2708 		 * deciding to jetsam a process
2709 		 */
2710 		vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
2711 
2712 		vm_page_check_pageable_safe(m);
2713 		assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
2714 		vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
2715 		m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
2716 		vm_page_active_count++;
2717 		vm_page_pageable_external_count++;
2718 
2719 		vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2720 
2721 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2722 
2723 #pragma unused(force_anonymous)
2724 
2725 		*vm_pageout_inactive_external_forced_reactivate_limit -= 1;
2726 
2727 		if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
2728 			*vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2729 			/*
2730 			 * Possible deadlock scenario so request jetsam action
2731 			 */
2732 
2733 			assert(cur_object);
2734 			vm_object_unlock(cur_object);
2735 
2736 			cur_object = VM_OBJECT_NULL;
2737 
2738 			/*
2739 			 * VM pageout scan needs to know we have dropped this lock and so set the
2740 			 * object variable we got passed in to NULL.
2741 			 */
2742 			*object = VM_OBJECT_NULL;
2743 
2744 			vm_page_unlock_queues();
2745 
2746 			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
2747 			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2748 
2749 			/* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
2750 			if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
2751 				VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
2752 			}
2753 
2754 			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
2755 			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2756 
2757 			vm_page_lock_queues();
2758 			*delayed_unlock = 1;
2759 		}
2760 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2761 
2762 #pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
2763 #pragma unused(delayed_unlock)
2764 
2765 		*force_anonymous = TRUE;
2766 #endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2767 	} else {
2768 		vm_page_activate(m);
2769 		counter_inc(&vm_statistics_reactivations);
2770 
2771 #if CONFIG_BACKGROUND_QUEUE
2772 #if DEVELOPMENT || DEBUG
2773 		if (is_page_from_bg_q == TRUE) {
2774 			if (cur_object->internal) {
2775 				vm_pageout_rejected_bq_internal++;
2776 			} else {
2777 				vm_pageout_rejected_bq_external++;
2778 			}
2779 		}
2780 #endif /* DEVELOPMENT || DEBUG */
2781 #endif /* CONFIG_BACKGROUND_QUEUE */
2782 
2783 		vm_pageout_state.vm_pageout_inactive_used++;
2784 	}
2785 }
2786 
2787 
2788 void
2789 vm_page_balance_inactive(int max_to_move)
2790 {
2791 	vm_page_t m;
2792 
2793 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2794 
2795 	if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
2796 		/*
2797 		 * It is likely that the hibernation code path is
2798 		 * dealing with these very queues as we are about
2799 		 * to move pages around in/from them and completely
2800 		 * change the linkage of the pages.
2801 		 *
2802 		 * And so we skip the rebalancing of these queues.
2803 		 */
2804 		return;
2805 	}
2806 	vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2807 	    vm_page_inactive_count +
2808 	    vm_page_speculative_count);
2809 
2810 	while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
2811 		VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
2812 
2813 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
2814 
2815 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
2816 		assert(!m->vmp_laundry);
2817 		assert(VM_PAGE_OBJECT(m) != kernel_object);
2818 		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
2819 
2820 		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2821 
2822 		/*
2823 		 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
2824 		 *
2825 		 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
2826 		 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
2827 		 * new reference happens. If no futher references happen on the page after that remote TLB flushes
2828 		 * new reference happens. If no further references happen on the page after that remote TLB flushes
2829 		 * by pageout_scan, which is just fine since the last reference would have happened quite far
2830 		 * in the past (TLB caches don't hang around for very long), and of course could just as easily
2831 		 * have happened before we moved the page
2832 		 */
2833 		if (m->vmp_pmapped == TRUE) {
2834 			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
2835 		}
2836 
2837 		/*
2838 		 * The page might be absent or busy,
2839 		 * but vm_page_deactivate can handle that.
2840 		 * FALSE indicates that we don't want a H/W clear reference
2841 		 */
2842 		vm_page_deactivate_internal(m, FALSE);
2843 	}
2844 }
2845 
2846 
2847 /*
2848  *	vm_pageout_scan does the dirty work for the pageout daemon.
2849  *	It returns with both vm_page_queue_free_lock and vm_page_queue_lock
2850  *	held and vm_page_free_wanted == 0.
2851  */
2852 void
2853 vm_pageout_scan(void)
2854 {
2855 	unsigned int loop_count = 0;
2856 	unsigned int inactive_burst_count = 0;
2857 	unsigned int reactivated_this_call;
2858 	unsigned int reactivate_limit;
2859 	vm_page_t   local_freeq = NULL;
2860 	int         local_freed = 0;
2861 	int         delayed_unlock;
2862 	int         delayed_unlock_limit = 0;
2863 	int         refmod_state = 0;
2864 	int     vm_pageout_deadlock_target = 0;
2865 	struct  vm_pageout_queue *iq;
2866 	struct  vm_pageout_queue *eq;
2867 	struct  vm_speculative_age_q *sq;
2868 	struct  flow_control    flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
2869 	boolean_t inactive_throttled = FALSE;
2870 	vm_object_t     object = NULL;
2871 	uint32_t        inactive_reclaim_run;
2872 	boolean_t       grab_anonymous = FALSE;
2873 	boolean_t       force_anonymous = FALSE;
2874 	boolean_t       force_speculative_aging = FALSE;
2875 	int             anons_grabbed = 0;
2876 	int             page_prev_q_state = 0;
2877 	boolean_t       page_from_bg_q = FALSE;
2878 	uint32_t        vm_pageout_inactive_external_forced_reactivate_limit = 0;
2879 	vm_object_t     m_object = VM_OBJECT_NULL;
2880 	int             retval = 0;
2881 	boolean_t       lock_yield_check = FALSE;
2882 
2883 
2884 	VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
2885 	    vm_pageout_vminfo.vm_pageout_freed_speculative,
2886 	    vm_pageout_state.vm_pageout_inactive_clean,
2887 	    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
2888 	    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
2889 
2890 	flow_control.state = FCS_IDLE;
2891 	iq = &vm_pageout_queue_internal;
2892 	eq = &vm_pageout_queue_external;
2893 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2894 
2895 	/* Ask the pmap layer to return any pages it no longer needs. */
2896 	uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
2897 
2898 	vm_page_lock_queues();
2899 
2900 	vm_page_wire_count -= pmap_wired_pages_freed;
2901 
2902 	delayed_unlock = 1;
2903 
2904 	/*
2905 	 *	Calculate the max number of referenced pages on the inactive
2906 	 *	queue that we will reactivate.
2907 	 */
2908 	reactivated_this_call = 0;
2909 	reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
2910 	    vm_page_inactive_count);
2911 	inactive_reclaim_run = 0;
2912 
2913 	vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2914 
2915 	/*
2916 	 *	We must limit the rate at which we send pages to the pagers
2917 	 *	so that we don't tie up too many pages in the I/O queues.
2918 	 *	We implement a throttling mechanism using the laundry count
2919 	 *      to limit the number of pages outstanding to the default
2920 	 *	and external pagers.  We can bypass the throttles and look
2921 	 *	for clean pages if the pageout queues don't drain in a timely
2922 	 *	fashion since this may indicate that the pageout paths are
2923 	 *	stalled waiting for memory, which only we can provide.
2924 	 */
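
	/*
	 * In practice the throttle check is VM_PAGE_Q_THROTTLED(), which is
	 * based on a queue's pgo_laundry count (pages currently out at the
	 * pagers) reaching pgo_maxlaundry; a throttled internal queue is what
	 * drives vps_flow_control() into the FCS_* states.
	 */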
2925 
2926 	vps_init_page_targets();
2927 	assert(object == NULL);
2928 	assert(delayed_unlock != 0);
2929 
2930 	for (;;) {
2931 		vm_page_t m;
2932 
2933 		DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
2934 
2935 		if (lock_yield_check) {
2936 			lock_yield_check = FALSE;
2937 
2938 			if (delayed_unlock++ > delayed_unlock_limit) {
2939 				int freed = local_freed;
2940 
2941 				vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2942 				    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2943 				if (freed == 0) {
2944 					lck_mtx_yield(&vm_page_queue_lock);
2945 				}
2946 			} else if (vm_pageout_scan_wants_object) {
2947 				vm_page_unlock_queues();
2948 				mutex_pause(0);
2949 				vm_page_lock_queues();
2950 			}
2951 		}
2952 
2953 		if (vm_upl_wait_for_pages < 0) {
2954 			vm_upl_wait_for_pages = 0;
2955 		}
2956 
2957 		delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
2958 
2959 		if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
2960 			delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
2961 		}
2962 
2963 		vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
2964 
2965 		assert(delayed_unlock);
2966 
2967 		/*
2968 		 * maintain our balance
2969 		 */
2970 		vm_page_balance_inactive(1);
2971 
2972 
2973 		/**********************************************************************
2974 		* above this point we're playing with the active and secluded queues
2975 		* below this point we're playing with the throttling mechanisms
2976 		* and the inactive queue
2977 		**********************************************************************/
2978 
2979 		if (vm_page_free_count + local_freed >= vm_page_free_target) {
2980 			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2981 
2982 			vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2983 			    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2984 			/*
2985 			 * make sure the pageout I/O threads are running
2986 			 * throttled in case there are still requests
2987 			 * in the laundry... since we have met our targets
2988 			 * we don't need the laundry to be cleaned in a timely
2989 			 * fashion... so let's avoid interfering with foreground
2990 			 * activity
2991 			 */
2992 			vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2993 
2994 			lck_mtx_lock(&vm_page_queue_free_lock);
2995 
2996 			if ((vm_page_free_count >= vm_page_free_target) &&
2997 			    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2998 				/*
2999 				 * done - we have met our target *and*
3000 				 * there is no one waiting for a page.
3001 				 */
3002 return_from_scan:
3003 				assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
3004 
3005 				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
3006 				    vm_pageout_state.vm_pageout_inactive,
3007 				    vm_pageout_state.vm_pageout_inactive_used, 0, 0);
3008 				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
3009 				    vm_pageout_vminfo.vm_pageout_freed_speculative,
3010 				    vm_pageout_state.vm_pageout_inactive_clean,
3011 				    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
3012 				    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
3013 
3014 				return;
3015 			}
3016 			lck_mtx_unlock(&vm_page_queue_free_lock);
3017 		}
3018 
3019 		/*
3020 		 * Before anything, we check if we have any ripe volatile
3021 		 * objects around. If so, try to purge the first object.
3022 		 * If the purge fails, fall through to reclaim a page instead.
3023 		 * If the purge succeeds, go back to the top and reevaluate
3024 		 * the new memory situation.
3025 		 */
3026 		retval = vps_purge_object();
3027 
3028 		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3029 			/*
3030 			 * Success
3031 			 */
3032 			if (object != NULL) {
3033 				vm_object_unlock(object);
3034 				object = NULL;
3035 			}
3036 
3037 			lock_yield_check = FALSE;
3038 			continue;
3039 		}
3040 
3041 		/*
3042 		 * If our 'aged' queue is empty and we have some speculative pages
3043 		 * in the other queues, let's go through and see if we need to age
3044 		 * them.
3045 		 *
3046 		 * If we succeeded in aging a speculative Q or just that everything
3047 		 * looks normal w.r.t queue age and queue counts, we keep going onward.
3048 		 *
3049 		 * If, for some reason, we seem to have a mismatch between the spec.
3050 		 * page count and the page queues, we reset those variables and
3051 		 * restart the loop (LD TODO: Track this better?).
3052 		 */
3053 		if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
3054 			retval = vps_age_speculative_queue(force_speculative_aging);
3055 
3056 			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3057 				lock_yield_check = FALSE;
3058 				continue;
3059 			}
3060 		}
3061 		force_speculative_aging = FALSE;
3062 
3063 		/*
3064 		 * Check to see if we need to evict objects from the cache.
3065 		 *
3066 		 * Note: 'object' here doesn't have anything to do with
3067 		 * the eviction part. We just need to make sure we have dropped
3068 		 * any object lock we might be holding if we need to go down
3069 		 * into the eviction logic.
3070 		 */
3071 		retval = vps_object_cache_evict(&object);
3072 
3073 		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3074 			lock_yield_check = FALSE;
3075 			continue;
3076 		}
3077 
3078 
3079 		/*
3080 		 * Calculate our filecache_min that will affect the loop
3081 		 * going forward.
3082 		 */
3083 		vps_calculate_filecache_min();
3084 
3085 		/*
3086 		 * LD TODO: Use a structure to hold all state variables for a single
3087 		 * vm_pageout_scan iteration and pass that structure to this function instead.
3088 		 */
3089 		retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
3090 		    &delayed_unlock, &local_freeq, &local_freed,
3091 		    &vm_pageout_deadlock_target, inactive_burst_count);
3092 
3093 		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3094 			if (loop_count >= vm_page_inactive_count) {
3095 				loop_count = 0;
3096 			}
3097 
3098 			inactive_burst_count = 0;
3099 
3100 			assert(object == NULL);
3101 			assert(delayed_unlock != 0);
3102 
3103 			lock_yield_check = FALSE;
3104 			continue;
3105 		} else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
3106 			goto return_from_scan;
3107 		}
3108 
3109 		flow_control.state = FCS_IDLE;
3110 
3111 		vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
3112 		    vm_pageout_inactive_external_forced_reactivate_limit);
3113 		loop_count++;
3114 		inactive_burst_count++;
3115 		vm_pageout_state.vm_pageout_inactive++;
3116 
3117 		/*
3118 		 * Choose a victim.
3119 		 */
3120 
3121 		m = NULL;
3122 		retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);
3123 
3124 		if (m == NULL) {
3125 			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3126 				inactive_burst_count = 0;
3127 
3128 				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3129 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3130 				}
3131 
3132 				lock_yield_check = TRUE;
3133 				continue;
3134 			}
3135 
3136 			/*
3137 			 * if we've gotten here, we have no victim page.
3138 			 * check to see if we've not finished balancing the queues
3139 			 * or we have a page on the aged speculative queue that we
3140 			 * skipped due to force_anonymous == TRUE.. or we have
3141 			 * speculative  pages that we can prematurely age... if
3142 			 * one of these cases we'll keep going, else panic
3143 			 */
3144 			force_anonymous = FALSE;
3145 			VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
3146 
3147 			if (!vm_page_queue_empty(&sq->age_q)) {
3148 				lock_yield_check = TRUE;
3149 				continue;
3150 			}
3151 
3152 			if (vm_page_speculative_count) {
3153 				force_speculative_aging = TRUE;
3154 				lock_yield_check = TRUE;
3155 				continue;
3156 			}
3157 			panic("vm_pageout: no victim");
3158 
3159 			/* NOTREACHED */
3160 		}
3161 
3162 		assert(VM_PAGE_PAGEABLE(m));
3163 		m_object = VM_PAGE_OBJECT(m);
3164 		force_anonymous = FALSE;
3165 
3166 		page_prev_q_state = m->vmp_q_state;
3167 		/*
3168 		 * we just found this page on one of our queues...
3169 		 * it can't also be on the pageout queue, so safe
3170 		 * to call vm_page_queues_remove
3171 		 */
3172 		vm_page_queues_remove(m, TRUE);
3173 
3174 		assert(!m->vmp_laundry);
3175 		assert(!m->vmp_private);
3176 		assert(!m->vmp_fictitious);
3177 		assert(m_object != kernel_object);
3178 		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
3179 
3180 		vm_pageout_vminfo.vm_pageout_considered_page++;
3181 
3182 		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
3183 
3184 		/*
3185 		 * check to see if we currently are working
3186 		 * with the same object... if so, we've
3187 		 * already got the lock
3188 		 */
3189 		if (m_object != object) {
3190 			boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
3191 
3192 			/*
3193 			 * vps_switch_object() will always drop the 'object' lock first
3194 			 * and then try to acquire the 'm_object' lock. So 'object' has to point to
3195 			 * either 'm_object' or NULL.
3196 			 */
3197 			retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
3198 
3199 			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3200 				lock_yield_check = TRUE;
3201 				continue;
3202 			}
3203 		}
3204 		assert(m_object == object);
3205 		assert(VM_PAGE_OBJECT(m) == m_object);
3206 
3207 		if (m->vmp_busy) {
3208 			/*
3209 			 *	Somebody is already playing with this page.
3210 			 *	Put it back on the appropriate queue
3211 			 *
3212 			 */
3213 			VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
3214 
3215 			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3216 				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
3217 			}
3218 
3219 			vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
3220 
3221 			lock_yield_check = TRUE;
3222 			continue;
3223 		}
3224 
3225 		/*
3226 		 *   if (m->vmp_cleaning && !m->vmp_free_when_done)
3227 		 *	If already cleaning this page in place
3228 		 *	just leave it off the paging queues.
3229 		 *	We can leave the page mapped, and upl_commit_range
3230 		 *	will put it on the clean queue.
3231 		 *
3232 		 *   if (m->vmp_free_when_done && !m->vmp_cleaning)
3233 		 *	an msync INVALIDATE is in progress...
3234 		 *	this page has been marked for destruction
3235 		 *      after it has been cleaned,
3236 		 *      but not yet gathered into a UPL
3237 		 *	where 'cleaning' will be set...
3238 		 *	just leave it off the paging queues
3239 		 *
3240 		 *   if (m->vmp_free_when_done && m->vmp_cleaning)
3241 		 *	an msync INVALIDATE is in progress
3242 		 *	and the UPL has already gathered this page...
3243 		 *	just leave it off the paging queues
3244 		 */
3245 		if (m->vmp_free_when_done || m->vmp_cleaning) {
3246 			lock_yield_check = TRUE;
3247 			continue;
3248 		}
3249 
3250 
3251 		/*
3252 		 *	If it's absent, in error or the object is no longer alive,
3253 		 *	we can reclaim the page... in the no longer alive case,
3254 		 *	there are 2 states the page can be in that preclude us
3255 		 *	from reclaiming it - busy or cleaning - that we've already
3256 		 *	dealt with
3257 		 */
3258 		if (m->vmp_absent || m->vmp_error || !object->alive) {
3259 			if (m->vmp_absent) {
3260 				VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
3261 			} else if (!object->alive) {
3262 				VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
3263 			} else {
3264 				VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
3265 			}
3266 reclaim_page:
3267 			if (vm_pageout_deadlock_target) {
3268 				VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
3269 				vm_pageout_deadlock_target--;
3270 			}
3271 
3272 			DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
3273 
3274 			if (object->internal) {
3275 				DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
3276 			} else {
3277 				DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
3278 			}
3279 			assert(!m->vmp_cleaning);
3280 			assert(!m->vmp_laundry);
3281 
3282 			if (!object->internal &&
3283 			    object->pager != NULL &&
3284 			    object->pager->mo_pager_ops == &shared_region_pager_ops) {
3285 				shared_region_pager_reclaimed++;
3286 			}
3287 
3288 			m->vmp_busy = TRUE;
3289 
3290 			/*
3291 			 * remove page from object here since we're already
3292 			 * behind the object lock... defer the rest of the work
3293 			 * we'd normally do in vm_page_free_prepare_object
3294 			 * until 'vm_page_free_list' is called
3295 			 */
3296 			if (m->vmp_tabled) {
3297 				vm_page_remove(m, TRUE);
3298 			}
3299 
3300 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
3301 			m->vmp_snext = local_freeq;
3302 			local_freeq = m;
3303 			local_freed++;
3304 
3305 			if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3306 				vm_pageout_vminfo.vm_pageout_freed_speculative++;
3307 			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3308 				vm_pageout_vminfo.vm_pageout_freed_cleaned++;
3309 			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
3310 				vm_pageout_vminfo.vm_pageout_freed_internal++;
3311 			} else {
3312 				vm_pageout_vminfo.vm_pageout_freed_external++;
3313 			}
3314 
3315 			inactive_burst_count = 0;
3316 
3317 			lock_yield_check = TRUE;
3318 			continue;
3319 		}
3320 		if (object->copy == VM_OBJECT_NULL) {
3321 			/*
3322 			 * No one else can have any interest in this page.
3323 			 * If this is an empty purgable object, the page can be
3324 			 * reclaimed even if dirty.
3325 			 * If the page belongs to a volatile purgable object, we
3326 			 * reactivate it if the compressor isn't active.
3327 			 */
3328 			if (object->purgable == VM_PURGABLE_EMPTY) {
3329 				if (m->vmp_pmapped == TRUE) {
3330 					/* unmap the page */
3331 					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3332 					if (refmod_state & VM_MEM_MODIFIED) {
3333 						SET_PAGE_DIRTY(m, FALSE);
3334 					}
3335 				}
3336 				if (m->vmp_dirty || m->vmp_precious) {
3337 					/* we saved the cost of cleaning this page ! */
3338 					vm_page_purged_count++;
3339 				}
3340 				goto reclaim_page;
3341 			}
3342 
3343 			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
3344 				/*
3345 				 * With the VM compressor, the cost of
3346 				 * reclaiming a page is much lower (no I/O),
3347 				 * so if we find a "volatile" page, it's better
3348 				 * to let it get compressed rather than letting
3349 				 * it occupy a full page until it gets purged.
3350 				 * So no need to check for "volatile" here.
3351 				 */
3352 			} else if (object->purgable == VM_PURGABLE_VOLATILE) {
3353 				/*
3354 				 * Avoid cleaning a "volatile" page which might
3355 				 * be purged soon.
3356 				 */
3357 
3358 				/* if it's wired, we can't put it on our queue */
3359 				assert(!VM_PAGE_WIRED(m));
3360 
3361 				/* just stick it back on! */
3362 				reactivated_this_call++;
3363 
3364 				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3365 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
3366 				}
3367 
3368 				goto reactivate_page;
3369 			}
3370 		}
3371 		/*
3372 		 *	If it's being used, reactivate.
3373 		 *	(Fictitious pages are either busy or absent.)
3374 		 *	First, update the reference and dirty bits
3375 		 *	to make sure the page is unreferenced.
3376 		 */
3377 		refmod_state = -1;
3378 
3379 		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
3380 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3381 
3382 			if (refmod_state & VM_MEM_REFERENCED) {
3383 				m->vmp_reference = TRUE;
3384 			}
3385 			if (refmod_state & VM_MEM_MODIFIED) {
3386 				SET_PAGE_DIRTY(m, FALSE);
3387 			}
3388 		}
3389 
3390 		if (m->vmp_reference || m->vmp_dirty) {
3391 			/* deal with a rogue "reusable" page */
3392 			VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
3393 		}
3394 
3395 		if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
3396 			vm_pageout_state.vm_page_xpmapped_min = 0;
3397 		} else {
3398 			vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
3399 		}
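		/*
		 * Worked example of the floor computed above (hypothetical counts,
		 * not measured values): with vm_page_external_count == 100000, a
		 * divisor of 10 puts the floor at 100000 x-pmapped pages, a divisor
		 * of 40 puts it at 25000, and a divisor of 0 disables the floor
		 * entirely (it is forced to 0).
		 */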
3400 
3401 		if (!m->vmp_no_cache &&
3402 		    page_from_bg_q == FALSE &&
3403 		    (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
3404 		    (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
3405 			/*
3406 			 * The page we pulled off the inactive list has
3407 			 * been referenced.  It is possible for other
3408 			 * processors to be touching pages faster than we
3409 			 * can clear the referenced bit and traverse the
3410 			 * inactive queue, so we limit the number of
3411 			 * reactivations.
3412 			 */
3413 			if (++reactivated_this_call >= reactivate_limit) {
3414 				vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
3415 			} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
3416 				vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
3417 			} else {
3418 				uint32_t isinuse;
3419 
3420 				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3421 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
3422 				}
3423 
3424 				vm_pageout_vminfo.vm_pageout_inactive_referenced++;
3425 reactivate_page:
3426 				if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
3427 				    vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
3428 					/*
3429 					 * no explicit mappings of this object exist
3430 					 * and it's not open via the filesystem
3431 					 */
3432 					vm_page_deactivate(m);
3433 					VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
3434 				} else {
3435 					/*
3436 					 * The page was/is being used, so put back on active list.
3437 					 */
3438 					vm_page_activate(m);
3439 					counter_inc(&vm_statistics_reactivations);
3440 					inactive_burst_count = 0;
3441 				}
3442 #if CONFIG_BACKGROUND_QUEUE
3443 #if DEVELOPMENT || DEBUG
3444 				if (page_from_bg_q == TRUE) {
3445 					if (m_object->internal) {
3446 						vm_pageout_rejected_bq_internal++;
3447 					} else {
3448 						vm_pageout_rejected_bq_external++;
3449 					}
3450 				}
3451 #endif /* DEVELOPMENT || DEBUG */
3452 #endif /* CONFIG_BACKGROUND_QUEUE */
3453 
3454 				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3455 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3456 				}
3457 				vm_pageout_state.vm_pageout_inactive_used++;
3458 
3459 				lock_yield_check = TRUE;
3460 				continue;
3461 			}
3462 			/*
3463 			 * Make sure we call pmap_get_refmod() if it
3464 			 * wasn't already called just above, to update
3465 			 * the dirty bit.
3466 			 */
3467 			if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
3468 				refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3469 				if (refmod_state & VM_MEM_MODIFIED) {
3470 					SET_PAGE_DIRTY(m, FALSE);
3471 				}
3472 			}
3473 		}
3474 
3475 		/*
3476 		 * we've got a candidate page to steal...
3477 		 *
3478 		 * m->vmp_dirty is up to date courtesy of the
3479 		 * preceding check for m->vmp_reference... if
3480 		 * we get here, then m->vmp_reference had to be
3481 		 * FALSE (or possibly "reactivate_limit" was
3482 		 * exceeded), but in either case we called
3483 		 * pmap_get_refmod() and updated both
3484 		 * m->vmp_reference and m->vmp_dirty
3485 		 *
3486 		 * if it's dirty or precious we need to
3487 		 * see if the target queue is throttled;
3488 		 * if it is, we need to skip over it by moving it back
3489 		 * to the end of the inactive queue
3490 		 */
3491 
3492 		inactive_throttled = FALSE;
3493 
3494 		if (m->vmp_dirty || m->vmp_precious) {
3495 			if (object->internal) {
3496 				if (VM_PAGE_Q_THROTTLED(iq)) {
3497 					inactive_throttled = TRUE;
3498 				}
3499 			} else if (VM_PAGE_Q_THROTTLED(eq)) {
3500 				inactive_throttled = TRUE;
3501 			}
3502 		}
3503 throttle_inactive:
3504 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
3505 		    object->internal && m->vmp_dirty &&
3506 		    (object->purgable == VM_PURGABLE_DENY ||
3507 		    object->purgable == VM_PURGABLE_NONVOLATILE ||
3508 		    object->purgable == VM_PURGABLE_VOLATILE)) {
3509 			vm_page_check_pageable_safe(m);
3510 			assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3511 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
3512 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
3513 			vm_page_throttled_count++;
3514 
3515 			VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
3516 
3517 			inactive_burst_count = 0;
3518 
3519 			lock_yield_check = TRUE;
3520 			continue;
3521 		}
3522 		if (inactive_throttled == TRUE) {
3523 			vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
3524 			    &delayed_unlock, &force_anonymous, page_from_bg_q);
3525 
3526 			inactive_burst_count = 0;
3527 
3528 			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3529 				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3530 			}
3531 
3532 			lock_yield_check = TRUE;
3533 			continue;
3534 		}
3535 
3536 		/*
3537 		 * we've got a page that we can steal...
3538 		 * eliminate all mappings and make sure
3539 		 * we have the up-to-date modified state
3540 		 *
3541 		 * if we need to do a pmap_disconnect then we
3542 		 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3543 		 * provides the true state atomically... the
3544 		 * page was still mapped up to the pmap_disconnect
3545 		 * and may have been dirtied at the last microsecond
3546 		 *
3547 		 * Note that if 'pmapped' is FALSE then the page is not
3548 		 * and has not been in any map, so there is no point calling
3549 		 * pmap_disconnect().  m->vmp_dirty could have been set in anticipation
3550 		 * of likely usage of the page.
3551 		 */
3552 		if (m->vmp_pmapped == TRUE) {
3553 			int pmap_options;
3554 
3555 			/*
3556 			 * Don't count this page as going into the compressor
3557 			 * if any of these are true:
3558 			 * 1) compressed pager isn't enabled
3559 			 * 2) Freezer enabled device with compressed pager
3560 			 *    backend (exclusive use) i.e. most of the VM system
3561 			 *    (including vm_pageout_scan) has no knowledge of
3562 			 *    the compressor
3563 			 * 3) This page belongs to a file and hence will not be
3564 			 *    sent into the compressor
3565 			 */
3566 			if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
3567 			    object->internal == FALSE) {
3568 				pmap_options = 0;
3569 			} else if (m->vmp_dirty || m->vmp_precious) {
3570 				/*
3571 				 * VM knows that this page is dirty (or
3572 				 * precious) and needs to be compressed
3573 				 * rather than freed.
3574 				 * Tell the pmap layer to count this page
3575 				 * as "compressed".
3576 				 */
3577 				pmap_options = PMAP_OPTIONS_COMPRESSOR;
3578 			} else {
3579 				/*
3580 				 * VM does not know if the page needs to
3581 				 * be preserved but the pmap layer might tell
3582 				 * us if any mapping has "modified" it.
3583 				 * Let the pmap layer count this page
3584 				 * as compressed if and only if it has been
3585 				 * modified.
3586 				 */
3587 				pmap_options =
3588 				    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
3589 			}
3590 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
3591 			    pmap_options,
3592 			    NULL);
3593 			if (refmod_state & VM_MEM_MODIFIED) {
3594 				SET_PAGE_DIRTY(m, FALSE);
3595 			}
3596 		}
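		/*
		 * Summary of the accounting choice made above:
		 *   external page, or compressor not active -> no compressor accounting
		 *   dirty/precious internal page             -> PMAP_OPTIONS_COMPRESSOR
		 *   other internal page                      -> PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
		 */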
3597 
3598 		/*
3599 		 * reset our count of pages that have been reclaimed
3600 		 * since the last page was 'stolen'
3601 		 */
3602 		inactive_reclaim_run = 0;
3603 
3604 		/*
3605 		 *	If it's clean and not precious, we can free the page.
3606 		 */
3607 		if (!m->vmp_dirty && !m->vmp_precious) {
3608 			vm_pageout_state.vm_pageout_inactive_clean++;
3609 
3610 			/*
3611 			 * OK, at this point we have found a page we are going to free.
3612 			 */
3613 #if CONFIG_PHANTOM_CACHE
3614 			if (!object->internal) {
3615 				vm_phantom_cache_add_ghost(m);
3616 			}
3617 #endif
3618 			goto reclaim_page;
3619 		}
3620 
3621 		/*
3622 		 * The page may have been dirtied since the last check
3623 		 * for a throttled target queue (which may have been skipped
3624 		 * if the page was clean then).  With the dirty page
3625 		 * disconnected here, we can make one final check.
3626 		 */
3627 		if (object->internal) {
3628 			if (VM_PAGE_Q_THROTTLED(iq)) {
3629 				inactive_throttled = TRUE;
3630 			}
3631 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
3632 			inactive_throttled = TRUE;
3633 		}
3634 
3635 		if (inactive_throttled == TRUE) {
3636 			goto throttle_inactive;
3637 		}
3638 
3639 #if VM_PRESSURE_EVENTS
3640 #if CONFIG_JETSAM
3641 
3642 		/*
3643 		 * If Jetsam is enabled, then the sending
3644 		 * of memory pressure notifications is handled
3645 		 * from the same thread that takes care of high-water
3646 		 * and other jetsams i.e. the memorystatus_thread.
3647 		 */
3648 
3649 #else /* CONFIG_JETSAM */
3650 
3651 		vm_pressure_response();
3652 
3653 #endif /* CONFIG_JETSAM */
3654 #endif /* VM_PRESSURE_EVENTS */
3655 
3656 		if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3657 			VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
3658 		}
3659 
3660 		if (object->internal) {
3661 			vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
3662 		} else {
3663 			vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
3664 		}
3665 
3666 		/*
3667 		 * internal pages will go to the compressor...
3668 		 * external pages will go to the appropriate pager to be cleaned
3669 		 * and upon completion will end up on 'vm_page_queue_cleaned' which
3670 		 * is a preferred queue to steal from
3671 		 */
3672 		vm_pageout_cluster(m);
3673 		inactive_burst_count = 0;
3674 
3675 		/*
3676 		 * back to top of pageout scan loop
3677 		 */
3678 	}
3679 }
3680 
3681 
3682 void
3683 vm_page_free_reserve(
3684 	int pages)
3685 {
3686 	int             free_after_reserve;
3687 
3688 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3689 		if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
3690 			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
3691 		} else {
3692 			vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
3693 		}
3694 	} else {
3695 		if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
3696 			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
3697 		} else {
3698 			vm_page_free_reserved += pages;
3699 		}
3700 	}
3701 	free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
3702 
3703 	vm_page_free_min = vm_page_free_reserved +
3704 	    VM_PAGE_FREE_MIN(free_after_reserve);
3705 
3706 	if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
3707 		vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
3708 	}
3709 
3710 	vm_page_free_target = vm_page_free_reserved +
3711 	    VM_PAGE_FREE_TARGET(free_after_reserve);
3712 
3713 	if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
3714 		vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
3715 	}
3716 
3717 	if (vm_page_free_target < vm_page_free_min + 5) {
3718 		vm_page_free_target = vm_page_free_min + 5;
3719 	}
3720 
3721 	vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
3722 }
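/*
 * Sketch of the watermark relationships established by vm_page_free_reserve()
 * (kept symbolic, since VM_PAGE_FREE_MIN/TARGET and the *_LIMIT values are
 * platform-tuned):
 *
 *   free_after_reserve     = vm_page_free_count_init - vm_page_free_reserved
 *   vm_page_free_min       = reserved + VM_PAGE_FREE_MIN(free_after_reserve), capped
 *   vm_page_free_target    = reserved + VM_PAGE_FREE_TARGET(free_after_reserve), capped
 *                            and always at least vm_page_free_min + 5
 *   vm_page_throttle_limit = target - target/2, i.e. roughly half of the target
 */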
3723 
3724 /*
3725  *	vm_pageout is the high level pageout daemon.
3726  */
3727 
3728 void
3729 vm_pageout_continue(void)
3730 {
3731 	DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
3732 	VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
3733 
3734 	lck_mtx_lock(&vm_page_queue_free_lock);
3735 	vm_pageout_running = TRUE;
3736 	lck_mtx_unlock(&vm_page_queue_free_lock);
3737 
3738 	vm_pageout_scan();
3739 	/*
3740 	 * we hold both the vm_page_queue_free_lock
3741 	 * and the vm_page_queues_lock at this point
3742 	 */
3743 	assert(vm_page_free_wanted == 0);
3744 	assert(vm_page_free_wanted_privileged == 0);
3745 	assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
3746 
3747 	vm_pageout_running = FALSE;
3748 #if XNU_TARGET_OS_OSX
3749 	if (vm_pageout_waiter) {
3750 		vm_pageout_waiter = FALSE;
3751 		thread_wakeup((event_t)&vm_pageout_waiter);
3752 	}
3753 #endif /* XNU_TARGET_OS_OSX */
3754 
3755 	lck_mtx_unlock(&vm_page_queue_free_lock);
3756 	vm_page_unlock_queues();
3757 
3758 	thread_block((thread_continue_t)vm_pageout_continue);
3759 	/*NOTREACHED*/
3760 }
3761 
3762 #if XNU_TARGET_OS_OSX
3763 kern_return_t
3764 vm_pageout_wait(uint64_t deadline)
3765 {
3766 	kern_return_t kr;
3767 
3768 	lck_mtx_lock(&vm_page_queue_free_lock);
3769 	for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
3770 		vm_pageout_waiter = TRUE;
3771 		if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
3772 			    &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
3773 			    (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
3774 			kr = KERN_OPERATION_TIMED_OUT;
3775 		}
3776 	}
3777 	lck_mtx_unlock(&vm_page_queue_free_lock);
3778 
3779 	return kr;
3780 }
3781 #endif /* XNU_TARGET_OS_OSX */
3782 
3783 
3784 static void
3785 vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
3786 {
3787 	vm_page_t       m = NULL;
3788 	vm_object_t     object;
3789 	vm_object_offset_t offset;
3790 	memory_object_t pager;
3791 
3792 	/* On systems with a compressor, the external IO thread clears its
3793 	 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3794 	 * creation)
3795 	 */
3796 	if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
3797 		current_thread()->options &= ~TH_OPT_VMPRIV;
3798 	}
3799 
3800 	vm_page_lockspin_queues();
3801 
3802 	while (!vm_page_queue_empty(&q->pgo_pending)) {
3803 		q->pgo_busy = TRUE;
3804 		vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3805 
3806 		assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3807 		VM_PAGE_CHECK(m);
3808 		/*
3809 		 * grab a snapshot of the object and offset this
3810 		 * page is tabled in so that we can relookup this
3811 		 * page after we've taken the object lock - these
3812 		 * fields are stable while we hold the page queues lock
3813 		 * but as soon as we drop it, there is nothing to keep
3814 		 * this page in this object... we hold an activity_in_progress
3815 		 * on this object which will keep it from terminating
3816 		 */
3817 		object = VM_PAGE_OBJECT(m);
3818 		offset = m->vmp_offset;
3819 
3820 		m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3821 		VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3822 
3823 		vm_page_unlock_queues();
3824 
3825 		vm_object_lock(object);
3826 
3827 		m = vm_page_lookup(object, offset);
3828 
3829 		if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
3830 		    !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
3831 			/*
3832 			 * it's either the same page that someone else has
3833 			 * started cleaning (or it's finished cleaning or
3834 			 * been put back on the pageout queue), or
3835 			 * the page has been freed or we have found a
3836 			 * new page at this offset... in all of these cases
3837 			 * we merely need to release the activity_in_progress
3838 			 * we took when we put the page on the pageout queue
3839 			 */
3840 			vm_object_activity_end(object);
3841 			vm_object_unlock(object);
3842 
3843 			vm_page_lockspin_queues();
3844 			continue;
3845 		}
3846 		pager = object->pager;
3847 
3848 		if (pager == MEMORY_OBJECT_NULL) {
3849 			/*
3850 			 * This pager has been destroyed by either
3851 			 * memory_object_destroy or vm_object_destroy, and
3852 			 * so there is nowhere for the page to go.
3853 			 */
3854 			if (m->vmp_free_when_done) {
3855 				/*
3856 				 * Just free the page... VM_PAGE_FREE takes
3857 				 * care of cleaning up all the state...
3858 				 * including doing the vm_pageout_throttle_up
3859 				 */
3860 				VM_PAGE_FREE(m);
3861 			} else {
3862 				vm_page_lockspin_queues();
3863 
3864 				vm_pageout_throttle_up(m);
3865 				vm_page_activate(m);
3866 
3867 				vm_page_unlock_queues();
3868 
3869 				/*
3870 				 *	And we are done with it.
3871 				 */
3872 			}
3873 			vm_object_activity_end(object);
3874 			vm_object_unlock(object);
3875 
3876 			vm_page_lockspin_queues();
3877 			continue;
3878 		}
3879 #if 0
3880 		/*
3881 		 * we don't hold the page queue lock
3882 		 * so this check isn't safe to make
3883 		 */
3884 		VM_PAGE_CHECK(m);
3885 #endif
3886 		/*
3887 		 * give back the activity_in_progress reference we
3888 		 * took when we queued up this page and replace it
3889 		 * with a paging_in_progress reference that will
3890 		 * also keep the paging offset from changing and
3891 		 * prevent the object from terminating
3892 		 */
3893 		vm_object_activity_end(object);
3894 		vm_object_paging_begin(object);
3895 		vm_object_unlock(object);
3896 
3897 		/*
3898 		 * Send the data to the pager.
3899 		 * any pageout clustering happens there
3900 		 */
3901 		memory_object_data_return(pager,
3902 		    m->vmp_offset + object->paging_offset,
3903 		    PAGE_SIZE,
3904 		    NULL,
3905 		    NULL,
3906 		    FALSE,
3907 		    FALSE,
3908 		    0);
3909 
3910 		vm_object_lock(object);
3911 		vm_object_paging_end(object);
3912 		vm_object_unlock(object);
3913 
3914 		vm_pageout_io_throttle();
3915 
3916 		vm_page_lockspin_queues();
3917 	}
3918 	q->pgo_busy = FALSE;
3919 	q->pgo_idle = TRUE;
3920 
3921 	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3922 	vm_page_unlock_queues();
3923 
3924 	thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
3925 	/*NOTREACHED*/
3926 }
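/*
 * Note on the structure above: the external pageout thread never returns.
 * Once its queue drains it parks via assert_wait() and blocks with itself as
 * the continuation, so the next wakeup resumes it at the top of this function
 * on a fresh stack.
 */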
3927 
3928 
3929 #define         MAX_FREE_BATCH          32
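/*
 * Compressed pages are not handed back to the free list one at a time; the
 * compressor thread below gathers them on a local list and frees them in
 * batches of up to MAX_FREE_BATCH, amortizing the cost of the free-list lock.
 */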
3930 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
3931                                      * this thread.
3932                                      */
3933 
3934 
3935 void
3936 vm_pageout_iothread_internal_continue(struct cq *);
3937 void
3938 vm_pageout_iothread_internal_continue(struct cq *cq)
3939 {
3940 	struct vm_pageout_queue *q;
3941 	vm_page_t       m = NULL;
3942 	boolean_t       pgo_draining;
3943 	vm_page_t   local_q;
3944 	int         local_cnt;
3945 	vm_page_t   local_freeq = NULL;
3946 	int         local_freed = 0;
3947 	int         local_batch_size;
3948 #if DEVELOPMENT || DEBUG
3949 	int       ncomps = 0;
3950 	boolean_t marked_active = FALSE;
3951 #endif
3952 	KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
3953 
3954 	q = cq->q;
3955 #if __AMP__
3956 	if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
3957 		local_batch_size = (q->pgo_maxlaundry >> 3);
3958 		local_batch_size = MAX(local_batch_size, 16);
3959 	} else {
3960 		local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3961 	}
3962 #else
3963 	local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3964 #endif
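	/*
	 * Illustrative sizing (hypothetical numbers): with pgo_maxlaundry == 128
	 * and 4 compressor threads, each thread drains batches of
	 * 128 / (4 * 2) == 16 pages; in the E-core-bound __AMP__ case the batch
	 * is pgo_maxlaundry / 8, but never smaller than 16.
	 */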
3965 
3966 #if RECORD_THE_COMPRESSED_DATA
3967 	if (q->pgo_laundry) {
3968 		c_compressed_record_init();
3969 	}
3970 #endif
3971 	while (TRUE) {
3972 		int     pages_left_on_q = 0;
3973 
3974 		local_cnt = 0;
3975 		local_q = NULL;
3976 
3977 		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3978 
3979 		vm_page_lock_queues();
3980 #if DEVELOPMENT || DEBUG
3981 		if (marked_active == FALSE) {
3982 			vmct_active++;
3983 			vmct_state[cq->id] = VMCT_ACTIVE;
3984 			marked_active = TRUE;
3985 			if (vmct_active == 1) {
3986 				vm_compressor_epoch_start = mach_absolute_time();
3987 			}
3988 		}
3989 #endif
3990 		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3991 
3992 		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
3993 
3994 		while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
3995 			vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3996 			assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3997 			VM_PAGE_CHECK(m);
3998 
3999 			m->vmp_q_state = VM_PAGE_NOT_ON_Q;
4000 			VM_PAGE_ZERO_PAGEQ_ENTRY(m);
4001 			m->vmp_laundry = FALSE;
4002 
4003 			m->vmp_snext = local_q;
4004 			local_q = m;
4005 			local_cnt++;
4006 		}
4007 		if (local_q == NULL) {
4008 			break;
4009 		}
4010 
4011 		q->pgo_busy = TRUE;
4012 
4013 		if ((pgo_draining = q->pgo_draining) == FALSE) {
4014 			vm_pageout_throttle_up_batch(q, local_cnt);
4015 			pages_left_on_q = q->pgo_laundry;
4016 		} else {
4017 			pages_left_on_q = q->pgo_laundry - local_cnt;
4018 		}
4019 
4020 		vm_page_unlock_queues();
4021 
4022 #if !RECORD_THE_COMPRESSED_DATA
4023 		if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
4024 			thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
4025 		}
4026 #endif
4027 		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
4028 
4029 		while (local_q) {
4030 			KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
4031 
4032 			m = local_q;
4033 			local_q = m->vmp_snext;
4034 			m->vmp_snext = NULL;
4035 
4036 			if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
4037 #if DEVELOPMENT || DEBUG
4038 				ncomps++;
4039 #endif
4040 				KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
4041 
4042 				m->vmp_snext = local_freeq;
4043 				local_freeq = m;
4044 				local_freed++;
4045 
4046 				if (local_freed >= MAX_FREE_BATCH) {
4047 					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4048 
4049 					vm_page_free_list(local_freeq, TRUE);
4050 
4051 					local_freeq = NULL;
4052 					local_freed = 0;
4053 				}
4054 			}
4055 #if !CONFIG_JETSAM
4056 			while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4057 				kern_return_t   wait_result;
4058 				int             need_wakeup = 0;
4059 
4060 				if (local_freeq) {
4061 					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4062 
4063 					vm_page_free_list(local_freeq, TRUE);
4064 					local_freeq = NULL;
4065 					local_freed = 0;
4066 
4067 					continue;
4068 				}
4069 				lck_mtx_lock_spin(&vm_page_queue_free_lock);
4070 
4071 				if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4072 					if (vm_page_free_wanted_privileged++ == 0) {
4073 						need_wakeup = 1;
4074 					}
4075 					wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
4076 
4077 					lck_mtx_unlock(&vm_page_queue_free_lock);
4078 
4079 					if (need_wakeup) {
4080 						thread_wakeup((event_t)&vm_page_free_wanted);
4081 					}
4082 
4083 					if (wait_result == THREAD_WAITING) {
4084 						thread_block(THREAD_CONTINUE_NULL);
4085 					}
4086 				} else {
4087 					lck_mtx_unlock(&vm_page_queue_free_lock);
4088 				}
4089 			}
4090 #endif
4091 		}
4092 		if (local_freeq) {
4093 			OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4094 
4095 			vm_page_free_list(local_freeq, TRUE);
4096 			local_freeq = NULL;
4097 			local_freed = 0;
4098 		}
4099 		if (pgo_draining == TRUE) {
4100 			vm_page_lockspin_queues();
4101 			vm_pageout_throttle_up_batch(q, local_cnt);
4102 			vm_page_unlock_queues();
4103 		}
4104 	}
4105 	KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
4106 
4107 	/*
4108 	 * queue lock is held and our q is empty
4109 	 */
4110 	q->pgo_busy = FALSE;
4111 	q->pgo_idle = TRUE;
4112 
4113 	assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
4114 #if DEVELOPMENT || DEBUG
4115 	if (marked_active == TRUE) {
4116 		vmct_active--;
4117 		vmct_state[cq->id] = VMCT_IDLE;
4118 
4119 		if (vmct_active == 0) {
4120 			vm_compressor_epoch_stop = mach_absolute_time();
4121 			assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
4122 			    "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
4123 			    vm_compressor_epoch_start, vm_compressor_epoch_stop);
4124 			/* This interval includes intervals where one or more
4125 			 * compressor threads were pre-empted
4126 			 */
4127 			vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
4128 		}
4129 	}
4130 #endif
4131 	vm_page_unlock_queues();
4132 #if DEVELOPMENT || DEBUG
4133 	if (__improbable(vm_compressor_time_thread)) {
4134 		vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
4135 		vmct_stats.vmct_pages[cq->id] += ncomps;
4136 		vmct_stats.vmct_iterations[cq->id]++;
4137 		if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
4138 			vmct_stats.vmct_maxpages[cq->id] = ncomps;
4139 		}
4140 		if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
4141 			vmct_stats.vmct_minpages[cq->id] = ncomps;
4142 		}
4143 	}
4144 #endif
4145 
4146 	KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
4147 
4148 	thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
4149 	/*NOTREACHED*/
4150 }
4151 
4152 
4153 kern_return_t
4154 vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
4155 {
4156 	vm_object_t     object;
4157 	memory_object_t pager;
4158 	int             compressed_count_delta;
4159 	kern_return_t   retval;
4160 
4161 	object = VM_PAGE_OBJECT(m);
4162 
4163 	assert(!m->vmp_free_when_done);
4164 	assert(!m->vmp_laundry);
4165 
4166 	pager = object->pager;
4167 
4168 	if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4169 		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
4170 
4171 		vm_object_lock(object);
4172 
4173 		/*
4174 		 * If there is no memory object for the page, create
4175 		 * one and hand it to the compression pager.
4176 		 */
4177 
4178 		if (!object->pager_initialized) {
4179 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
4180 		}
4181 		if (!object->pager_initialized) {
4182 			vm_object_compressor_pager_create(object);
4183 		}
4184 
4185 		pager = object->pager;
4186 
4187 		if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4188 			/*
4189 			 * Still no pager for the object,
4190 			 * or the pager has been destroyed.
4191 			 * Reactivate the page.
4192 			 *
4193 			 * Should only happen if there is no
4194 			 * compression pager
4195 			 */
4196 			PAGE_WAKEUP_DONE(m);
4197 
4198 			vm_page_lockspin_queues();
4199 			vm_page_activate(m);
4200 			VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
4201 			vm_page_unlock_queues();
4202 
4203 			/*
4204 			 *	And we are done with it.
4205 			 */
4206 			vm_object_activity_end(object);
4207 			vm_object_unlock(object);
4208 
4209 			return KERN_FAILURE;
4210 		}
4211 		vm_object_unlock(object);
4212 
4213 		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
4214 	}
4215 	assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
4216 	assert(object->activity_in_progress > 0);
4217 
4218 	retval = vm_compressor_pager_put(
4219 		pager,
4220 		m->vmp_offset + object->paging_offset,
4221 		VM_PAGE_GET_PHYS_PAGE(m),
4222 		current_chead,
4223 		scratch_buf,
4224 		&compressed_count_delta);
4225 
4226 	vm_object_lock(object);
4227 
4228 	assert(object->activity_in_progress > 0);
4229 	assert(VM_PAGE_OBJECT(m) == object);
4230 	assert( !VM_PAGE_WIRED(m));
4231 
4232 	vm_compressor_pager_count(pager,
4233 	    compressed_count_delta,
4234 	    FALSE,                       /* shared_lock */
4235 	    object);
4236 
4237 	if (retval == KERN_SUCCESS) {
4238 		/*
4239 		 * If the object is purgeable, its owner's
4240 		 * purgeable ledgers will be updated in
4241 		 * vm_page_remove() but the page still
4242 		 * contributes to the owner's memory footprint,
4243 		 * so account for it as such.
4244 		 */
4245 		if ((object->purgable != VM_PURGABLE_DENY ||
4246 		    object->vo_ledger_tag) &&
4247 		    object->vo_owner != NULL) {
4248 			/* one more compressed purgeable/tagged page */
4249 			vm_object_owner_compressed_update(object,
4250 			    +1);
4251 		}
4252 		counter_inc(&vm_statistics_compressions);
4253 
4254 		if (m->vmp_tabled) {
4255 			vm_page_remove(m, TRUE);
4256 		}
4257 	} else {
4258 		PAGE_WAKEUP_DONE(m);
4259 
4260 		vm_page_lockspin_queues();
4261 
4262 		vm_page_activate(m);
4263 		vm_pageout_vminfo.vm_compressor_failed++;
4264 
4265 		vm_page_unlock_queues();
4266 	}
4267 	vm_object_activity_end(object);
4268 	vm_object_unlock(object);
4269 
4270 	return retval;
4271 }
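/*
 * Contract of vm_pageout_compress_page() as implemented above: on KERN_SUCCESS
 * the page's contents live in the compressor pager and the page has been
 * removed from its object (if it was tabled), leaving the caller free to put
 * it on a free list; on failure the page is reactivated and
 * vm_compressor_failed is bumped.
 */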
4272 
4273 
4274 static void
4275 vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
4276 {
4277 	uint32_t        policy;
4278 
4279 	if (hibernate_cleaning_in_progress == TRUE) {
4280 		req_lowpriority = FALSE;
4281 	}
4282 
4283 	if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
4284 		vm_page_unlock_queues();
4285 
4286 		if (req_lowpriority == TRUE) {
4287 			policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
4288 			DTRACE_VM(laundrythrottle);
4289 		} else {
4290 			policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
4291 			DTRACE_VM(laundryunthrottle);
4292 		}
4293 		proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
4294 		    TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
4295 
4296 		vm_page_lock_queues();
4297 		eq->pgo_lowpriority = req_lowpriority;
4298 	}
4299 }
4300 
4301 
4302 static void
4303 vm_pageout_iothread_external(void)
4304 {
4305 	thread_t        self = current_thread();
4306 
4307 	self->options |= TH_OPT_VMPRIV;
4308 
4309 	DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
4310 
4311 	proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
4312 	    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
4313 
4314 	vm_page_lock_queues();
4315 
4316 	vm_pageout_queue_external.pgo_tid = self->thread_id;
4317 	vm_pageout_queue_external.pgo_lowpriority = TRUE;
4318 	vm_pageout_queue_external.pgo_inited = TRUE;
4319 
4320 	vm_page_unlock_queues();
4321 
4322 #if CONFIG_THREAD_GROUPS
4323 	thread_group_vm_add();
4324 #endif /* CONFIG_THREAD_GROUPS */
4325 
4326 	vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
4327 
4328 	/*NOTREACHED*/
4329 }
4330 
4331 
4332 static void
4333 vm_pageout_iothread_internal(struct cq *cq)
4334 {
4335 	thread_t        self = current_thread();
4336 
4337 	self->options |= TH_OPT_VMPRIV;
4338 
4339 	vm_page_lock_queues();
4340 
4341 	vm_pageout_queue_internal.pgo_tid = self->thread_id;
4342 	vm_pageout_queue_internal.pgo_lowpriority = TRUE;
4343 	vm_pageout_queue_internal.pgo_inited = TRUE;
4344 
4345 	vm_page_unlock_queues();
4346 
4347 	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4348 		thread_vm_bind_group_add();
4349 	}
4350 
4351 #if CONFIG_THREAD_GROUPS
4352 	thread_group_vm_add();
4353 #endif /* CONFIG_THREAD_GROUPS */
4354 
4355 #if __AMP__
4356 	if (vm_compressor_ebound) {
4357 		/*
4358 		 * Use the soft bound option for vm_compressor to allow it to run on
4359 		 * P-cores if E-cluster is unavailable.
4360 		 */
4361 		thread_bind_cluster_type(self, 'E', true);
4362 	}
4363 #endif /* __AMP__ */
4364 
4365 	thread_set_thread_name(current_thread(), "VM_compressor");
4366 #if DEVELOPMENT || DEBUG
4367 	vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
4368 #endif
4369 	vm_pageout_iothread_internal_continue(cq);
4370 
4371 	/*NOTREACHED*/
4372 }
4373 
4374 kern_return_t
4375 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
4376 {
4377 	if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
4378 		return KERN_SUCCESS;
4379 	} else {
4380 		return KERN_FAILURE; /* Already set */
4381 	}
4382 }
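/*
 * Usage sketch for the registration above (the callback name is hypothetical;
 * only the first registration can succeed since the callout slot starts NULL
 * and is installed with a compare-and-swap):
 *
 *	static boolean_t my_bufcache_collect(int flags);
 *	...
 *	kern_return_t kr = vm_set_buffer_cleanup_callout(my_bufcache_collect);
 *	// kr == KERN_SUCCESS on the first call, KERN_FAILURE thereafter
 */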
4383 
4384 extern boolean_t        memorystatus_manual_testing_on;
4385 extern unsigned int     memorystatus_level;
4386 
4387 
4388 #if VM_PRESSURE_EVENTS
4389 
4390 boolean_t vm_pressure_events_enabled = FALSE;
4391 
4392 extern uint64_t next_warning_notification_sent_at_ts;
4393 extern uint64_t next_critical_notification_sent_at_ts;
4394 
4395 #define PRESSURE_LEVEL_STUCK_THRESHOLD_MINS    (30)    /* 30 minutes. */
4396 
4397 /*
4398  * The last time there was a change in pressure level OR we forced a check
4399  * because the system is stuck in a non-normal pressure level.
4400  */
4401 uint64_t  vm_pressure_last_level_transition_abs = 0;
4402 
4403 /*
4404  * This is how long the system waits 'stuck' in an unchanged non-normal pressure
4405  * level before resending notifications for that level.
4406  */
4407 int  vm_pressure_level_transition_threshold = PRESSURE_LEVEL_STUCK_THRESHOLD_MINS;
4408 
4409 void
4410 vm_pressure_response(void)
4411 {
4412 	vm_pressure_level_t     old_level = kVMPressureNormal;
4413 	int                     new_level = -1;
4414 	unsigned int            total_pages;
4415 	uint64_t                available_memory = 0;
4416 	uint64_t                curr_ts, abs_time_since_level_transition, time_in_ns;
4417 	bool                    force_check = false;
4418 	int                     time_in_mins;
4419 
4420 
4421 	if (vm_pressure_events_enabled == FALSE) {
4422 		return;
4423 	}
4424 
4425 #if !XNU_TARGET_OS_OSX
4426 
4427 	available_memory = (uint64_t) memorystatus_available_pages;
4428 
4429 #else /* !XNU_TARGET_OS_OSX */
4430 
4431 	available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4432 	memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4433 
4434 #endif /* !XNU_TARGET_OS_OSX */
4435 
4436 	total_pages = (unsigned int) atop_64(max_mem);
4437 #if CONFIG_SECLUDED_MEMORY
4438 	total_pages -= vm_page_secluded_count;
4439 #endif /* CONFIG_SECLUDED_MEMORY */
4440 	memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
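	/*
	 * memorystatus_level is simply the percentage of "available" pages.
	 * For example (hypothetical figures): 524288 available pages against a
	 * 2097152-page total yields a level of 25.
	 */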
4441 
4442 	if (memorystatus_manual_testing_on) {
4443 		return;
4444 	}
4445 
4446 	curr_ts = mach_absolute_time();
4447 	abs_time_since_level_transition = curr_ts - vm_pressure_last_level_transition_abs;
4448 
4449 	absolutetime_to_nanoseconds(abs_time_since_level_transition, &time_in_ns);
4450 	time_in_mins = (int) ((time_in_ns / NSEC_PER_SEC) / 60);
4451 	force_check = (time_in_mins >= vm_pressure_level_transition_threshold);
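	/*
	 * Example of the stuck-level check above (hypothetical timing): if the
	 * last level transition happened 31 minutes ago and the threshold is the
	 * default PRESSURE_LEVEL_STUCK_THRESHOLD_MINS (30), force_check becomes
	 * true and the current non-normal level is re-notified in the switch below.
	 */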
4452 
4453 	old_level = memorystatus_vm_pressure_level;
4454 
4455 	switch (memorystatus_vm_pressure_level) {
4456 	case kVMPressureNormal:
4457 	{
4458 		if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4459 			new_level = kVMPressureCritical;
4460 		} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
4461 			new_level = kVMPressureWarning;
4462 		}
4463 		break;
4464 	}
4465 
4466 	case kVMPressureWarning:
4467 	case kVMPressureUrgent:
4468 	{
4469 		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4470 			new_level = kVMPressureNormal;
4471 		} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4472 			new_level = kVMPressureCritical;
4473 		} else if (force_check) {
4474 			new_level = kVMPressureWarning;
4475 			next_warning_notification_sent_at_ts = curr_ts;
4476 		}
4477 		break;
4478 	}
4479 
4480 	case kVMPressureCritical:
4481 	{
4482 		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4483 			new_level = kVMPressureNormal;
4484 		} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
4485 			new_level = kVMPressureWarning;
4486 		} else if (force_check) {
4487 			new_level = kVMPressureCritical;
4488 			next_critical_notification_sent_at_ts = curr_ts;
4489 		}
4490 		break;
4491 	}
4492 
4493 	default:
4494 		return;
4495 	}
4496 
4497 	if (new_level != -1 || force_check) {
4498 		if (new_level != -1) {
4499 			memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
4500 
4501 			if (new_level != (int) old_level) {
4502 				VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4503 				    new_level, old_level, 0, 0);
4504 			}
4505 		} else {
4506 			VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4507 			    new_level, old_level, force_check, 0);
4508 		}
4509 
4510 		if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
4511 			/*
4512 			 * We don't want to schedule a wakeup while hibernation is in progress
4513 			 * because that could collide with checks for non-monotonicity in the scheduler.
4514 			 * We do however do all the updates to memorystatus_vm_pressure_level because
4515 			 * we _might_ want to use that for decisions regarding which pages or how
4516 			 * many pages we want to dump in hibernation.
4517 			 */
4518 			return;
4519 		}
4520 
4521 		if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level) || force_check) {
4522 			if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
4523 				thread_wakeup(&vm_pressure_thread);
4524 			}
4525 
4526 			if (old_level != memorystatus_vm_pressure_level) {
4527 				thread_wakeup(&vm_pageout_state.vm_pressure_changed);
4528 			}
4529 			vm_pressure_last_level_transition_abs = curr_ts; /* renew the window of observation for a stuck pressure level */
4530 		}
4531 	}
4532 }
4533 #endif /* VM_PRESSURE_EVENTS */
4534 
4535 /*
4536  * Function called by a kernel thread to either get the current pressure level or
4537  * wait until memory pressure changes from a given level.
4538  */
4539 kern_return_t
4540 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
4541 {
4542 #if !VM_PRESSURE_EVENTS
4543 
4544 	return KERN_FAILURE;
4545 
4546 #else /* VM_PRESSURE_EVENTS */
4547 
4548 	wait_result_t       wr = 0;
4549 	vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
4550 
4551 	if (pressure_level == NULL) {
4552 		return KERN_INVALID_ARGUMENT;
4553 	}
4554 
4555 	if (*pressure_level == kVMPressureJetsam) {
4556 		if (!wait_for_pressure) {
4557 			return KERN_INVALID_ARGUMENT;
4558 		}
4559 
4560 		lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
4561 		wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
4562 		    THREAD_INTERRUPTIBLE);
4563 		if (wr == THREAD_WAITING) {
4564 			++memorystatus_jetsam_fg_band_waiters;
4565 			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4566 			wr = thread_block(THREAD_CONTINUE_NULL);
4567 		} else {
4568 			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4569 		}
4570 		if (wr != THREAD_AWAKENED) {
4571 			return KERN_ABORTED;
4572 		}
4573 		*pressure_level = kVMPressureJetsam;
4574 		return KERN_SUCCESS;
4575 	}
4576 
4577 	if (wait_for_pressure == TRUE) {
4578 		while (old_level == *pressure_level) {
4579 			wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
4580 			    THREAD_INTERRUPTIBLE);
4581 			if (wr == THREAD_WAITING) {
4582 				wr = thread_block(THREAD_CONTINUE_NULL);
4583 			}
4584 			if (wr == THREAD_INTERRUPTED) {
4585 				return KERN_ABORTED;
4586 			}
4587 
4588 			if (wr == THREAD_AWAKENED) {
4589 				old_level = memorystatus_vm_pressure_level;
4590 			}
4591 		}
4592 	}
4593 
4594 	*pressure_level = old_level;
4595 	return KERN_SUCCESS;
4596 #endif /* VM_PRESSURE_EVENTS */
4597 }
4598 
4599 #if VM_PRESSURE_EVENTS
4600 void
4601 vm_pressure_thread(void)
4602 {
4603 	static boolean_t thread_initialized = FALSE;
4604 
4605 	if (thread_initialized == TRUE) {
4606 		vm_pageout_state.vm_pressure_thread_running = TRUE;
4607 		consider_vm_pressure_events();
4608 		vm_pageout_state.vm_pressure_thread_running = FALSE;
4609 	}
4610 
4611 #if CONFIG_THREAD_GROUPS
4612 	thread_group_vm_add();
4613 #endif /* CONFIG_THREAD_GROUPS */
4614 
4615 	thread_set_thread_name(current_thread(), "VM_pressure");
4616 	thread_initialized = TRUE;
4617 	assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
4618 	thread_block((thread_continue_t)vm_pressure_thread);
4619 }
4620 #endif /* VM_PRESSURE_EVENTS */
4621 
4622 
4623 /*
4624  * called once per-second via "compute_averages"
4625  */
4626 void
4627 compute_pageout_gc_throttle(__unused void *arg)
4628 {
4629 	if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
4630 		vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
4631 
4632 		thread_wakeup((event_t) &vm_pageout_garbage_collect);
4633 	}
4634 }
4635 
4636 /*
4637  * vm_pageout_garbage_collect can also be called when the zone allocator needs
4638  * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
4639  * jetsams. We need to check if the zone map size is above its jetsam limit to
4640  * decide if this was indeed the case.
4641  *
4642  * We need to do this on a different thread because of the following reasons:
4643  *
4644  * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
4645  * itself causing the system to hang. We perform synchronous jetsams if we're
4646  * leaking in the VM map entries zone, so the leaking process could be doing a
4647  * zalloc for a VM map entry while holding its vm_map lock, when it decides to
4648  * jetsam itself. We also need the vm_map lock on the process termination path,
4649  * which would now lead the dying process to deadlock against itself.
4650  *
4651  * 2. The jetsam path might need to allocate zone memory itself. We could try
4652  * using the non-blocking variant of zalloc for this path, but we can still
4653  * end up trying to do a kernel_memory_allocate when the zone maps are almost
4654  * full.
4655  */
4656 bool garbage_collect_thread_inited = false;
4657 void
4658 vm_pageout_garbage_collect(int collect)
4659 {
4660 	if (!garbage_collect_thread_inited) {
4661 #if CONFIG_THREAD_GROUPS
4662 		thread_group_vm_add();
4663 #endif /* CONFIG_THREAD_GROUPS */
4664 		garbage_collect_thread_inited = true;
4665 	}
4666 
4667 	if (collect) {
4668 		if (zone_map_nearing_exhaustion()) {
4669 			/*
4670 			 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
4671 			 *
4672 			 * Bail out after calling zone_gc (which triggers the
4673 			 * zone-map-exhaustion jetsams). If we fall through, the subsequent
4674 			 * operations that clear out a bunch of caches might allocate zone
4675 			 * memory themselves (for eg. vm_map operations would need VM map
4676 			 * entries). Since the zone map is almost full at this point, we
4677 			 * could end up with a panic. We just need to quickly jetsam a
4678 			 * process and exit here.
4679 			 *
4680 			 * It could so happen that we were woken up to relieve memory
4681 			 * pressure and the zone map also happened to be near its limit at
4682 			 * the time, in which case we'll skip out early. But that should be
4683 			 * ok; if memory pressure persists, the thread will simply be woken
4684 			 * up again.
4685 			 */
4686 			zone_gc(ZONE_GC_JETSAM);
4687 		} else {
4688 			/* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
4689 			boolean_t buf_large_zfree = FALSE;
4690 			boolean_t first_try = TRUE;
4691 
4692 			stack_collect();
4693 
4694 			consider_machine_collect();
4695 			mbuf_drain(FALSE);
4696 
4697 			do {
4698 				if (consider_buffer_cache_collect != NULL) {
4699 					buf_large_zfree = (*consider_buffer_cache_collect)(0);
4700 				}
4701 				if (first_try == TRUE || buf_large_zfree == TRUE) {
4702 					/*
4703 					 * zone_gc should be last, because the other operations
4704 					 * might return memory to zones.
4705 					 */
4706 					zone_gc(ZONE_GC_TRIM);
4707 				}
4708 				first_try = FALSE;
4709 			} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
4710 
4711 			consider_machine_adjust();
4712 		}
4713 	}
4714 
4715 	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
4716 
4717 	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
4718 	/*NOTREACHED*/
4719 }
4720 
4721 
4722 #if VM_PAGE_BUCKETS_CHECK
4723 #if VM_PAGE_FAKE_BUCKETS
4724 extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
4725 #endif /* VM_PAGE_FAKE_BUCKETS */
4726 #endif /* VM_PAGE_BUCKETS_CHECK */
4727 
4728 
4729 
4730 void
4731 vm_set_restrictions(unsigned int num_cpus)
4732 {
4733 	int vm_restricted_to_single_processor = 0;
4734 
4735 	if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
4736 		kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
4737 		vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
4738 	} else {
4739 		assert(num_cpus > 0);
4740 
4741 		if (num_cpus <= 3) {
4742 			/*
4743 			 * on systems with a limited number of CPUS, bind the
4744 			 * 4 major threads that can free memory and that tend to use
4745 			 * a fair bit of CPU under pressured conditions to a single processor.
4746 			 * This ensures that these threads don't hog all of the available CPUs
4747 			 * (important for camera launch), while allowing them to run independently
4748 			 * w/r to locks... the 4 threads are
4749 			 * vm_pageout_scan,  vm_pageout_iothread_internal (compressor),
4750 			 * vm_compressor_swap_trigger_thread (minor and major compactions),
4751 			 * memorystatus_thread (jetsams).
4752 			 *
4753 			 * the first time the thread is run, it is responsible for checking the
4754 			 * state of vm_restricted_to_single_processor, and if TRUE it calls
4755 			 * thread_bind_master...  someday this should be replaced with a group
4756 			 * scheduling mechanism and KPI.
4757 			 */
4758 			vm_pageout_state.vm_restricted_to_single_processor = TRUE;
4759 		} else {
4760 			vm_pageout_state.vm_restricted_to_single_processor = FALSE;
4761 		}
4762 	}
4763 }
4764 
4765 /*
4766  * Set up vm_config based on the vm_compressor_mode.
4767  * Must run BEFORE the pageout thread starts up.
4768  */
4769 __startup_func
4770 void
4771 vm_config_init(void)
4772 {
4773 	bzero(&vm_config, sizeof(vm_config));
4774 
4775 	switch (vm_compressor_mode) {
4776 	case VM_PAGER_DEFAULT:
4777 		printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
4778 		OS_FALLTHROUGH;
4779 
4780 	case VM_PAGER_COMPRESSOR_WITH_SWAP:
4781 		vm_config.compressor_is_present = TRUE;
4782 		vm_config.swap_is_present = TRUE;
4783 		vm_config.compressor_is_active = TRUE;
4784 		vm_config.swap_is_active = TRUE;
4785 		break;
4786 
4787 	case VM_PAGER_COMPRESSOR_NO_SWAP:
4788 		vm_config.compressor_is_present = TRUE;
4789 		vm_config.swap_is_present = TRUE;
4790 		vm_config.compressor_is_active = TRUE;
4791 		break;
4792 
4793 	case VM_PAGER_FREEZER_DEFAULT:
4794 		printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
4795 		OS_FALLTHROUGH;
4796 
4797 	case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
4798 		vm_config.compressor_is_present = TRUE;
4799 		vm_config.swap_is_present = TRUE;
4800 		break;
4801 
4802 	case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
4803 		vm_config.compressor_is_present = TRUE;
4804 		vm_config.swap_is_present = TRUE;
4805 		vm_config.compressor_is_active = TRUE;
4806 		vm_config.freezer_swap_is_active = TRUE;
4807 		break;
4808 
4809 	case VM_PAGER_NOT_CONFIGURED:
4810 		break;
4811 
4812 	default:
4813 		printf("unknown compressor mode - %x\n", vm_compressor_mode);
4814 		break;
4815 	}
4816 }
4817 
4818 void
4819 vm_pageout(void)
4820 {
4821 	thread_t        self = current_thread();
4822 	thread_t        thread;
4823 	kern_return_t   result;
4824 	spl_t           s;
4825 
4826 	/*
4827 	 * Set thread privileges.
4828 	 */
4829 	s = splsched();
4830 
4831 #if CONFIG_VPS_DYNAMIC_PRIO
4832 
4833 	int             vps_dynprio_bootarg = 0;
4834 
4835 	if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
4836 		vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
4837 		kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
4838 	} else {
4839 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4840 			vps_dynamic_priority_enabled = TRUE;
4841 		} else {
4842 			vps_dynamic_priority_enabled = FALSE;
4843 		}
4844 	}
4845 
4846 	if (vps_dynamic_priority_enabled) {
4847 		sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
4848 		thread_set_eager_preempt(self);
4849 	} else {
4850 		sched_set_kernel_thread_priority(self, BASEPRI_VM);
4851 	}
4852 
4853 #else /* CONFIG_VPS_DYNAMIC_PRIO */
4854 
4855 	vps_dynamic_priority_enabled = FALSE;
4856 	sched_set_kernel_thread_priority(self, BASEPRI_VM);
4857 
4858 #endif /* CONFIG_VPS_DYNAMIC_PRIO */
4859 
4860 	thread_lock(self);
4861 	self->options |= TH_OPT_VMPRIV;
4862 	thread_unlock(self);
4863 
4864 	if (!self->reserved_stack) {
4865 		self->reserved_stack = self->kernel_stack;
4866 	}
4867 
4868 	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
4869 	    vps_dynamic_priority_enabled == FALSE) {
4870 		thread_vm_bind_group_add();
4871 	}
4872 
4873 
4874 #if CONFIG_THREAD_GROUPS
4875 	thread_group_vm_add();
4876 #endif /* CONFIG_THREAD_GROUPS */
4877 
4878 #if __AMP__
4879 	PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
4880 	if (vm_pgo_pbound) {
4881 		/*
4882 		 * Use the soft bound option for vm pageout to allow it to run on
4883 		 * E-cores if P-cluster is unavailable.
4884 		 */
4885 		thread_bind_cluster_type(self, 'P', true);
4886 	}
4887 #endif /* __AMP__ */
4888 
4889 	splx(s);
4890 
4891 	thread_set_thread_name(current_thread(), "VM_pageout_scan");
4892 
4893 	/*
4894 	 *	Initialize some paging parameters.
4895 	 */
4896 
4897 	vm_pageout_state.vm_pressure_thread_running = FALSE;
4898 	vm_pageout_state.vm_pressure_changed = FALSE;
4899 	vm_pageout_state.memorystatus_purge_on_warning = 2;
4900 	vm_pageout_state.memorystatus_purge_on_urgent = 5;
4901 	vm_pageout_state.memorystatus_purge_on_critical = 8;
4902 	vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
4903 	vm_pageout_state.vm_page_speculative_percentage = 5;
4904 	vm_pageout_state.vm_page_speculative_target = 0;
4905 
4906 	vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
4907 	vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
4908 
4909 	vm_pageout_state.vm_pageout_swap_wait = 0;
4910 	vm_pageout_state.vm_pageout_idle_wait = 0;
4911 	vm_pageout_state.vm_pageout_empty_wait = 0;
4912 	vm_pageout_state.vm_pageout_burst_wait = 0;
4913 	vm_pageout_state.vm_pageout_deadlock_wait = 0;
4914 	vm_pageout_state.vm_pageout_deadlock_relief = 0;
4915 	vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
4916 
4917 	vm_pageout_state.vm_pageout_inactive = 0;
4918 	vm_pageout_state.vm_pageout_inactive_used = 0;
4919 	vm_pageout_state.vm_pageout_inactive_clean = 0;
4920 
4921 	vm_pageout_state.vm_memory_pressure = 0;
4922 	vm_pageout_state.vm_page_filecache_min = 0;
4923 #if CONFIG_JETSAM
4924 	vm_pageout_state.vm_page_filecache_min_divisor = 70;
4925 	vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
4926 #else
4927 	vm_pageout_state.vm_page_filecache_min_divisor = 27;
4928 	vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
4929 #endif
4930 	vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
4931 
4932 	vm_pageout_state.vm_pageout_considered_page_last = 0;
4933 
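	/*
	 * Fall back to the compile-time defaults for any pageout wait /
	 * relief parameters that are still zero at this point.
	 */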
4934 	if (vm_pageout_state.vm_pageout_swap_wait == 0) {
4935 		vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
4936 	}
4937 
4938 	if (vm_pageout_state.vm_pageout_idle_wait == 0) {
4939 		vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
4940 	}
4941 
4942 	if (vm_pageout_state.vm_pageout_burst_wait == 0) {
4943 		vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
4944 	}
4945 
4946 	if (vm_pageout_state.vm_pageout_empty_wait == 0) {
4947 		vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
4948 	}
4949 
4950 	if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
4951 		vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
4952 	}
4953 
4954 	if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
4955 		vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
4956 	}
4957 
4958 	if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
4959 		vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
4960 	}
4961 	/*
4962 	 * Even if we've already called vm_page_free_reserve,
4963 	 * call it again here to ensure that the targets are
4964 	 * accurately calculated (it uses vm_page_free_count_init).
4965 	 * Calling it with an arg of 0 will not change the reserve
4966 	 * but will re-calculate free_min and free_target.
4967 	 */
4968 	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
4969 		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
4970 	} else {
4971 		vm_page_free_reserve(0);
4972 	}
4973 
4974 
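	/*
	 * Initialize the external (file-backed) and internal (compressor)
	 * pageout queues.  The internal queue's pgo_maxlaundry is sized
	 * later, in vm_pageout_internal_start(), once the number of
	 * compressor threads is known.
	 */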
4975 	vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
4976 	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
4977 	vm_pageout_queue_external.pgo_laundry = 0;
4978 	vm_pageout_queue_external.pgo_idle = FALSE;
4979 	vm_pageout_queue_external.pgo_busy = FALSE;
4980 	vm_pageout_queue_external.pgo_throttled = FALSE;
4981 	vm_pageout_queue_external.pgo_draining = FALSE;
4982 	vm_pageout_queue_external.pgo_lowpriority = FALSE;
4983 	vm_pageout_queue_external.pgo_tid = -1;
4984 	vm_pageout_queue_external.pgo_inited = FALSE;
4985 
4986 	vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
4987 	vm_pageout_queue_internal.pgo_maxlaundry = 0;
4988 	vm_pageout_queue_internal.pgo_laundry = 0;
4989 	vm_pageout_queue_internal.pgo_idle = FALSE;
4990 	vm_pageout_queue_internal.pgo_busy = FALSE;
4991 	vm_pageout_queue_internal.pgo_throttled = FALSE;
4992 	vm_pageout_queue_internal.pgo_draining = FALSE;
4993 	vm_pageout_queue_internal.pgo_lowpriority = FALSE;
4994 	vm_pageout_queue_internal.pgo_tid = -1;
4995 	vm_pageout_queue_internal.pgo_inited = FALSE;
4996 
4997 	/* internal pageout thread started when default pager registered first time */
4998 	/* external pageout and garbage collection threads started here */
4999 
5000 	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
5001 	    BASEPRI_VM,
5002 	    &vm_pageout_state.vm_pageout_external_iothread);
5003 	if (result != KERN_SUCCESS) {
5004 		panic("vm_pageout_iothread_external: create failed");
5005 	}
5006 	thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
5007 	thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
5008 
5009 	result = kernel_thread_create((thread_continue_t)vm_pageout_garbage_collect, NULL,
5010 	    BASEPRI_DEFAULT,
5011 	    &thread);
5012 	if (result != KERN_SUCCESS) {
5013 		panic("vm_pageout_garbage_collect: create failed");
5014 	}
5015 	thread_set_thread_name(thread, "VM_pageout_garbage_collect");
5016 	if (thread->reserved_stack == 0) {
5017 		assert(thread->kernel_stack);
5018 		thread->reserved_stack = thread->kernel_stack;
5019 	}
5020 
5021 	thread_mtx_lock(thread);
5022 	thread_start(thread);
5023 	thread_mtx_unlock(thread);
5024 
5025 	thread_deallocate(thread);
5026 
5027 #if VM_PRESSURE_EVENTS
5028 	result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
5029 	    BASEPRI_DEFAULT,
5030 	    &thread);
5031 
5032 	if (result != KERN_SUCCESS) {
5033 		panic("vm_pressure_thread: create failed");
5034 	}
5035 
5036 	thread_deallocate(thread);
5037 #endif
5038 
5039 	vm_object_reaper_init();
5040 
5041 
5042 	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
5043 		vm_compressor_init();
5044 	}
5045 
5046 #if VM_PRESSURE_EVENTS
5047 	vm_pressure_events_enabled = TRUE;
5048 #endif /* VM_PRESSURE_EVENTS */
5049 
5050 #if CONFIG_PHANTOM_CACHE
5051 	vm_phantom_cache_init();
5052 #endif
5053 #if VM_PAGE_BUCKETS_CHECK
5054 #if VM_PAGE_FAKE_BUCKETS
5055 	printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
5056 	    (uint64_t) vm_page_fake_buckets_start,
5057 	    (uint64_t) vm_page_fake_buckets_end);
5058 	pmap_protect(kernel_pmap,
5059 	    vm_page_fake_buckets_start,
5060 	    vm_page_fake_buckets_end,
5061 	    VM_PROT_READ);
5062 //	*(char *) vm_page_fake_buckets_start = 'x';	/* panic! */
5063 #endif /* VM_PAGE_FAKE_BUCKETS */
5064 #endif /* VM_PAGE_BUCKETS_CHECK */
5065 
5066 #if VM_OBJECT_TRACKING
5067 	vm_object_tracking_init();
5068 #endif /* VM_OBJECT_TRACKING */
5069 
5070 	vm_pageout_continue();
5071 
5072 	/*
5073 	 * Unreached code!
5074 	 *
5075 	 * The vm_pageout_continue() call above never returns, so the code below is never
5076 	 * executed.  We take advantage of this to declare several DTrace VM related probe
5077 	 * points that our kernel doesn't have an analog for.  These are probe points that
5078 	 * exist in Solaris and are in the DTrace documentation, so people may have written
5079 	 * scripts that use them.  Declaring the probe points here means their scripts will
5080 	 * compile and execute which we want for portability of the scripts, but since this
5081 	 * section of code is never reached, the probe points will simply never fire.  Yes,
5082 	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
5083 	 * Solaris specific VM events in mind, not portability to different VM implementations.
5084 	 */
5085 
5086 	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
5087 	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
5088 	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
5089 	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
5090 	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
5091 	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
5092 	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
5093 	/*NOTREACHED*/
5094 }
5095 
5096 
5097 
5098 kern_return_t
5099 vm_pageout_internal_start(void)
5100 {
5101 	kern_return_t   result;
5102 	host_basic_info_data_t hinfo;
5103 	vm_offset_t     buf, bufsize;
5104 
5105 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5106 
5107 	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5108 #define BSD_HOST 1
5109 	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
5110 
5111 	assert(hinfo.max_cpus > 0);
5112 
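	/*
	 * Choose a default number of compressor (internal pageout) threads:
	 * one on embedded platforms, two on macOS systems with more than
	 * four CPUs.  A boot-arg may override this; the result is then
	 * clamped to at least 1, at most MAX_COMPRESSOR_THREAD_COUNT, and
	 * below the number of CPUs.
	 */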
5113 #if !XNU_TARGET_OS_OSX
5114 	vm_pageout_state.vm_compressor_thread_count = 1;
5115 #else /* !XNU_TARGET_OS_OSX */
5116 	if (hinfo.max_cpus > 4) {
5117 		vm_pageout_state.vm_compressor_thread_count = 2;
5118 	} else {
5119 		vm_pageout_state.vm_compressor_thread_count = 1;
5120 	}
5121 #endif /* !XNU_TARGET_OS_OSX */
5122 	PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
5123 	    sizeof(vm_pageout_state.vm_compressor_thread_count));
5124 
5125 #if     __AMP__
5126 	PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound));
5127 	if (vm_compressor_ebound) {
5128 		vm_pageout_state.vm_compressor_thread_count = 2;
5129 	}
5130 #endif
5131 	if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
5132 		vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
5133 	}
5134 	if (vm_pageout_state.vm_compressor_thread_count <= 0) {
5135 		vm_pageout_state.vm_compressor_thread_count = 1;
5136 	} else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
5137 		vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
5138 	}
5139 
5140 	vm_pageout_queue_internal.pgo_maxlaundry =
5141 	    (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
5142 
5143 	PE_parse_boot_argn("vmpgoi_maxlaundry",
5144 	    &vm_pageout_queue_internal.pgo_maxlaundry,
5145 	    sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
5146 
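	/*
	 * Allocate one contiguous scratch area and hand each compressor
	 * thread its own COMPRESSOR_SCRATCH_BUF_SIZE slice, then start an
	 * internal pageout iothread per slot.
	 */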
5147 	bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
5148 	if (kernel_memory_allocate(kernel_map, &buf,
5149 	    bufsize * vm_pageout_state.vm_compressor_thread_count,
5150 	    0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
5151 		panic("vm_pageout_internal_start: Unable to allocate %zd bytes",
5152 		    (size_t)(bufsize * vm_pageout_state.vm_compressor_thread_count));
5153 	}
5154 
5155 	for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
5156 		ciq[i].id = i;
5157 		ciq[i].q = &vm_pageout_queue_internal;
5158 		ciq[i].current_chead = NULL;
5159 		ciq[i].scratch_buf = (char *)(buf + i * bufsize);
5160 
5161 		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
5162 		    (void *)&ciq[i], BASEPRI_VM,
5163 		    &vm_pageout_state.vm_pageout_internal_iothread);
5164 
5165 		if (result == KERN_SUCCESS) {
5166 			thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
5167 		} else {
5168 			break;
5169 		}
5170 	}
5171 	return result;
5172 }
5173 
5174 #if CONFIG_IOSCHED
5175 /*
5176  * To support I/O Expedite for compressed files we mark the upls with special flags.
5177  * The way decmpfs works is that we create a big upl which marks all the pages needed to
5178  * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
5179  * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
5180  * being held in the big original UPL. We mark each of these smaller UPLs with the flag
5181  * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
5182  * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
5183  * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
5184  * unless the real I/O upl is being destroyed).
5185  */
5186 
5187 
5188 static void
5189 upl_set_decmp_info(upl_t upl, upl_t src_upl)
5190 {
5191 	assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5192 
5193 	upl_lock(src_upl);
5194 	if (src_upl->decmp_io_upl) {
5195 		/*
5196 		 * If there is already an alive real I/O UPL, ignore this new UPL.
5197 		 * This case should rarely happen and even if it does, it just means
5198 		 * that we might issue a spurious expedite which the driver is expected
5199 		 * to handle.
5200 		 */
5201 		upl_unlock(src_upl);
5202 		return;
5203 	}
5204 	src_upl->decmp_io_upl = (void *)upl;
5205 	src_upl->ref_count++;
5206 
5207 	upl->flags |= UPL_DECMP_REAL_IO;
5208 	upl->decmp_io_upl = (void *)src_upl;
5209 	upl_unlock(src_upl);
5210 }
5211 #endif /* CONFIG_IOSCHED */
5212 
5213 #if UPL_DEBUG
5214 int     upl_debug_enabled = 1;
5215 #else
5216 int     upl_debug_enabled = 0;
5217 #endif
5218 
5219 static upl_t
5220 upl_create(int type, int flags, upl_size_t size)
5221 {
5222 	upl_t   upl;
5223 	vm_size_t       page_field_size = 0;
5224 	int     upl_flags = 0;
5225 	vm_size_t       upl_size  = sizeof(struct upl);
5226 
5227 	assert(page_aligned(size));
5228 
5229 	size = round_page_32(size);
5230 
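	/*
	 * A "lite" UPL keeps a bitmap with one bit per page (rounded up to a
	 * 32-bit boundary); an internal UPL additionally embeds the
	 * upl_page_info array directly after the upl structure.
	 */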
5231 	if (type & UPL_CREATE_LITE) {
5232 		page_field_size = (atop(size) + 7) >> 3;
5233 		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5234 
5235 		upl_flags |= UPL_LITE;
5236 	}
5237 	if (type & UPL_CREATE_INTERNAL) {
5238 		upl_size += sizeof(struct upl_page_info) * atop(size);
5239 
5240 		upl_flags |= UPL_INTERNAL;
5241 	}
5242 	upl = (upl_t)kalloc(upl_size + page_field_size);
5243 
5244 	if (page_field_size) {
5245 		bzero((char *)upl + upl_size, page_field_size);
5246 	}
5247 
5248 	upl->flags = upl_flags | flags;
5249 	upl->kaddr = (vm_offset_t)0;
5250 	upl->u_offset = 0;
5251 	upl->u_size = 0;
5252 	upl->u_mapped_size = 0;
5253 	upl->map_object = NULL;
5254 	upl->ref_count = 1;
5255 	upl->ext_ref_count = 0;
5256 	upl->highest_page = 0;
5257 	upl_lock_init(upl);
5258 	upl->vector_upl = NULL;
5259 	upl->associated_upl = NULL;
5260 	upl->upl_iodone = NULL;
5261 #if CONFIG_IOSCHED
5262 	if (type & UPL_CREATE_IO_TRACKING) {
5263 		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
5264 	}
5265 
5266 	upl->upl_reprio_info = 0;
5267 	upl->decmp_io_upl = 0;
5268 	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
5269 		/* Only support expedite on internal UPLs */
5270 		thread_t        curthread = current_thread();
5271 		upl->upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(size), Z_WAITOK | Z_ZERO);
5272 		upl->flags |= UPL_EXPEDITE_SUPPORTED;
5273 		if (curthread->decmp_upl != NULL) {
5274 			upl_set_decmp_info(upl, curthread->decmp_upl);
5275 		}
5276 	}
5277 #endif
5278 #if CONFIG_IOSCHED || UPL_DEBUG
5279 	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
5280 		upl->upl_creator = current_thread();
5281 		upl->uplq.next = 0;
5282 		upl->uplq.prev = 0;
5283 		upl->flags |= UPL_TRACKED_BY_OBJECT;
5284 	}
5285 #endif
5286 
5287 #if UPL_DEBUG
5288 	upl->ubc_alias1 = 0;
5289 	upl->ubc_alias2 = 0;
5290 
5291 	upl->upl_state = 0;
5292 	upl->upl_commit_index = 0;
5293 	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
5294 
5295 	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5296 #endif /* UPL_DEBUG */
5297 
5298 	return upl;
5299 }
5300 
5301 static void
5302 upl_destroy(upl_t upl)
5303 {
5304 	int     page_field_size;  /* bit field in word size buf */
5305 	int     size;
5306 
5307 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);
5308 
5309 	if (upl->ext_ref_count) {
5310 		panic("upl(%p) ext_ref_count", upl);
5311 	}
5312 
5313 #if CONFIG_IOSCHED
5314 	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
5315 		upl_t src_upl;
5316 		src_upl = upl->decmp_io_upl;
5317 		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5318 		upl_lock(src_upl);
5319 		src_upl->decmp_io_upl = NULL;
5320 		upl_unlock(src_upl);
5321 		upl_deallocate(src_upl);
5322 	}
5323 #endif /* CONFIG_IOSCHED */
5324 
5325 #if CONFIG_IOSCHED || UPL_DEBUG
5326 	if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
5327 	    !(upl->flags & UPL_VECTOR)) {
5328 		vm_object_t     object;
5329 
5330 		if (upl->flags & UPL_SHADOWED) {
5331 			object = upl->map_object->shadow;
5332 		} else {
5333 			object = upl->map_object;
5334 		}
5335 
5336 		vm_object_lock(object);
5337 		queue_remove(&object->uplq, upl, upl_t, uplq);
5338 		vm_object_activity_end(object);
5339 		vm_object_collapse(object, 0, TRUE);
5340 		vm_object_unlock(object);
5341 	}
5342 #endif
5343 	/*
5344 	 * drop a reference on the map_object whether or
5345 	 * not a pageout object is inserted
5346 	 */
5347 	if (upl->flags & UPL_SHADOWED) {
5348 		vm_object_deallocate(upl->map_object);
5349 	}
5350 
5351 	if (upl->flags & UPL_DEVICE_MEMORY) {
5352 		size = PAGE_SIZE;
5353 	} else {
5354 		size = upl_adjusted_size(upl, PAGE_MASK);
5355 	}
5356 	page_field_size = 0;
5357 
5358 	if (upl->flags & UPL_LITE) {
5359 		page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
5360 		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5361 	}
5362 	upl_lock_destroy(upl);
5363 	upl->vector_upl = (vector_upl_t) 0xfeedbeef;
5364 
5365 #if CONFIG_IOSCHED
5366 	if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
5367 		kfree_data(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
5368 	}
5369 #endif
5370 
5371 	if (upl->flags & UPL_INTERNAL) {
5372 		kfree(upl,
5373 		    sizeof(struct upl) +
5374 		    (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
5375 		    + page_field_size);
5376 	} else {
5377 		kfree(upl, sizeof(struct upl) + page_field_size);
5378 	}
5379 }
5380 
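/*
 * Drop a reference on the UPL.  On the final release, tear down any
 * vector sub-UPLs, run the iodone callout if one was registered, and
 * destroy the UPL itself.
 */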
5381 void
5382 upl_deallocate(upl_t upl)
5383 {
5384 	upl_lock(upl);
5385 
5386 	if (--upl->ref_count == 0) {
5387 		if (vector_upl_is_valid(upl)) {
5388 			vector_upl_deallocate(upl);
5389 		}
5390 		upl_unlock(upl);
5391 
5392 		if (upl->upl_iodone) {
5393 			upl_callout_iodone(upl);
5394 		}
5395 
5396 		upl_destroy(upl);
5397 	} else {
5398 		upl_unlock(upl);
5399 	}
5400 }
5401 
5402 #if CONFIG_IOSCHED
5403 void
5404 upl_mark_decmp(upl_t upl)
5405 {
5406 	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
5407 		upl->flags |= UPL_DECMP_REQ;
5408 		upl->upl_creator->decmp_upl = (void *)upl;
5409 	}
5410 }
5411 
5412 void
5413 upl_unmark_decmp(upl_t upl)
5414 {
5415 	if (upl && (upl->flags & UPL_DECMP_REQ)) {
5416 		upl->upl_creator->decmp_upl = NULL;
5417 	}
5418 }
5419 
5420 #endif /* CONFIG_IOSCHED */
5421 
5422 #define VM_PAGE_Q_BACKING_UP(q)         \
5423 	((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
5424 
5425 boolean_t must_throttle_writes(void);
5426 
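/*
 * Writes must be throttled when the external pageout queue is at least
 * 80% of its laundry limit (VM_PAGE_Q_BACKING_UP) and external pageable
 * pages exceed 60% of the available non-compressed memory.
 */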
5427 boolean_t
5428 must_throttle_writes()
5429 {
5430 	if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
5431 	    vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
5432 		return TRUE;
5433 	}
5434 
5435 	return FALSE;
5436 }
5437 
5438 #define MIN_DELAYED_WORK_CTX_ALLOCATED  (16)
5439 #define MAX_DELAYED_WORK_CTX_ALLOCATED  (512)
5440 
5441 int vm_page_delayed_work_ctx_needed = 0;
5442 SECURITY_READ_ONLY_LATE(zone_t) dw_ctx_zone;
5443 
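/*
 * Delayed-work contexts come from an exhaustible zone capped at
 * MAX_DELAYED_WORK_CTX_ALLOCATED elements and pre-filled with
 * MIN_DELAYED_WORK_CTX_ALLOCATED, so vm_page_delayed_work_get_ctx()
 * can satisfy requests with a non-blocking allocation.
 */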
5444 void
5445 vm_page_delayed_work_init_ctx(void)
5446 {
5447 	size_t elem_size = sizeof(struct vm_page_delayed_work_ctx);
5448 
5449 	dw_ctx_zone = zone_create_ext("delayed-work-ctx", elem_size,
5450 	    ZC_NOGC, ZONE_ID_ANY, ^(zone_t z) {
5451 		zone_set_exhaustible(z, MAX_DELAYED_WORK_CTX_ALLOCATED);
5452 	});
5453 
5454 	zone_fill_initially(dw_ctx_zone, MIN_DELAYED_WORK_CTX_ALLOCATED);
5455 }
5456 
5457 struct vm_page_delayed_work*
5458 vm_page_delayed_work_get_ctx(void)
5459 {
5460 	struct vm_page_delayed_work_ctx * dw_ctx = NULL;
5461 
5462 	dw_ctx = (struct vm_page_delayed_work_ctx*) zalloc_noblock(dw_ctx_zone);
5463 
5464 	if (dw_ctx) {
5465 		dw_ctx->delayed_owner = current_thread();
5466 	} else {
5467 		vm_page_delayed_work_ctx_needed++;
5468 	}
5469 	return dw_ctx ? dw_ctx->dwp : NULL;
5470 }
5471 
5472 void
5473 vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp)
5474 {
5475 	struct  vm_page_delayed_work_ctx *ldw_ctx;
5476 
5477 	ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp;
5478 	ldw_ctx->delayed_owner = NULL;
5479 
5480 	zfree(dw_ctx_zone, ldw_ctx);
5481 }
5482 
5483 /*
5484  *	Routine:	vm_object_upl_request
5485  *	Purpose:
5486  *		Cause the population of a portion of a vm_object.
5487  *		Depending on the nature of the request, the pages
5488  *		returned may contain valid data or be uninitialized.
5489  *		A page list structure, listing the physical pages
5490  *		will be returned upon request.
5491  *		This function is called by the file system or any other
5492  *		supplier of backing store to a pager.
5493  *		IMPORTANT NOTE: The caller must still respect the relationship
5494  *		between the vm_object and its backing memory object.  The
5495  *		caller MUST NOT substitute changes in the backing file
5496  *		without first doing a memory_object_lock_request on the
5497  *		target range unless it is known that the pages are not
5498  *		shared with another entity at the pager level.
5499  *		Copy_in_to:
5500  *			if a page list structure is present
5501  *			return the mapped physical pages, where a
5502  *			page is not present, return a non-initialized
5503  *			one.  If the no_sync bit is turned on, don't
5504  *			call the pager unlock to synchronize with other
5505  *			possible copies of the page. Leave pages busy
5506  *			in the original object, if a page list structure
5507  *			was specified.  When a commit of the page list
5508  *			pages is done, the dirty bit will be set for each one.
5509  *		Copy_out_from:
5510  *			If a page list structure is present, return
5511  *			all mapped pages.  Where a page does not exist
5512  *			map a zero filled one. Leave pages busy in
5513  *			the original object.  If a page list structure
5514  *			is not specified, this call is a no-op.
5515  *
5516  *		Note:  access of default pager objects has a rather interesting
5517  *		twist.  The caller of this routine, presumably the file system
5518  *		page cache handling code, will never actually make a request
5519  *		against a default pager backed object.  Only the default
5520  *		pager will make requests on backing store related vm_objects
5521  *		In this way the default pager can maintain the relationship
5522  *		between backing store files (abstract memory objects) and
5523  *		the vm_objects (cache objects) they support.
5524  *
5525  */
5526 
5527 __private_extern__ kern_return_t
5528 vm_object_upl_request(
5529 	vm_object_t             object,
5530 	vm_object_offset_t      offset,
5531 	upl_size_t              size,
5532 	upl_t                   *upl_ptr,
5533 	upl_page_info_array_t   user_page_list,
5534 	unsigned int            *page_list_count,
5535 	upl_control_flags_t     cntrl_flags,
5536 	vm_tag_t                tag)
5537 {
5538 	vm_page_t               dst_page = VM_PAGE_NULL;
5539 	vm_object_offset_t      dst_offset;
5540 	upl_size_t              xfer_size;
5541 	unsigned int            size_in_pages;
5542 	boolean_t               dirty;
5543 	boolean_t               hw_dirty;
5544 	upl_t                   upl = NULL;
5545 	unsigned int            entry;
5546 	vm_page_t               alias_page = NULL;
5547 	int                     refmod_state = 0;
5548 	wpl_array_t             lite_list = NULL;
5549 	vm_object_t             last_copy_object;
5550 	struct  vm_page_delayed_work    dw_array;
5551 	struct  vm_page_delayed_work    *dwp, *dwp_start;
5552 	bool                    dwp_finish_ctx = TRUE;
5553 	int                     dw_count;
5554 	int                     dw_limit;
5555 	int                     io_tracking_flag = 0;
5556 	int                     grab_options;
5557 	int                     page_grab_count = 0;
5558 	ppnum_t                 phys_page;
5559 	pmap_flush_context      pmap_flush_context_storage;
5560 	boolean_t               pmap_flushes_delayed = FALSE;
5561 #if DEVELOPMENT || DEBUG
5562 	task_t                  task = current_task();
5563 #endif /* DEVELOPMENT || DEBUG */
5564 
5565 	dwp_start = dwp = NULL;
5566 
5567 	if (cntrl_flags & ~UPL_VALID_FLAGS) {
5568 		/*
5569 		 * For forward compatibility's sake,
5570 		 * reject any unknown flag.
5571 		 */
5572 		return KERN_INVALID_VALUE;
5573 	}
5574 	if ((!object->internal) && (object->paging_offset != 0)) {
5575 		panic("vm_object_upl_request: external object with non-zero paging offset");
5576 	}
5577 	if (object->phys_contiguous) {
5578 		panic("vm_object_upl_request: contiguous object specified");
5579 	}
5580 
5581 	assertf(page_aligned(offset) && page_aligned(size),
5582 	    "offset 0x%llx size 0x%x",
5583 	    offset, size);
5584 
5585 	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
5586 
5587 	dw_count = 0;
5588 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5589 	dwp_start = vm_page_delayed_work_get_ctx();
5590 	if (dwp_start == NULL) {
5591 		dwp_start = &dw_array;
5592 		dw_limit = 1;
5593 		dwp_finish_ctx = FALSE;
5594 	}
5595 
5596 	dwp = dwp_start;
5597 
5598 	if (size > MAX_UPL_SIZE_BYTES) {
5599 		size = MAX_UPL_SIZE_BYTES;
5600 	}
5601 
5602 	if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
5603 		*page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
5604 	}
5605 
5606 #if CONFIG_IOSCHED || UPL_DEBUG
5607 	if (object->io_tracking || upl_debug_enabled) {
5608 		io_tracking_flag |= UPL_CREATE_IO_TRACKING;
5609 	}
5610 #endif
5611 #if CONFIG_IOSCHED
5612 	if (object->io_tracking) {
5613 		io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
5614 	}
5615 #endif
5616 
5617 	if (cntrl_flags & UPL_SET_INTERNAL) {
5618 		if (cntrl_flags & UPL_SET_LITE) {
5619 			upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5620 
5621 			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5622 			lite_list = (wpl_array_t)
5623 			    (((uintptr_t)user_page_list) +
5624 			    ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
5625 			if (size == 0) {
5626 				user_page_list = NULL;
5627 				lite_list = NULL;
5628 			}
5629 		} else {
5630 			upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
5631 
5632 			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5633 			if (size == 0) {
5634 				user_page_list = NULL;
5635 			}
5636 		}
5637 	} else {
5638 		if (cntrl_flags & UPL_SET_LITE) {
5639 			upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5640 
5641 			lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
5642 			if (size == 0) {
5643 				lite_list = NULL;
5644 			}
5645 		} else {
5646 			upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
5647 		}
5648 	}
5649 	*upl_ptr = upl;
5650 
5651 	if (user_page_list) {
5652 		user_page_list[0].device = FALSE;
5653 	}
5654 
5655 	if (cntrl_flags & UPL_SET_LITE) {
5656 		upl->map_object = object;
5657 	} else {
5658 		upl->map_object = vm_object_allocate(size);
5659 		/*
5660 		 * No need to lock the new object: nobody else knows
5661 		 * about it yet, so it's all ours so far.
5662 		 */
5663 		upl->map_object->shadow = object;
5664 		upl->map_object->pageout = TRUE;
5665 		upl->map_object->can_persist = FALSE;
5666 		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5667 		upl->map_object->vo_shadow_offset = offset;
5668 		upl->map_object->wimg_bits = object->wimg_bits;
5669 		assertf(page_aligned(upl->map_object->vo_shadow_offset),
5670 		    "object %p shadow_offset 0x%llx",
5671 		    upl->map_object, upl->map_object->vo_shadow_offset);
5672 
5673 		alias_page = vm_page_grab_fictitious(TRUE);
5674 
5675 		upl->flags |= UPL_SHADOWED;
5676 	}
5677 	if (cntrl_flags & UPL_FOR_PAGEOUT) {
5678 		upl->flags |= UPL_PAGEOUT;
5679 	}
5680 
5681 	vm_object_lock(object);
5682 	vm_object_activity_begin(object);
5683 
5684 	grab_options = 0;
5685 #if CONFIG_SECLUDED_MEMORY
5686 	if (object->can_grab_secluded) {
5687 		grab_options |= VM_PAGE_GRAB_SECLUDED;
5688 	}
5689 #endif /* CONFIG_SECLUDED_MEMORY */
5690 
5691 	/*
5692 	 * we can lock in the paging_offset once paging_in_progress is set
5693 	 */
5694 	upl->u_size = size;
5695 	upl->u_offset = offset + object->paging_offset;
5696 
5697 #if CONFIG_IOSCHED || UPL_DEBUG
5698 	if (object->io_tracking || upl_debug_enabled) {
5699 		vm_object_activity_begin(object);
5700 		queue_enter(&object->uplq, upl, upl_t, uplq);
5701 	}
5702 #endif
5703 	if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
5704 		/*
5705 		 * Honor copy-on-write obligations
5706 		 *
5707 		 * The caller is gathering these pages and
5708 		 * might modify their contents.  We need to
5709 		 * make sure that the copy object has its own
5710 		 * private copies of these pages before we let
5711 		 * the caller modify them.
5712 		 */
5713 		vm_object_update(object,
5714 		    offset,
5715 		    size,
5716 		    NULL,
5717 		    NULL,
5718 		    FALSE,              /* should_return */
5719 		    MEMORY_OBJECT_COPY_SYNC,
5720 		    VM_PROT_NO_CHANGE);
5721 
5722 		VM_PAGEOUT_DEBUG(upl_cow, 1);
5723 		VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
5724 	}
5725 	/*
5726 	 * remember which copy object we synchronized with
5727 	 */
5728 	last_copy_object = object->copy;
5729 	entry = 0;
5730 
5731 	xfer_size = size;
5732 	dst_offset = offset;
5733 	size_in_pages = size / PAGE_SIZE;
5734 
5735 	if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
5736 	    object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
5737 		object->scan_collisions = 0;
5738 	}
5739 
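	/*
	 * If the caller will dirty these pages and the external pageout
	 * queue is already backing up, stall briefly (a shorter delay for
	 * SSD-backed pagers) so the pageout threads can catch up before we
	 * gather more dirty pages.
	 */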
5740 	if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
5741 		boolean_t       isSSD = FALSE;
5742 
5743 #if !XNU_TARGET_OS_OSX
5744 		isSSD = TRUE;
5745 #else /* !XNU_TARGET_OS_OSX */
5746 		vnode_pager_get_isSSD(object->pager, &isSSD);
5747 #endif /* !XNU_TARGET_OS_OSX */
5748 		vm_object_unlock(object);
5749 
5750 		OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5751 
5752 		if (isSSD == TRUE) {
5753 			delay(1000 * size_in_pages);
5754 		} else {
5755 			delay(5000 * size_in_pages);
5756 		}
5757 		OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5758 
5759 		vm_object_lock(object);
5760 	}
5761 
5762 	while (xfer_size) {
5763 		dwp->dw_mask = 0;
5764 
5765 		if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
5766 			vm_object_unlock(object);
5767 			alias_page = vm_page_grab_fictitious(TRUE);
5768 			vm_object_lock(object);
5769 		}
5770 		if (cntrl_flags & UPL_COPYOUT_FROM) {
5771 			upl->flags |= UPL_PAGE_SYNC_DONE;
5772 
5773 			if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
5774 			    dst_page->vmp_fictitious ||
5775 			    dst_page->vmp_absent ||
5776 			    dst_page->vmp_error ||
5777 			    dst_page->vmp_cleaning ||
5778 			    (VM_PAGE_WIRED(dst_page))) {
5779 				if (user_page_list) {
5780 					user_page_list[entry].phys_addr = 0;
5781 				}
5782 
5783 				goto try_next_page;
5784 			}
5785 			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5786 
5787 			/*
5788 			 * grab this up front...
5789 			 * a high percentage of the time we're going to
5790 			 * need the hardware modification state a bit later
5791 			 * anyway... so we can eliminate an extra call into
5792 			 * the pmap layer by grabbing it here and recording it
5793 			 */
5794 			if (dst_page->vmp_pmapped) {
5795 				refmod_state = pmap_get_refmod(phys_page);
5796 			} else {
5797 				refmod_state = 0;
5798 			}
5799 
5800 			if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
5801 				/*
5802 				 * page is on inactive list and referenced...
5803 				 * reactivate it now... this gets it out of the
5804 				 * way of vm_pageout_scan which would have to
5805 				 * reactivate it upon tripping over it
5806 				 */
5807 				dwp->dw_mask |= DW_vm_page_activate;
5808 			}
5809 			if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
5810 				/*
5811 				 * we're only asking for DIRTY pages to be returned
5812 				 */
5813 				if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
5814 					/*
5815 					 * cleaned (as opposed to a buddy being clustered in),
5816 					 * or this request is not being driven by a PAGEOUT cluster,
5817 					 * then we only need to check for the page being dirty or
5818 					 * then we only need to check for the page being dirty or
5819 					 * precious to decide whether to return it
5820 					 */
5821 					if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
5822 						goto check_busy;
5823 					}
5824 					goto dont_return;
5825 				}
5826 				/*
5827 				 * this is a request for a PAGEOUT cluster and this page
5828 				 * is merely along for the ride as a 'buddy'... not only
5829 				 * does it have to be dirty to be returned, but it also
5830 				 * can't have been referenced recently...
5831 				 */
5832 				if ((hibernate_cleaning_in_progress == TRUE ||
5833 				    (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
5834 				    (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
5835 				    ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
5836 					goto check_busy;
5837 				}
5838 dont_return:
5839 				/*
5840 				 * if we reach here, we're not to return
5841 				 * the page... go on to the next one
5842 				 */
5843 				if (dst_page->vmp_laundry == TRUE) {
5844 					/*
5845 					 * if we get here, the page is not 'cleaning' (filtered out above).
5846 					 * since it has been referenced, remove it from the laundry
5847 					 * so we don't pay the cost of an I/O to clean a page
5848 					 * we're just going to take back
5849 					 */
5850 					vm_page_lockspin_queues();
5851 
5852 					vm_pageout_steal_laundry(dst_page, TRUE);
5853 					vm_page_activate(dst_page);
5854 
5855 					vm_page_unlock_queues();
5856 				}
5857 				if (user_page_list) {
5858 					user_page_list[entry].phys_addr = 0;
5859 				}
5860 
5861 				goto try_next_page;
5862 			}
5863 check_busy:
5864 			if (dst_page->vmp_busy) {
5865 				if (cntrl_flags & UPL_NOBLOCK) {
5866 					if (user_page_list) {
5867 						user_page_list[entry].phys_addr = 0;
5868 					}
5869 					dwp->dw_mask = 0;
5870 
5871 					goto try_next_page;
5872 				}
5873 				/*
5874 				 * someone else is playing with the
5875 				 * page.  We will have to wait.
5876 				 */
5877 				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5878 
5879 				continue;
5880 			}
5881 			if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5882 				vm_page_lockspin_queues();
5883 
5884 				if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5885 					/*
5886 					 * we've buddied up a page for a clustered pageout
5887 					 * that has already been moved to the pageout
5888 					 * queue by pageout_scan... we need to remove
5889 					 * it from the queue and drop the laundry count
5890 					 * on that queue
5891 					 */
5892 					vm_pageout_throttle_up(dst_page);
5893 				}
5894 				vm_page_unlock_queues();
5895 			}
5896 			hw_dirty = refmod_state & VM_MEM_MODIFIED;
5897 			dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5898 
5899 			if (phys_page > upl->highest_page) {
5900 				upl->highest_page = phys_page;
5901 			}
5902 
5903 			assert(!pmap_is_noencrypt(phys_page));
5904 
5905 			if (cntrl_flags & UPL_SET_LITE) {
5906 				unsigned int    pg_num;
5907 
5908 				pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5909 				assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
5910 				lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
5911 
5912 				if (hw_dirty) {
5913 					if (pmap_flushes_delayed == FALSE) {
5914 						pmap_flush_context_init(&pmap_flush_context_storage);
5915 						pmap_flushes_delayed = TRUE;
5916 					}
5917 					pmap_clear_refmod_options(phys_page,
5918 					    VM_MEM_MODIFIED,
5919 					    PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
5920 					    &pmap_flush_context_storage);
5921 				}
5922 
5923 				/*
5924 				 * Mark original page as cleaning
5925 				 * in place.
5926 				 */
5927 				dst_page->vmp_cleaning = TRUE;
5928 				dst_page->vmp_precious = FALSE;
5929 			} else {
5930 				/*
5931 				 * use pageclean setup, it is more
5932 				 * convenient even for the pageout
5933 				 * cases here
5934 				 */
5935 				vm_object_lock(upl->map_object);
5936 				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5937 				vm_object_unlock(upl->map_object);
5938 
5939 				alias_page->vmp_absent = FALSE;
5940 				alias_page = NULL;
5941 			}
5942 			if (dirty) {
5943 				SET_PAGE_DIRTY(dst_page, FALSE);
5944 			} else {
5945 				dst_page->vmp_dirty = FALSE;
5946 			}
5947 
5948 			if (!dirty) {
5949 				dst_page->vmp_precious = TRUE;
5950 			}
5951 
5952 			if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
5953 				if (!VM_PAGE_WIRED(dst_page)) {
5954 					dst_page->vmp_free_when_done = TRUE;
5955 				}
5956 			}
5957 		} else {
5958 			if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
5959 				/*
5960 				 * Honor copy-on-write obligations
5961 				 *
5962 				 * The copy object has changed since we
5963 				 * last synchronized for copy-on-write.
5964 				 * Another copy object might have been
5965 				 * inserted while we released the object's
5966 				 * lock.  Since someone could have seen the
5967 				 * original contents of the remaining pages
5968 				 * through that new object, we have to
5969 				 * synchronize with it again for the remaining
5970 				 * pages only.  The previous pages are "busy"
5971 				 * so they can not be seen through the new
5972 				 * mapping.  The new mapping will see our
5973 				 * upcoming changes for those previous pages,
5974 				 * but that's OK since they couldn't see what
5975 				 * was there before.  It's just a race anyway
5976 				 * and there's no guarantee of consistency or
5977 				 * atomicity.  We just don't want new mappings
5978 				 * to see both the *before* and *after* pages.
5979 				 */
5980 				if (object->copy != VM_OBJECT_NULL) {
5981 					vm_object_update(
5982 						object,
5983 						dst_offset,/* current offset */
5984 						xfer_size, /* remaining size */
5985 						NULL,
5986 						NULL,
5987 						FALSE,     /* should_return */
5988 						MEMORY_OBJECT_COPY_SYNC,
5989 						VM_PROT_NO_CHANGE);
5990 
5991 					VM_PAGEOUT_DEBUG(upl_cow_again, 1);
5992 					VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
5993 				}
5994 				/*
5995 				 * remember the copy object we synced with
5996 				 */
5997 				last_copy_object = object->copy;
5998 			}
5999 			dst_page = vm_page_lookup(object, dst_offset);
6000 
6001 			if (dst_page != VM_PAGE_NULL) {
6002 				if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6003 					/*
6004 					 * skip over pages already present in the cache
6005 					 */
6006 					if (user_page_list) {
6007 						user_page_list[entry].phys_addr = 0;
6008 					}
6009 
6010 					goto try_next_page;
6011 				}
6012 				if (dst_page->vmp_fictitious) {
6013 					panic("need corner case for fictitious page");
6014 				}
6015 
6016 				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6017 					/*
6018 					 * someone else is playing with the
6019 					 * page.  We will have to wait.
6020 					 */
6021 					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6022 
6023 					continue;
6024 				}
6025 				if (dst_page->vmp_laundry) {
6026 					vm_pageout_steal_laundry(dst_page, FALSE);
6027 				}
6028 			} else {
6029 				if (object->private) {
6030 					/*
6031 					 * This is a nasty wrinkle for users
6032 					 * of upl who encounter device or
6033 					 * private memory; however, it is
6034 					 * unavoidable: only a fault can
6035 					 * resolve the actual backing
6036 					 * physical page by asking the
6037 					 * backing device.
6038 					 */
6039 					if (user_page_list) {
6040 						user_page_list[entry].phys_addr = 0;
6041 					}
6042 
6043 					goto try_next_page;
6044 				}
6045 				if (object->scan_collisions) {
6046 					/*
6047 					 * the pageout_scan thread is trying to steal
6048 					 * pages from this object, but has run into our
6049 					 * lock... grab 2 pages from the head of the object...
6050 					 * the first is freed on behalf of pageout_scan, the
6051 					 * 2nd is for our own use... we use vm_object_page_grab
6052 					 * in both cases to avoid taking pages from the free
6053 					 * list since we are under memory pressure and our
6054 					 * lock on this object is getting in the way of
6055 					 * relieving it
6056 					 */
6057 					dst_page = vm_object_page_grab(object);
6058 
6059 					if (dst_page != VM_PAGE_NULL) {
6060 						vm_page_release(dst_page,
6061 						    FALSE);
6062 					}
6063 
6064 					dst_page = vm_object_page_grab(object);
6065 				}
6066 				if (dst_page == VM_PAGE_NULL) {
6067 					/*
6068 					 * need to allocate a page
6069 					 */
6070 					dst_page = vm_page_grab_options(grab_options);
6071 					if (dst_page != VM_PAGE_NULL) {
6072 						page_grab_count++;
6073 					}
6074 				}
6075 				if (dst_page == VM_PAGE_NULL) {
6076 					if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
6077 						/*
6078 						 * we don't want to stall waiting for pages to come onto the free list
6079 						 * while we're already holding absent pages in this UPL
6080 						 * the caller will deal with the empty slots
6081 						 */
6082 						if (user_page_list) {
6083 							user_page_list[entry].phys_addr = 0;
6084 						}
6085 
6086 						goto try_next_page;
6087 					}
6088 					/*
6089 					 * no pages available... wait
6090 					 * then try again for the same
6091 					 * offset...
6092 					 */
6093 					vm_object_unlock(object);
6094 
6095 					OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
6096 
6097 					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
6098 
6099 					VM_PAGE_WAIT();
6100 					OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
6101 
6102 					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
6103 
6104 					vm_object_lock(object);
6105 
6106 					continue;
6107 				}
6108 				vm_page_insert(dst_page, object, dst_offset);
6109 
6110 				dst_page->vmp_absent = TRUE;
6111 				dst_page->vmp_busy = FALSE;
6112 
6113 				if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
6114 					/*
6115 					 * if UPL_RET_ONLY_ABSENT was specified,
6116 					 * then we're definitely setting up a
6117 					 * upl for a clustered read/pagein
6118 					 * operation... mark the pages as clustered
6119 					 * so upl_commit_range can put them on the
6120 					 * speculative list
6121 					 */
6122 					dst_page->vmp_clustered = TRUE;
6123 
6124 					if (!(cntrl_flags & UPL_FILE_IO)) {
6125 						counter_inc(&vm_statistics_pageins);
6126 					}
6127 				}
6128 			}
6129 			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
6130 
6131 			dst_page->vmp_overwriting = TRUE;
6132 
6133 			if (dst_page->vmp_pmapped) {
6134 				if (!(cntrl_flags & UPL_FILE_IO)) {
6135 					/*
6136 					 * eliminate all mappings from the
6137 					 * original object and its progeny
6138 					 */
6139 					refmod_state = pmap_disconnect(phys_page);
6140 				} else {
6141 					refmod_state = pmap_get_refmod(phys_page);
6142 				}
6143 			} else {
6144 				refmod_state = 0;
6145 			}
6146 
6147 			hw_dirty = refmod_state & VM_MEM_MODIFIED;
6148 			dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
6149 
6150 			if (cntrl_flags & UPL_SET_LITE) {
6151 				unsigned int    pg_num;
6152 
6153 				pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
6154 				assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
6155 				lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
6156 
6157 				if (hw_dirty) {
6158 					pmap_clear_modify(phys_page);
6159 				}
6160 
6161 				/*
6162 				 * Mark original page as cleaning
6163 				 * in place.
6164 				 */
6165 				dst_page->vmp_cleaning = TRUE;
6166 				dst_page->vmp_precious = FALSE;
6167 			} else {
6168 				/*
6169 				 * use pageclean setup, it is more
6170 				 * convenient even for the pageout
6171 				 * cases here
6172 				 */
6173 				vm_object_lock(upl->map_object);
6174 				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
6175 				vm_object_unlock(upl->map_object);
6176 
6177 				alias_page->vmp_absent = FALSE;
6178 				alias_page = NULL;
6179 			}
6180 
6181 			if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
6182 				upl->flags &= ~UPL_CLEAR_DIRTY;
6183 				upl->flags |= UPL_SET_DIRTY;
6184 				dirty = TRUE;
6185 				/*
6186 				 * Page belonging to a code-signed object is about to
6187 				 * be written. Mark it tainted and disconnect it from
6188 				 * all pmaps so processes have to fault it back in and
6189 				 * deal with the tainted bit.
6190 				 */
6191 				if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
6192 					dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
6193 					vm_page_upl_tainted++;
6194 					if (dst_page->vmp_pmapped) {
6195 						refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6196 						if (refmod_state & VM_MEM_REFERENCED) {
6197 							dst_page->vmp_reference = TRUE;
6198 						}
6199 					}
6200 				}
6201 			} else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
6202 				/*
6203 				 * clean in place for read implies
6204 				 * that a write will be done on all
6205 				 * the pages that are dirty before
6206 				 * a upl commit is done.  The caller
6207 				 * is obligated to preserve the
6208 				 * contents of all pages marked dirty
6209 				 */
6210 				upl->flags |= UPL_CLEAR_DIRTY;
6211 			}
6212 			dst_page->vmp_dirty = dirty;
6213 
6214 			if (!dirty) {
6215 				dst_page->vmp_precious = TRUE;
6216 			}
6217 
6218 			if (!VM_PAGE_WIRED(dst_page)) {
6219 				/*
6220 				 * deny access to the target page while
6221 				 * it is being worked on
6222 				 */
6223 				dst_page->vmp_busy = TRUE;
6224 			} else {
6225 				dwp->dw_mask |= DW_vm_page_wire;
6226 			}
6227 
6228 			/*
6229 			 * We might be about to satisfy a fault which has been
6230 			 * requested. So no need for the "restart" bit.
6231 			 */
6232 			dst_page->vmp_restart = FALSE;
6233 			if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
6234 				/*
6235 				 * expect the page to be used
6236 				 */
6237 				dwp->dw_mask |= DW_set_reference;
6238 			}
6239 			if (cntrl_flags & UPL_PRECIOUS) {
6240 				if (object->internal) {
6241 					SET_PAGE_DIRTY(dst_page, FALSE);
6242 					dst_page->vmp_precious = FALSE;
6243 				} else {
6244 					dst_page->vmp_precious = TRUE;
6245 				}
6246 			} else {
6247 				dst_page->vmp_precious = FALSE;
6248 			}
6249 		}
6250 		if (dst_page->vmp_busy) {
6251 			upl->flags |= UPL_HAS_BUSY;
6252 		}
6253 
6254 		if (phys_page > upl->highest_page) {
6255 			upl->highest_page = phys_page;
6256 		}
6257 		assert(!pmap_is_noencrypt(phys_page));
6258 		if (user_page_list) {
6259 			user_page_list[entry].phys_addr = phys_page;
6260 			user_page_list[entry].free_when_done    = dst_page->vmp_free_when_done;
6261 			user_page_list[entry].absent    = dst_page->vmp_absent;
6262 			user_page_list[entry].dirty     = dst_page->vmp_dirty;
6263 			user_page_list[entry].precious  = dst_page->vmp_precious;
6264 			user_page_list[entry].device    = FALSE;
6265 			user_page_list[entry].needed    = FALSE;
6266 			if (dst_page->vmp_clustered == TRUE) {
6267 				user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
6268 			} else {
6269 				user_page_list[entry].speculative = FALSE;
6270 			}
6271 			user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
6272 			user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
6273 			user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
6274 			user_page_list[entry].mark      = FALSE;
6275 		}
6276 		/*
6277 		 * if UPL_RET_ONLY_ABSENT is set, then
6278 		 * we are working with a fresh page and we've
6279 		 * just set the clustered flag on it to
6280 		 * indicate that it was dragged in as part of a
6281 		 * speculative cluster... so leave it alone
6282 		 */
6283 		if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6284 			/*
6285 			 * someone is explicitly grabbing this page...
6286 			 * update clustered and speculative state
6287 			 *
6288 			 */
6289 			if (dst_page->vmp_clustered) {
6290 				VM_PAGE_CONSUME_CLUSTERED(dst_page);
6291 			}
6292 		}
6293 try_next_page:
6294 		if (dwp->dw_mask) {
6295 			if (dwp->dw_mask & DW_vm_page_activate) {
6296 				counter_inc(&vm_statistics_reactivations);
6297 			}
6298 
6299 			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
6300 
6301 			if (dw_count >= dw_limit) {
6302 				vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6303 
6304 				dwp = dwp_start;
6305 				dw_count = 0;
6306 			}
6307 		}
6308 		entry++;
6309 		dst_offset += PAGE_SIZE_64;
6310 		xfer_size -= PAGE_SIZE;
6311 	}
6312 	if (dw_count) {
6313 		vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6314 		dwp = dwp_start;
6315 		dw_count = 0;
6316 	}
6317 
6318 	if (alias_page != NULL) {
6319 		VM_PAGE_FREE(alias_page);
6320 	}
6321 	if (pmap_flushes_delayed == TRUE) {
6322 		pmap_flush(&pmap_flush_context_storage);
6323 	}
6324 
6325 	if (page_list_count != NULL) {
6326 		if (upl->flags & UPL_INTERNAL) {
6327 			*page_list_count = 0;
6328 		} else if (*page_list_count > entry) {
6329 			*page_list_count = entry;
6330 		}
6331 	}
6332 #if UPL_DEBUG
6333 	upl->upl_state = 1;
6334 #endif
6335 	vm_object_unlock(object);
6336 
6337 	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
6338 #if DEVELOPMENT || DEBUG
6339 	if (task != NULL) {
6340 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
6341 	}
6342 #endif /* DEVELOPMENT || DEBUG */
6343 
6344 	if (dwp_start && dwp_finish_ctx) {
6345 		vm_page_delayed_work_finish_ctx(dwp_start);
6346 		dwp_start = dwp = NULL;
6347 	}
6348 
6349 	return KERN_SUCCESS;
6350 }
6351 
6352 /*
6353  *	Routine:	vm_object_super_upl_request
6354  *	Purpose:
6355  *		Cause the population of a portion of a vm_object
6356  *		in much the same way as memory_object_upl_request.
6357  *		Depending on the nature of the request, the pages
6358  *		returned may contain valid data or be uninitialized.
6359  *		However, the region may be expanded up to the super
6360  *		cluster size provided.
6361  */
6362 
6363 __private_extern__ kern_return_t
6364 vm_object_super_upl_request(
6365 	vm_object_t object,
6366 	vm_object_offset_t      offset,
6367 	upl_size_t              size,
6368 	upl_size_t              super_cluster,
6369 	upl_t                   *upl,
6370 	upl_page_info_t         *user_page_list,
6371 	unsigned int            *page_list_count,
6372 	upl_control_flags_t     cntrl_flags,
6373 	vm_tag_t                tag)
6374 {
6375 	if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
6376 		return KERN_FAILURE;
6377 	}
6378 
6379 	assert(object->paging_in_progress);
6380 	offset = offset - object->paging_offset;
6381 
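	/*
	 * Expand the request to a super_cluster-aligned window: double the
	 * window if the original range straddles the alignment boundary,
	 * then clip it to the object's size (but never below the original
	 * request).
	 */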
6382 	if (super_cluster > size) {
6383 		vm_object_offset_t      base_offset;
6384 		upl_size_t              super_size;
6385 		vm_object_size_t        super_size_64;
6386 
6387 		base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
6388 		super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
6389 		super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
6390 		super_size = (upl_size_t) super_size_64;
6391 		assert(super_size == super_size_64);
6392 
6393 		if (offset > (base_offset + super_size)) {
6394 			panic("vm_object_super_upl_request: Missed target pageout"
6395 			    " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
6396 			    offset, base_offset, super_size, super_cluster,
6397 			    size, object->paging_offset);
6398 		}
6399 		/*
6400 		 * apparently there is a case where the vm requests a
6401 		 * page to be written out whose offset is beyond the
6402 		 * object size
6403 		 */
6404 		if ((offset + size) > (base_offset + super_size)) {
6405 			super_size_64 = (offset + size) - base_offset;
6406 			super_size = (upl_size_t) super_size_64;
6407 			assert(super_size == super_size_64);
6408 		}
6409 
6410 		offset = base_offset;
6411 		size = super_size;
6412 	}
6413 	return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
6414 }
6415 
6416 int cs_executable_create_upl = 0;
6417 extern int proc_selfpid(void);
6418 extern char *proc_name_address(void *p);
6419 
6420 kern_return_t
6421 vm_map_create_upl(
6422 	vm_map_t                map,
6423 	vm_map_address_t        offset,
6424 	upl_size_t              *upl_size,
6425 	upl_t                   *upl,
6426 	upl_page_info_array_t   page_list,
6427 	unsigned int            *count,
6428 	upl_control_flags_t     *flags,
6429 	vm_tag_t                tag)
6430 {
6431 	vm_map_entry_t          entry;
6432 	upl_control_flags_t     caller_flags;
6433 	int                     force_data_sync;
6434 	int                     sync_cow_data;
6435 	vm_object_t             local_object;
6436 	vm_map_offset_t         local_offset;
6437 	vm_map_offset_t         local_start;
6438 	kern_return_t           ret;
6439 	vm_map_address_t        original_offset;
6440 	vm_map_size_t           original_size, adjusted_size;
6441 	vm_map_offset_t         local_entry_start;
6442 	vm_object_offset_t      local_entry_offset;
6443 	vm_object_offset_t      offset_in_mapped_page;
6444 	boolean_t               release_map = FALSE;
6445 
6446 start_with_map:
6447 
6448 	original_offset = offset;
6449 	original_size = *upl_size;
6450 	adjusted_size = original_size;
6451 
6452 	caller_flags = *flags;
6453 
6454 	if (caller_flags & ~UPL_VALID_FLAGS) {
6455 		/*
6456 		 * For forward compatibility's sake,
6457 		 * reject any unknown flag.
6458 		 */
6459 		ret = KERN_INVALID_VALUE;
6460 		goto done;
6461 	}
6462 	force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
6463 	sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
6464 
6465 	if (upl == NULL) {
6466 		ret = KERN_INVALID_ARGUMENT;
6467 		goto done;
6468 	}
6469 
6470 REDISCOVER_ENTRY:
6471 	vm_map_lock_read(map);
6472 
6473 	if (!vm_map_lookup_entry(map, offset, &entry)) {
6474 		vm_map_unlock_read(map);
6475 		ret = KERN_FAILURE;
6476 		goto done;
6477 	}
6478 
6479 	local_entry_start = entry->vme_start;
6480 	local_entry_offset = VME_OFFSET(entry);
6481 
6482 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
6483 		DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, *upl_size, *flags);
6484 	}
6485 
6486 	if (entry->vme_end - original_offset < adjusted_size) {
6487 		adjusted_size = entry->vme_end - original_offset;
6488 		assert(adjusted_size > 0);
6489 		*upl_size = (upl_size_t) adjusted_size;
6490 		assert(*upl_size == adjusted_size);
6491 	}
6492 
6493 	if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
6494 		*flags = 0;
6495 
6496 		if (!entry->is_sub_map &&
6497 		    VME_OBJECT(entry) != VM_OBJECT_NULL) {
6498 			if (VME_OBJECT(entry)->private) {
6499 				*flags = UPL_DEV_MEMORY;
6500 			}
6501 
6502 			if (VME_OBJECT(entry)->phys_contiguous) {
6503 				*flags |= UPL_PHYS_CONTIG;
6504 			}
6505 		}
6506 		vm_map_unlock_read(map);
6507 		ret = KERN_SUCCESS;
6508 		goto done;
6509 	}
6510 
6511 	offset_in_mapped_page = 0;
6512 	if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
6513 		offset = vm_map_trunc_page(original_offset, VM_MAP_PAGE_MASK(map));
6514 		*upl_size = (upl_size_t)
6515 		    (vm_map_round_page(original_offset + adjusted_size,
6516 		    VM_MAP_PAGE_MASK(map))
6517 		    - offset);
6518 
6519 		offset_in_mapped_page = original_offset - offset;
6520 		assert(offset_in_mapped_page < VM_MAP_PAGE_SIZE(map));
6521 
6522 		DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)original_offset, (uint64_t)original_size, *flags, (uint64_t)offset, (uint64_t)adjusted_size, *upl_size, offset_in_mapped_page);
6523 	}
6524 
6525 	if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
6526 	    !VME_OBJECT(entry)->phys_contiguous) {
6527 		if (*upl_size > MAX_UPL_SIZE_BYTES) {
6528 			*upl_size = MAX_UPL_SIZE_BYTES;
6529 		}
6530 	}
6531 
6532 	/*
6533 	 *      Create an object if necessary.
6534 	 */
6535 	if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
6536 		if (vm_map_lock_read_to_write(map)) {
6537 			goto REDISCOVER_ENTRY;
6538 		}
6539 
6540 		VME_OBJECT_SET(entry,
6541 		    vm_object_allocate((vm_size_t)
6542 		    vm_object_round_page((entry->vme_end - entry->vme_start))));
6543 		VME_OFFSET_SET(entry, 0);
6544 		assert(entry->use_pmap);
6545 
6546 		vm_map_lock_write_to_read(map);
6547 	}
6548 
6549 	if (!(caller_flags & UPL_COPYOUT_FROM) &&
6550 	    !entry->is_sub_map &&
6551 	    !(entry->protection & VM_PROT_WRITE)) {
6552 		vm_map_unlock_read(map);
6553 		ret = KERN_PROTECTION_FAILURE;
6554 		goto done;
6555 	}
6556 
6557 #if !XNU_TARGET_OS_OSX
6558 	if (map->pmap != kernel_pmap &&
6559 	    (caller_flags & UPL_COPYOUT_FROM) &&
6560 	    (entry->protection & VM_PROT_EXECUTE) &&
6561 	    !(entry->protection & VM_PROT_WRITE)) {
6562 		vm_offset_t     kaddr;
6563 		vm_size_t       ksize;
6564 
6565 		/*
6566 		 * We're about to create a read-only UPL backed by
6567 		 * memory from an executable mapping.
6568 		 * Wiring the pages would result in the pages being copied
6569 		 * (due to the "MAP_PRIVATE" mapping) and no longer
6570 		 * code-signed, so no longer eligible for execution.
6571 		 * Instead, let's copy the data into a kernel buffer and
6572 		 * create the UPL from this kernel buffer.
6573 		 * The kernel buffer is then freed, leaving the UPL holding
6574 		 * the last reference on the VM object, so the memory will
6575 		 * be released when the UPL is committed.
6576 		 */
6577 
6578 		vm_map_unlock_read(map);
6579 		entry = VM_MAP_ENTRY_NULL;
6580 		/* allocate kernel buffer */
6581 		ksize = round_page(*upl_size);
6582 		kaddr = 0;
6583 		ret = kmem_alloc_pageable(kernel_map,
6584 		    &kaddr,
6585 		    ksize,
6586 		    tag);
6587 		if (ret == KERN_SUCCESS) {
6588 			/* copyin the user data */
6589 			ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
6590 		}
6591 		if (ret == KERN_SUCCESS) {
6592 			if (ksize > *upl_size) {
6593 				/* zero out the extra space in kernel buffer */
6594 				memset((void *)(kaddr + *upl_size),
6595 				    0,
6596 				    ksize - *upl_size);
6597 			}
6598 			/* create the UPL from the kernel buffer */
6599 			vm_object_offset_t      offset_in_object;
6600 			vm_object_offset_t      offset_in_object_page;
6601 
6602 			offset_in_object = offset - local_entry_start + local_entry_offset;
6603 			offset_in_object_page = offset_in_object - vm_object_trunc_page(offset_in_object);
6604 			assert(offset_in_object_page < PAGE_SIZE);
6605 			assert(offset_in_object_page + offset_in_mapped_page < PAGE_SIZE);
6606 			*upl_size -= offset_in_object_page + offset_in_mapped_page;
6607 			ret = vm_map_create_upl(kernel_map,
6608 			    (vm_map_address_t)(kaddr + offset_in_object_page + offset_in_mapped_page),
6609 			    upl_size, upl, page_list, count, flags, tag);
6610 		}
6611 		if (kaddr != 0) {
6612 			/* free the kernel buffer */
6613 			kmem_free(kernel_map, kaddr, ksize);
6614 			kaddr = 0;
6615 			ksize = 0;
6616 		}
6617 #if DEVELOPMENT || DEBUG
6618 		DTRACE_VM4(create_upl_from_executable,
6619 		    vm_map_t, map,
6620 		    vm_map_address_t, offset,
6621 		    upl_size_t, *upl_size,
6622 		    kern_return_t, ret);
6623 #endif /* DEVELOPMENT || DEBUG */
6624 		goto done;
6625 	}
6626 #endif /* !XNU_TARGET_OS_OSX */
6627 
6628 	local_object = VME_OBJECT(entry);
6629 	assert(local_object != VM_OBJECT_NULL);
6630 
6631 	if (!entry->is_sub_map &&
6632 	    !entry->needs_copy &&
6633 	    *upl_size != 0 &&
6634 	    local_object->vo_size > *upl_size && /* partial UPL */
6635 	    entry->wired_count == 0 && /* No COW for entries that are wired */
6636 	    (map->pmap != kernel_pmap) && /* alias checks */
6637 	    (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
6638 	    ||
6639 	    ( /* case 2 */
6640 		    local_object->internal &&
6641 		    (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
6642 		    local_object->ref_count > 1))) {
6643 		vm_prot_t       prot;
6644 
6645 		/*
6646 		 * Case 1:
6647 		 * Set up the targeted range for copy-on-write to avoid
6648 		 * applying true_share/copy_delay to the entire object.
6649 		 *
6650 		 * Case 2:
6651 		 * This map entry covers only part of an internal
6652 		 * object.  There could be other map entries covering
6653 		 * other areas of this object and some of these map
6654 		 * entries could be marked as "needs_copy", which
6655 		 * assumes that the object is COPY_SYMMETRIC.
6656 		 * To avoid marking this object as COPY_DELAY and
6657 		 * "true_share", let's shadow it and mark the new
6658 		 * (smaller) object as "true_share" and COPY_DELAY.
6659 		 */
6660 
6661 		if (vm_map_lock_read_to_write(map)) {
6662 			goto REDISCOVER_ENTRY;
6663 		}
6664 		vm_map_lock_assert_exclusive(map);
6665 		assert(VME_OBJECT(entry) == local_object);
6666 
6667 		vm_map_clip_start(map,
6668 		    entry,
6669 		    vm_map_trunc_page(offset,
6670 		    VM_MAP_PAGE_MASK(map)));
6671 		vm_map_clip_end(map,
6672 		    entry,
6673 		    vm_map_round_page(offset + *upl_size,
6674 		    VM_MAP_PAGE_MASK(map)));
6675 		if ((entry->vme_end - offset) < *upl_size) {
6676 			*upl_size = (upl_size_t) (entry->vme_end - offset);
6677 			assert(*upl_size == entry->vme_end - offset);
6678 		}
6679 
6680 		prot = entry->protection & ~VM_PROT_WRITE;
6681 		if (override_nx(map, VME_ALIAS(entry)) && prot) {
6682 			prot |= VM_PROT_EXECUTE;
6683 		}
6684 		vm_object_pmap_protect(local_object,
6685 		    VME_OFFSET(entry),
6686 		    entry->vme_end - entry->vme_start,
6687 		    ((entry->is_shared ||
6688 		    map->mapped_in_other_pmaps)
6689 		    ? PMAP_NULL
6690 		    : map->pmap),
6691 		    VM_MAP_PAGE_SIZE(map),
6692 		    entry->vme_start,
6693 		    prot);
6694 
6695 		assert(entry->wired_count == 0);
6696 
6697 		/*
6698 		 * Lock the VM object and re-check its status: if it's mapped
6699 		 * in another address space, we could still be racing with
6700 		 * another thread holding that other VM map exclusively.
6701 		 */
6702 		vm_object_lock(local_object);
6703 		if (local_object->true_share) {
6704 			/* object is already in proper state: no COW needed */
6705 			assert(local_object->copy_strategy !=
6706 			    MEMORY_OBJECT_COPY_SYMMETRIC);
6707 		} else {
6708 			/* not true_share: ask for copy-on-write below */
6709 			assert(local_object->copy_strategy ==
6710 			    MEMORY_OBJECT_COPY_SYMMETRIC);
6711 			entry->needs_copy = TRUE;
6712 		}
6713 		vm_object_unlock(local_object);
6714 
6715 		vm_map_lock_write_to_read(map);
6716 	}
6717 
6718 	if (entry->needs_copy) {
6719 		/*
6720 		 * Honor copy-on-write for COPY_SYMMETRIC
6721 		 * strategy.
6722 		 */
6723 		vm_map_t                local_map;
6724 		vm_object_t             object;
6725 		vm_object_offset_t      new_offset;
6726 		vm_prot_t               prot;
6727 		boolean_t               wired;
6728 		vm_map_version_t        version;
6729 		vm_map_t                real_map;
6730 		vm_prot_t               fault_type;
6731 
6732 		local_map = map;
6733 
6734 		if (caller_flags & UPL_COPYOUT_FROM) {
6735 			fault_type = VM_PROT_READ | VM_PROT_COPY;
6736 			vm_counters.create_upl_extra_cow++;
6737 			vm_counters.create_upl_extra_cow_pages +=
6738 			    (entry->vme_end - entry->vme_start) / PAGE_SIZE;
6739 		} else {
6740 			fault_type = VM_PROT_WRITE;
6741 		}
6742 		if (vm_map_lookup_locked(&local_map,
6743 		    offset, fault_type,
6744 		    OBJECT_LOCK_EXCLUSIVE,
6745 		    &version, &object,
6746 		    &new_offset, &prot, &wired,
6747 		    NULL,
6748 		    &real_map, NULL) != KERN_SUCCESS) {
6749 			if (fault_type == VM_PROT_WRITE) {
6750 				vm_counters.create_upl_lookup_failure_write++;
6751 			} else {
6752 				vm_counters.create_upl_lookup_failure_copy++;
6753 			}
6754 			vm_map_unlock_read(local_map);
6755 			ret = KERN_FAILURE;
6756 			goto done;
6757 		}
6758 		if (real_map != local_map) {
6759 			vm_map_unlock(real_map);
6760 		}
6761 		vm_map_unlock_read(local_map);
6762 
6763 		vm_object_unlock(object);
6764 
6765 		goto REDISCOVER_ENTRY;
6766 	}
6767 
6768 	if (entry->is_sub_map) {
6769 		vm_map_t        submap;
6770 
6771 		submap = VME_SUBMAP(entry);
6772 		local_start = entry->vme_start;
6773 		local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6774 
6775 		vm_map_reference(submap);
6776 		vm_map_unlock_read(map);
6777 
6778 		DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map, (uint64_t)offset, (uint64_t)original_offset, *upl_size, (uint64_t)adjusted_size, (uint64_t)original_size, offset_in_mapped_page, submap);
6779 		offset += offset_in_mapped_page;
6780 		*upl_size -= offset_in_mapped_page;
6781 
6782 		if (release_map) {
6783 			vm_map_deallocate(map);
6784 		}
6785 		map = submap;
6786 		release_map = TRUE;
6787 		offset = local_offset + (offset - local_start);
6788 		goto start_with_map;
6789 	}
6790 
6791 	if (sync_cow_data &&
6792 	    (VME_OBJECT(entry)->shadow ||
6793 	    VME_OBJECT(entry)->copy)) {
6794 		local_object = VME_OBJECT(entry);
6795 		local_start = entry->vme_start;
6796 		local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6797 
6798 		vm_object_reference(local_object);
6799 		vm_map_unlock_read(map);
6800 
6801 		if (local_object->shadow && local_object->copy) {
6802 			vm_object_lock_request(local_object->shadow,
6803 			    ((vm_object_offset_t)
6804 			    ((offset - local_start) +
6805 			    local_offset) +
6806 			    local_object->vo_shadow_offset),
6807 			    *upl_size, FALSE,
6808 			    MEMORY_OBJECT_DATA_SYNC,
6809 			    VM_PROT_NO_CHANGE);
6810 		}
6811 		sync_cow_data = FALSE;
6812 		vm_object_deallocate(local_object);
6813 
6814 		goto REDISCOVER_ENTRY;
6815 	}
6816 	if (force_data_sync) {
6817 		local_object = VME_OBJECT(entry);
6818 		local_start = entry->vme_start;
6819 		local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6820 
6821 		vm_object_reference(local_object);
6822 		vm_map_unlock_read(map);
6823 
6824 		vm_object_lock_request(local_object,
6825 		    ((vm_object_offset_t)
6826 		    ((offset - local_start) +
6827 		    local_offset)),
6828 		    (vm_object_size_t)*upl_size,
6829 		    FALSE,
6830 		    MEMORY_OBJECT_DATA_SYNC,
6831 		    VM_PROT_NO_CHANGE);
6832 
6833 		force_data_sync = FALSE;
6834 		vm_object_deallocate(local_object);
6835 
6836 		goto REDISCOVER_ENTRY;
6837 	}
6838 	if (VME_OBJECT(entry)->private) {
6839 		*flags = UPL_DEV_MEMORY;
6840 	} else {
6841 		*flags = 0;
6842 	}
6843 
6844 	if (VME_OBJECT(entry)->phys_contiguous) {
6845 		*flags |= UPL_PHYS_CONTIG;
6846 	}
6847 
6848 	local_object = VME_OBJECT(entry);
6849 	local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6850 	local_start = entry->vme_start;
6851 
6852 	/*
6853 	 * Wiring will copy the pages to the shadow object.
6854 	 * The shadow object will not be code-signed so
6855 	 * attempting to execute code from these copied pages
6856 	 * would trigger a code-signing violation.
6857 	 */
6858 	if (entry->protection & VM_PROT_EXECUTE) {
6859 #if MACH_ASSERT
6860 		printf("pid %d[%s] create_upl out of executable range from "
6861 		    "0x%llx to 0x%llx: side effects may include "
6862 		    "code-signing violations later on\n",
6863 		    proc_selfpid(),
6864 		    (current_task()->bsd_info
6865 		    ? proc_name_address(current_task()->bsd_info)
6866 		    : "?"),
6867 		    (uint64_t) entry->vme_start,
6868 		    (uint64_t) entry->vme_end);
6869 #endif /* MACH_ASSERT */
6870 		DTRACE_VM2(cs_executable_create_upl,
6871 		    uint64_t, (uint64_t)entry->vme_start,
6872 		    uint64_t, (uint64_t)entry->vme_end);
6873 		cs_executable_create_upl++;
6874 	}
6875 
6876 	vm_object_lock(local_object);
6877 
6878 	/*
6879 	 * Ensure that this object is "true_share" and "copy_delay" now,
6880 	 * while we're still holding the VM map lock.  After we unlock the map,
6881 	 * anything could happen to that mapping, including some copy-on-write
6882 	 * activity.  We need to make sure that the IOPL will point at the
6883 	 * same memory as the mapping.
6884 	 */
6885 	if (local_object->true_share) {
6886 		assert(local_object->copy_strategy !=
6887 		    MEMORY_OBJECT_COPY_SYMMETRIC);
6888 	} else if (local_object != kernel_object &&
6889 	    local_object != compressor_object &&
6890 	    !local_object->phys_contiguous) {
6891 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6892 		if (!local_object->true_share &&
6893 		    vm_object_tracking_inited) {
6894 			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
6895 			int num = 0;
6896 			num = OSBacktrace(bt,
6897 			    VM_OBJECT_TRACKING_BTDEPTH);
6898 			btlog_add_entry(vm_object_tracking_btlog,
6899 			    local_object,
6900 			    VM_OBJECT_TRACKING_OP_TRUESHARE,
6901 			    bt,
6902 			    num);
6903 		}
6904 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6905 		local_object->true_share = TRUE;
6906 		if (local_object->copy_strategy ==
6907 		    MEMORY_OBJECT_COPY_SYMMETRIC) {
6908 			local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6909 		}
6910 	}
6911 
6912 	vm_object_reference_locked(local_object);
6913 	vm_object_unlock(local_object);
6914 
6915 	vm_map_unlock_read(map);
6916 
6917 	offset += offset_in_mapped_page;
6918 	assert(*upl_size > offset_in_mapped_page);
6919 	*upl_size -= offset_in_mapped_page;
6920 
6921 	ret = vm_object_iopl_request(local_object,
6922 	    ((vm_object_offset_t)
6923 	    ((offset - local_start) + local_offset)),
6924 	    *upl_size,
6925 	    upl,
6926 	    page_list,
6927 	    count,
6928 	    caller_flags,
6929 	    tag);
6930 	vm_object_deallocate(local_object);
6931 
6932 done:
6933 	if (release_map) {
6934 		vm_map_deallocate(map);
6935 	}
6936 
6937 	return ret;
6938 }
6939 
6940 /*
6941  * Internal routine to enter a UPL into a VM map.
6942  *
6943  * JMM - This should just be doable through the standard
6944  * vm_map_enter() API.
6945  */
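/*
 * Usage sketch (editorial illustration, not part of the original source;
 * "upl" stands for any UPL that has already been populated, e.g. via
 * vm_object_upl_request()).  The mapping created here is paired with
 * vm_map_remove_upl() further below:
 *
 *	vm_map_offset_t kaddr;
 *	kern_return_t   kr;
 *
 *	kr = vm_map_enter_upl(kernel_map, upl, &kaddr);
 *	if (kr == KERN_SUCCESS) {
 *		... access the UPL's pages through kaddr ...
 *		vm_map_remove_upl(kernel_map, upl);
 *	}
 *
 * Note that this path currently expects map == kernel_map (see the assert
 * below).
 */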
6946 kern_return_t
6947 vm_map_enter_upl_range(
6948 	vm_map_t                map,
6949 	upl_t                   upl,
6950 	vm_object_offset_t      offset_to_map,
6951 	upl_size_t              size_to_map,
6952 	vm_prot_t               prot_to_map,
6953 	vm_map_offset_t         *dst_addr)
6954 {
6955 	vm_map_size_t           size;
6956 	vm_object_offset_t      offset;
6957 	vm_map_offset_t         addr;
6958 	vm_page_t               m;
6959 	kern_return_t           kr;
6960 	int                     isVectorUPL = 0, curr_upl = 0;
6961 	upl_t                   vector_upl = NULL;
6962 	vm_offset_t             vector_upl_dst_addr = 0;
6963 	vm_map_t                vector_upl_submap = NULL;
6964 	upl_offset_t            subupl_offset = 0;
6965 	upl_size_t              subupl_size = 0;
6966 
6967 	if (upl == UPL_NULL) {
6968 		return KERN_INVALID_ARGUMENT;
6969 	}
6970 
6971 	DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx (uploff: 0x%llx) size 0x%x (uplsz: 0x%x) \n", map, upl, upl->flags, upl->map_object, offset_to_map, upl->u_offset, size_to_map, upl->u_size);
6972 	assert(map == kernel_map);
6973 
6974 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
6975 		int mapped = 0, valid_upls = 0;
6976 		vector_upl = upl;
6977 
6978 		upl_lock(vector_upl);
6979 		for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6980 			upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
6981 			if (upl == NULL) {
6982 				continue;
6983 			}
6984 			valid_upls++;
6985 			if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6986 				mapped++;
6987 			}
6988 		}
6989 
6990 		if (mapped) {
6991 			if (mapped != valid_upls) {
6992 				panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped", mapped, valid_upls);
6993 			} else {
6994 				upl_unlock(vector_upl);
6995 				return KERN_FAILURE;
6996 			}
6997 		}
6998 
6999 		if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
7000 			panic("TODO4K: vector UPL not implemented");
7001 		}
7002 
7003 		kr = kmem_suballoc(map, &vector_upl_dst_addr,
7004 		    vector_upl->u_size,
7005 		    FALSE,
7006 		    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
7007 		    &vector_upl_submap);
7008 		if (kr != KERN_SUCCESS) {
7009 			panic("Vector UPL submap allocation failed");
7010 		}
7011 		map = vector_upl_submap;
7012 		vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
7013 		curr_upl = 0;
7014 	} else {
7015 		upl_lock(upl);
7016 	}
7017 
7018 process_upl_to_enter:
7019 	if (isVectorUPL) {
7020 		if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
7021 			*dst_addr = vector_upl_dst_addr;
7022 			upl_unlock(vector_upl);
7023 			return KERN_SUCCESS;
7024 		}
7025 		upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7026 		if (upl == NULL) {
7027 			goto process_upl_to_enter;
7028 		}
7029 
7030 		vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
7031 		*dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
7032 	} else {
7033 		/*
7034 		 * check to see if already mapped
7035 		 */
7036 		if (UPL_PAGE_LIST_MAPPED & upl->flags) {
7037 			upl_unlock(upl);
7038 			return KERN_FAILURE;
7039 		}
7040 	}
7041 
7042 	if ((!(upl->flags & UPL_SHADOWED)) &&
7043 	    ((upl->flags & UPL_HAS_BUSY) ||
7044 	    !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
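		/*
		 * This UPL has not been shadowed yet and either contains busy
		 * pages or is backed by memory that is neither device memory,
		 * I/O-wired, nor physically contiguous: build a shadow
		 * map_object populated with fictitious "alias" pages that
		 * share the physical pages of the originals, and enter the
		 * mapping against that shadow object instead.
		 */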
7045 		vm_object_t             object;
7046 		vm_page_t               alias_page;
7047 		vm_object_offset_t      new_offset;
7048 		unsigned int            pg_num;
7049 		wpl_array_t             lite_list;
7050 
7051 		size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7052 		if (upl->flags & UPL_INTERNAL) {
7053 			lite_list = (wpl_array_t)
7054 			    ((((uintptr_t)upl) + sizeof(struct upl))
7055 			    + ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
7056 		} else {
7057 			lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
7058 		}
7059 		object = upl->map_object;
7060 		upl->map_object = vm_object_allocate(vm_object_round_page(size));
7061 
7062 		vm_object_lock(upl->map_object);
7063 
7064 		upl->map_object->shadow = object;
7065 		upl->map_object->pageout = TRUE;
7066 		upl->map_object->can_persist = FALSE;
7067 		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
7068 		upl->map_object->vo_shadow_offset = upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset;
7069 		assertf(page_aligned(upl->map_object->vo_shadow_offset),
7070 		    "object %p shadow_offset 0x%llx",
7071 		    upl->map_object,
7072 		    (uint64_t)upl->map_object->vo_shadow_offset);
7073 		upl->map_object->wimg_bits = object->wimg_bits;
7074 		offset = upl->map_object->vo_shadow_offset;
7075 		new_offset = 0;
7076 
7077 		upl->flags |= UPL_SHADOWED;
7078 
7079 		while (size) {
7080 			pg_num = (unsigned int) (new_offset / PAGE_SIZE);
7081 			assert(pg_num == new_offset / PAGE_SIZE);
7082 
7083 			if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
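			/*
			 * lite_list is a bitmap with one bit per page of the
			 * UPL (32 pages per 32-bit word); only pages whose bit
			 * is set belong to this UPL and need an alias page.
			 */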
7084 				alias_page = vm_page_grab_fictitious(TRUE);
7085 
7086 				vm_object_lock(object);
7087 
7088 				m = vm_page_lookup(object, offset);
7089 				if (m == VM_PAGE_NULL) {
7090 					panic("vm_upl_map: page missing");
7091 				}
7092 
7093 				/*
7094 				 * Convert the fictitious page to a private
7095 				 * shadow of the real page.
7096 				 */
7097 				assert(alias_page->vmp_fictitious);
7098 				alias_page->vmp_fictitious = FALSE;
7099 				alias_page->vmp_private = TRUE;
7100 				alias_page->vmp_free_when_done = TRUE;
7101 				/*
7102 				 * since m is a page in the upl it must
7103 				 * already be wired or BUSY, so it's
7104 				 * safe to assign the underlying physical
7105 				 * page to the alias
7106 				 */
7107 				VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
7108 
7109 				vm_object_unlock(object);
7110 
7111 				vm_page_lockspin_queues();
7112 				vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
7113 				vm_page_unlock_queues();
7114 
7115 				vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
7116 
7117 				assert(!alias_page->vmp_wanted);
7118 				alias_page->vmp_busy = FALSE;
7119 				alias_page->vmp_absent = FALSE;
7120 			}
7121 			size -= PAGE_SIZE;
7122 			offset += PAGE_SIZE_64;
7123 			new_offset += PAGE_SIZE_64;
7124 		}
7125 		vm_object_unlock(upl->map_object);
7126 	}
7127 	if (upl->flags & UPL_SHADOWED) {
7128 		if (isVectorUPL) {
7129 			offset = 0;
7130 		} else {
7131 			offset = offset_to_map;
7132 		}
7133 	} else {
7134 		offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
7135 		if (!isVectorUPL) {
7136 			offset += offset_to_map;
7137 		}
7138 	}
7139 
7140 	if (isVectorUPL) {
7141 		size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7142 	} else {
7143 		size = MIN(upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)), size_to_map);
7144 	}
7145 
7146 	vm_object_reference(upl->map_object);
7147 
7148 	if (!isVectorUPL) {
7149 		*dst_addr = 0;
7150 		/*
7151 		 * NEED A UPL_MAP ALIAS
7152 		 */
7153 		kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7154 		    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
7155 		    upl->map_object, offset, FALSE,
7156 		    prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7157 
7158 		if (kr != KERN_SUCCESS) {
7159 			vm_object_deallocate(upl->map_object);
7160 			upl_unlock(upl);
7161 			return kr;
7162 		}
7163 	} else {
7164 		kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7165 		    VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
7166 		    upl->map_object, offset, FALSE,
7167 		    prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7168 		if (kr) {
7169 			panic("vm_map_enter failed for a Vector UPL");
7170 		}
7171 	}
7172 	upl->u_mapped_size = (upl_size_t) size; /* When we allow multiple submappings of the UPL */
7173 	                                        /* this will have to be an increment rather than */
7174 	                                        /* an assignment. */
7175 	vm_object_lock(upl->map_object);
7176 
7177 	for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
7178 		m = vm_page_lookup(upl->map_object, offset);
7179 
7180 		if (m) {
7181 			m->vmp_pmapped = TRUE;
7182 
7183 			/* CODE SIGNING ENFORCEMENT: page has been wpmapped,
7184 			 * but only in kernel space. If this was on a user map,
7185 			 * we'd have to set the wpmapped bit. */
7186 			/* m->vmp_wpmapped = TRUE; */
7187 			assert(map->pmap == kernel_pmap);
7188 
7189 			PMAP_ENTER(map->pmap, addr, m, prot_to_map, VM_PROT_NONE, 0, TRUE, kr);
7190 
7191 			assert(kr == KERN_SUCCESS);
7192 #if KASAN
7193 			kasan_notify_address(addr, PAGE_SIZE_64);
7194 #endif
7195 		}
7196 		offset += PAGE_SIZE_64;
7197 	}
7198 	vm_object_unlock(upl->map_object);
7199 
7200 	/*
7201 	 * hold a reference for the mapping
7202 	 */
7203 	upl->ref_count++;
7204 	upl->flags |= UPL_PAGE_LIST_MAPPED;
7205 	upl->kaddr = (vm_offset_t) *dst_addr;
7206 	assert(upl->kaddr == *dst_addr);
7207 
7208 	if (isVectorUPL) {
7209 		goto process_upl_to_enter;
7210 	}
7211 
7212 	if (!isVectorUPL) {
7213 		vm_map_offset_t addr_adjustment;
7214 
7215 		addr_adjustment = (vm_map_offset_t)(upl->u_offset - upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)));
7216 		if (addr_adjustment) {
7217 			assert(VM_MAP_PAGE_MASK(map) != PAGE_MASK);
7218 			DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr, (uint64_t)addr_adjustment, (uint64_t)(*dst_addr + addr_adjustment));
7219 			*dst_addr += addr_adjustment;
7220 		}
7221 	}
7222 
7223 	upl_unlock(upl);
7224 
7225 	return KERN_SUCCESS;
7226 }
7227 
7228 kern_return_t
7229 vm_map_enter_upl(
7230 	vm_map_t                map,
7231 	upl_t                   upl,
7232 	vm_map_offset_t         *dst_addr)
7233 {
7234 	upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7235 	return vm_map_enter_upl_range(map, upl, 0, upl_size, VM_PROT_DEFAULT, dst_addr);
7236 }
7237 
7238 /*
7239  * Internal routine to remove a UPL mapping from a VM map.
7240  *
7241  * XXX - This should just be doable through a standard
7242  * vm_map_remove() operation.  Otherwise, implicit clean-up
7243  * of the target map won't be able to correctly remove
7244  * these (and release the reference on the UPL).  Having
7245  * to do this means we can't map these into user-space
7246  * maps yet.
7247  */
7248 kern_return_t
7249 vm_map_remove_upl_range(
7250 	vm_map_t        map,
7251 	upl_t           upl,
7252 	__unused vm_object_offset_t    offset_to_unmap,
7253 	__unused upl_size_t      size_to_unmap)
7254 {
7255 	vm_address_t    addr;
7256 	upl_size_t      size;
7257 	int             isVectorUPL = 0, curr_upl = 0;
7258 	upl_t           vector_upl = NULL;
7259 
7260 	if (upl == UPL_NULL) {
7261 		return KERN_INVALID_ARGUMENT;
7262 	}
7263 
7264 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
7265 		int     unmapped = 0, valid_upls = 0;
7266 		vector_upl = upl;
7267 		upl_lock(vector_upl);
7268 		for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
7269 			upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
7270 			if (upl == NULL) {
7271 				continue;
7272 			}
7273 			valid_upls++;
7274 			if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
7275 				unmapped++;
7276 			}
7277 		}
7278 
7279 		if (unmapped) {
7280 			if (unmapped != valid_upls) {
7281 				panic("%d of the %d sub-upls within the Vector UPL is/are not mapped", unmapped, valid_upls);
7282 			} else {
7283 				upl_unlock(vector_upl);
7284 				return KERN_FAILURE;
7285 			}
7286 		}
7287 		curr_upl = 0;
7288 	} else {
7289 		upl_lock(upl);
7290 	}
7291 
7292 process_upl_to_remove:
7293 	if (isVectorUPL) {
7294 		if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
7295 			vm_map_t v_upl_submap;
7296 			vm_offset_t v_upl_submap_dst_addr;
7297 			vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
7298 
7299 			vm_map_remove(map, v_upl_submap_dst_addr,
7300 			    v_upl_submap_dst_addr + vector_upl->u_size,
7301 			    VM_MAP_REMOVE_NO_FLAGS);
7302 			vm_map_deallocate(v_upl_submap);
7303 			upl_unlock(vector_upl);
7304 			return KERN_SUCCESS;
7305 		}
7306 
7307 		upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7308 		if (upl == NULL) {
7309 			goto process_upl_to_remove;
7310 		}
7311 	}
7312 
7313 	if (upl->flags & UPL_PAGE_LIST_MAPPED) {
7314 		addr = upl->kaddr;
7315 		size = upl->u_mapped_size;
7316 
7317 		assert(upl->ref_count > 1);
7318 		upl->ref_count--;               /* removing mapping ref */
7319 
7320 		upl->flags &= ~UPL_PAGE_LIST_MAPPED;
7321 		upl->kaddr = (vm_offset_t) 0;
7322 		upl->u_mapped_size = 0;
7323 
7324 		if (!isVectorUPL) {
7325 			upl_unlock(upl);
7326 
7327 			vm_map_remove(
7328 				map,
7329 				vm_map_trunc_page(addr,
7330 				VM_MAP_PAGE_MASK(map)),
7331 				vm_map_round_page(addr + size,
7332 				VM_MAP_PAGE_MASK(map)),
7333 				VM_MAP_REMOVE_NO_FLAGS);
7334 			return KERN_SUCCESS;
7335 		} else {
7336 			/*
7337 			 * If it's a Vectored UPL, we'll be removing the entire
7338 			 * submap anyways, so no need to remove individual UPL
7339 			 * element mappings from within the submap
7340 			 */
7341 			goto process_upl_to_remove;
7342 		}
7343 	}
7344 	upl_unlock(upl);
7345 
7346 	return KERN_FAILURE;
7347 }
7348 
7349 kern_return_t
7350 vm_map_remove_upl(
7351 	vm_map_t        map,
7352 	upl_t           upl)
7353 {
7354 	upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7355 	return vm_map_remove_upl_range(map, upl, 0, upl_size);
7356 }
7357 
7358 kern_return_t
7359 upl_commit_range(
7360 	upl_t                   upl,
7361 	upl_offset_t            offset,
7362 	upl_size_t              size,
7363 	int                     flags,
7364 	upl_page_info_t         *page_list,
7365 	mach_msg_type_number_t  count,
7366 	boolean_t               *empty)
7367 {
7368 	upl_size_t              xfer_size, subupl_size;
7369 	vm_object_t             shadow_object;
7370 	vm_object_t             object;
7371 	vm_object_t             m_object;
7372 	vm_object_offset_t      target_offset;
7373 	upl_offset_t            subupl_offset = offset;
7374 	int                     entry;
7375 	wpl_array_t             lite_list;
7376 	int                     occupied;
7377 	int                     clear_refmod = 0;
7378 	int                     pgpgout_count = 0;
7379 	struct  vm_page_delayed_work    dw_array;
7380 	struct  vm_page_delayed_work    *dwp, *dwp_start;
7381 	bool                    dwp_finish_ctx = TRUE;
7382 	int                     dw_count;
7383 	int                     dw_limit;
7384 	int                     isVectorUPL = 0;
7385 	upl_t                   vector_upl = NULL;
7386 	boolean_t               should_be_throttled = FALSE;
7387 
7388 	vm_page_t               nxt_page = VM_PAGE_NULL;
7389 	int                     fast_path_possible = 0;
7390 	int                     fast_path_full_commit = 0;
7391 	int                     throttle_page = 0;
7392 	int                     unwired_count = 0;
7393 	int                     local_queue_count = 0;
7394 	vm_page_t               first_local, last_local;
7395 	vm_object_offset_t      obj_start, obj_end, obj_offset;
7396 	kern_return_t           kr = KERN_SUCCESS;
7397 
7398 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
7399 
7400 	dwp_start = dwp = NULL;
7401 
7402 	subupl_size = size;
7403 	*empty = FALSE;
7404 
7405 	if (upl == UPL_NULL) {
7406 		return KERN_INVALID_ARGUMENT;
7407 	}
7408 
7409 	dw_count = 0;
7410 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
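	/*
	 * Try to grab a preallocated delayed-work context; if none is
	 * available, fall back to a single on-stack entry (dw_limit of 1),
	 * so the delayed work is flushed one page at a time.
	 */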
7411 	dwp_start = vm_page_delayed_work_get_ctx();
7412 	if (dwp_start == NULL) {
7413 		dwp_start = &dw_array;
7414 		dw_limit = 1;
7415 		dwp_finish_ctx = FALSE;
7416 	}
7417 
7418 	dwp = dwp_start;
7419 
7420 	if (count == 0) {
7421 		page_list = NULL;
7422 	}
7423 
7424 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
7425 		vector_upl = upl;
7426 		upl_lock(vector_upl);
7427 	} else {
7428 		upl_lock(upl);
7429 	}
7430 
7431 process_upl_to_commit:
7432 
7433 	if (isVectorUPL) {
7434 		size = subupl_size;
7435 		offset = subupl_offset;
7436 		if (size == 0) {
7437 			upl_unlock(vector_upl);
7438 			kr = KERN_SUCCESS;
7439 			goto done;
7440 		}
7441 		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7442 		if (upl == NULL) {
7443 			upl_unlock(vector_upl);
7444 			kr = KERN_FAILURE;
7445 			goto done;
7446 		}
7447 		page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
7448 		subupl_size -= size;
7449 		subupl_offset += size;
7450 	}
7451 
7452 #if UPL_DEBUG
7453 	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7454 		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
7455 
7456 		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7457 		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7458 
7459 		upl->upl_commit_index++;
7460 	}
7461 #endif
7462 	if (upl->flags & UPL_DEVICE_MEMORY) {
7463 		xfer_size = 0;
7464 	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
7465 		xfer_size = size;
7466 	} else {
7467 		if (!isVectorUPL) {
7468 			upl_unlock(upl);
7469 		} else {
7470 			upl_unlock(vector_upl);
7471 		}
7472 		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
7473 		kr = KERN_FAILURE;
7474 		goto done;
7475 	}
7476 	if (upl->flags & UPL_SET_DIRTY) {
7477 		flags |= UPL_COMMIT_SET_DIRTY;
7478 	}
7479 	if (upl->flags & UPL_CLEAR_DIRTY) {
7480 		flags |= UPL_COMMIT_CLEAR_DIRTY;
7481 	}
7482 
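	/*
	 * For an internal UPL the page bitmap (lite_list) lives in the same
	 * allocation as the upl structure, after the upl_page_info_t array;
	 * otherwise it immediately follows the upl structure.
	 */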
7483 	if (upl->flags & UPL_INTERNAL) {
7484 		lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
7485 		    + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
7486 	} else {
7487 		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
7488 	}
7489 
7490 	object = upl->map_object;
7491 
7492 	if (upl->flags & UPL_SHADOWED) {
7493 		vm_object_lock(object);
7494 		shadow_object = object->shadow;
7495 	} else {
7496 		shadow_object = object;
7497 	}
7498 	entry = offset / PAGE_SIZE;
7499 	target_offset = (vm_object_offset_t)offset;
7500 
7501 	if (upl->flags & UPL_KERNEL_OBJECT) {
7502 		vm_object_lock_shared(shadow_object);
7503 	} else {
7504 		vm_object_lock(shadow_object);
7505 	}
7506 
7507 	VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
7508 
7509 	if (upl->flags & UPL_ACCESS_BLOCKED) {
7510 		assert(shadow_object->blocked_access);
7511 		shadow_object->blocked_access = FALSE;
7512 		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7513 	}
7514 
7515 	if (shadow_object->code_signed) {
7516 		/*
7517 		 * CODE SIGNING:
7518 		 * If the object is code-signed, do not let this UPL tell
7519 		 * us if the pages are valid or not.  Let the pages be
7520 		 * validated by VM the normal way (when they get mapped or
7521 		 * copied).
7522 		 */
7523 		flags &= ~UPL_COMMIT_CS_VALIDATED;
7524 	}
7525 	if (!page_list) {
7526 		/*
7527 		 * No page list to get the code-signing info from !?
7528 		 */
7529 		flags &= ~UPL_COMMIT_CS_VALIDATED;
7530 	}
7531 	if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
7532 		should_be_throttled = TRUE;
7533 	}
7534 
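	/*
	 * Fast path: for an I/O-wired, non-vector UPL against an object that
	 * is neither volatile nor empty purgeable (and when absent pages are
	 * not being freed), pages whose wire count drops to zero are gathered
	 * onto a local list and spliced onto the appropriate global page
	 * queue in a single operation after the commit loop.
	 */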
7535 	if ((upl->flags & UPL_IO_WIRE) &&
7536 	    !(flags & UPL_COMMIT_FREE_ABSENT) &&
7537 	    !isVectorUPL &&
7538 	    shadow_object->purgable != VM_PURGABLE_VOLATILE &&
7539 	    shadow_object->purgable != VM_PURGABLE_EMPTY) {
7540 		if (!vm_page_queue_empty(&shadow_object->memq)) {
7541 			if (size == shadow_object->vo_size) {
7542 				nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
7543 				fast_path_full_commit = 1;
7544 			}
7545 			fast_path_possible = 1;
7546 
7547 			if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
7548 			    (shadow_object->purgable == VM_PURGABLE_DENY ||
7549 			    shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
7550 			    shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
7551 				throttle_page = 1;
7552 			}
7553 		}
7554 	}
7555 	first_local = VM_PAGE_NULL;
7556 	last_local = VM_PAGE_NULL;
7557 
7558 	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
7559 	obj_end = obj_start + xfer_size;
7560 	obj_start = vm_object_trunc_page(obj_start);
7561 	obj_end = vm_object_round_page(obj_end);
7562 	for (obj_offset = obj_start;
7563 	    obj_offset < obj_end;
7564 	    obj_offset += PAGE_SIZE) {
7565 		vm_page_t       t, m;
7566 
7567 		dwp->dw_mask = 0;
7568 		clear_refmod = 0;
7569 
7570 		m = VM_PAGE_NULL;
7571 
7572 		if (upl->flags & UPL_LITE) {
7573 			unsigned int    pg_num;
7574 
7575 			if (nxt_page != VM_PAGE_NULL) {
7576 				m = nxt_page;
7577 				nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
7578 				target_offset = m->vmp_offset;
7579 			}
7580 			pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7581 			assert(pg_num == target_offset / PAGE_SIZE);
7582 
7583 			if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
7584 				lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
7585 
7586 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7587 					m = vm_page_lookup(shadow_object, obj_offset);
7588 				}
7589 			} else {
7590 				m = NULL;
7591 			}
7592 		}
7593 		if (upl->flags & UPL_SHADOWED) {
7594 			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7595 				t->vmp_free_when_done = FALSE;
7596 
7597 				VM_PAGE_FREE(t);
7598 
7599 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7600 					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7601 				}
7602 			}
7603 		}
7604 		if (m == VM_PAGE_NULL) {
7605 			goto commit_next_page;
7606 		}
7607 
7608 		m_object = VM_PAGE_OBJECT(m);
7609 
7610 		if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7611 			assert(m->vmp_busy);
7612 
7613 			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7614 			goto commit_next_page;
7615 		}
7616 
7617 		if (flags & UPL_COMMIT_CS_VALIDATED) {
7618 			/*
7619 			 * CODE SIGNING:
7620 			 * Set the code signing bits according to
7621 			 * what the UPL says they should be.
7622 			 */
7623 			m->vmp_cs_validated |= page_list[entry].cs_validated;
7624 			m->vmp_cs_tainted |= page_list[entry].cs_tainted;
7625 			m->vmp_cs_nx |= page_list[entry].cs_nx;
7626 		}
7627 		if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
7628 			m->vmp_written_by_kernel = TRUE;
7629 		}
7630 
7631 		if (upl->flags & UPL_IO_WIRE) {
7632 			if (page_list) {
7633 				page_list[entry].phys_addr = 0;
7634 			}
7635 
7636 			if (flags & UPL_COMMIT_SET_DIRTY) {
7637 				SET_PAGE_DIRTY(m, FALSE);
7638 			} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7639 				m->vmp_dirty = FALSE;
7640 
7641 				if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7642 				    m->vmp_cs_validated &&
7643 				    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7644 					/*
7645 					 * CODE SIGNING:
7646 					 * This page is no longer dirty
7647 					 * but could have been modified,
7648 					 * so it will need to be
7649 					 * re-validated.
7650 					 */
7651 					m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7652 
7653 					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7654 
7655 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7656 				}
7657 				clear_refmod |= VM_MEM_MODIFIED;
7658 			}
7659 			if (upl->flags & UPL_ACCESS_BLOCKED) {
7660 				/*
7661 				 * We blocked access to the pages in this UPL.
7662 				 * Clear the "busy" bit and wake up any waiter
7663 				 * for this page.
7664 				 */
7665 				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7666 			}
7667 			if (fast_path_possible) {
7668 				assert(m_object->purgable != VM_PURGABLE_EMPTY);
7669 				assert(m_object->purgable != VM_PURGABLE_VOLATILE);
7670 				if (m->vmp_absent) {
7671 					assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
7672 					assert(m->vmp_wire_count == 0);
7673 					assert(m->vmp_busy);
7674 
7675 					m->vmp_absent = FALSE;
7676 					dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7677 				} else {
7678 					if (m->vmp_wire_count == 0) {
7679 						panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
7680 					}
7681 					assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
7682 
7683 					/*
7684 					 * XXX FBDP need to update some other
7685 					 * counters here (purgeable_wired_count)
7686 					 * (ledgers), ...
7687 					 */
7688 					assert(m->vmp_wire_count > 0);
7689 					m->vmp_wire_count--;
7690 
7691 					if (m->vmp_wire_count == 0) {
7692 						m->vmp_q_state = VM_PAGE_NOT_ON_Q;
7693 						unwired_count++;
7694 					}
7695 				}
7696 				if (m->vmp_wire_count == 0) {
7697 					assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7698 
7699 					if (last_local == VM_PAGE_NULL) {
7700 						assert(first_local == VM_PAGE_NULL);
7701 
7702 						last_local = m;
7703 						first_local = m;
7704 					} else {
7705 						assert(first_local != VM_PAGE_NULL);
7706 
7707 						m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7708 						first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
7709 						first_local = m;
7710 					}
7711 					local_queue_count++;
7712 
7713 					if (throttle_page) {
7714 						m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
7715 					} else {
7716 						if (flags & UPL_COMMIT_INACTIVATE) {
7717 							if (shadow_object->internal) {
7718 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
7719 							} else {
7720 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
7721 							}
7722 						} else {
7723 							m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
7724 						}
7725 					}
7726 				}
7727 			} else {
7728 				if (flags & UPL_COMMIT_INACTIVATE) {
7729 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
7730 					clear_refmod |= VM_MEM_REFERENCED;
7731 				}
7732 				if (m->vmp_absent) {
7733 					if (flags & UPL_COMMIT_FREE_ABSENT) {
7734 						dwp->dw_mask |= DW_vm_page_free;
7735 					} else {
7736 						m->vmp_absent = FALSE;
7737 						dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7738 
7739 						if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
7740 							dwp->dw_mask |= DW_vm_page_activate;
7741 						}
7742 					}
7743 				} else {
7744 					dwp->dw_mask |= DW_vm_page_unwire;
7745 				}
7746 			}
7747 			goto commit_next_page;
7748 		}
7749 		assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7750 
7751 		if (page_list) {
7752 			page_list[entry].phys_addr = 0;
7753 		}
7754 
7755 		/*
7756 		 * make sure to clear the hardware
7757 		 * modify or reference bits before
7758 		 * releasing the BUSY bit on this page;
7759 		 * otherwise we risk losing a legitimate
7760 		 * change of state
7761 		 */
7762 		if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7763 			m->vmp_dirty = FALSE;
7764 
7765 			clear_refmod |= VM_MEM_MODIFIED;
7766 		}
7767 		if (m->vmp_laundry) {
7768 			dwp->dw_mask |= DW_vm_pageout_throttle_up;
7769 		}
7770 
7771 		if (VM_PAGE_WIRED(m)) {
7772 			m->vmp_free_when_done = FALSE;
7773 		}
7774 
7775 		if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7776 		    m->vmp_cs_validated &&
7777 		    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7778 			/*
7779 			 * CODE SIGNING:
7780 			 * This page is no longer dirty
7781 			 * but could have been modified,
7782 			 * so it will need to be
7783 			 * re-validated.
7784 			 */
7785 			m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7786 
7787 			VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7788 
7789 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7790 		}
7791 		if (m->vmp_overwriting) {
7792 			/*
7793 			 * the (COPY_OUT_FROM == FALSE) request_page_list case
7794 			 */
7795 			if (m->vmp_busy) {
7796 #if CONFIG_PHANTOM_CACHE
7797 				if (m->vmp_absent && !m_object->internal) {
7798 					dwp->dw_mask |= DW_vm_phantom_cache_update;
7799 				}
7800 #endif
7801 				m->vmp_absent = FALSE;
7802 
7803 				dwp->dw_mask |= DW_clear_busy;
7804 			} else {
7805 				/*
7806 				 * alternate (COPY_OUT_FROM == FALSE) page_list case
7807 				 * Occurs when the original page was wired
7808 				 * at the time of the list request
7809 				 */
7810 				assert(VM_PAGE_WIRED(m));
7811 
7812 				dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
7813 			}
7814 			m->vmp_overwriting = FALSE;
7815 		}
7816 		m->vmp_cleaning = FALSE;
7817 
7818 		if (m->vmp_free_when_done) {
7819 			/*
7820 			 * With the clean queue enabled, UPL_PAGEOUT should
7821 			 * no longer set the pageout bit. Its pages now go
7822 			 * to the clean queue.
7823 			 *
7824 			 * We don't use the cleaned Q anymore and so this
7825 			 * assert isn't correct. The code for the clean Q
7826 			 * still exists and might be used in the future. If we
7827 			 * go back to the cleaned Q, we will re-enable this
7828 			 * assert.
7829 			 *
7830 			 * assert(!(upl->flags & UPL_PAGEOUT));
7831 			 */
7832 			assert(!m_object->internal);
7833 
7834 			m->vmp_free_when_done = FALSE;
7835 
7836 			if ((flags & UPL_COMMIT_SET_DIRTY) ||
7837 			    (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
7838 				/*
7839 				 * page was re-dirtied after we started
7840 				 * the pageout... reactivate it since
7841 				 * we don't know whether the on-disk
7842 				 * copy matches what is now in memory
7843 				 */
7844 				SET_PAGE_DIRTY(m, FALSE);
7845 
7846 				dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
7847 
7848 				if (upl->flags & UPL_PAGEOUT) {
7849 					counter_inc(&vm_statistics_reactivations);
7850 					DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
7851 				}
7852 			} else {
7853 				/*
7854 				 * page has been successfully cleaned
7855 				 * go ahead and free it for other use
7856 				 */
7857 				if (m_object->internal) {
7858 					DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
7859 				} else {
7860 					DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
7861 				}
7862 				m->vmp_dirty = FALSE;
7863 				m->vmp_busy = TRUE;
7864 
7865 				dwp->dw_mask |= DW_vm_page_free;
7866 			}
7867 			goto commit_next_page;
7868 		}
7869 		/*
7870 		 * It is a part of the semantic of COPYOUT_FROM
7871 		 * UPLs that a commit implies cache sync
7872 		 * between the vm page and the backing store
7873 		 * this can be used to strip the precious bit
7874 		 * as well as clean
7875 		 */
7876 		if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
7877 			m->vmp_precious = FALSE;
7878 		}
7879 
7880 		if (flags & UPL_COMMIT_SET_DIRTY) {
7881 			SET_PAGE_DIRTY(m, FALSE);
7882 		} else {
7883 			m->vmp_dirty = FALSE;
7884 		}
7885 
7886 		/* with the clean queue on, move *all* cleaned pages to the clean queue */
7887 		if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
7888 			pgpgout_count++;
7889 
7890 			counter_inc(&vm_statistics_pageouts);
7891 			DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
7892 
7893 			dwp->dw_mask |= DW_enqueue_cleaned;
7894 		} else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
7895 			/*
7896 			 * page coming back in from being 'frozen'...
7897 			 * it was dirty before it was frozen, so keep it dirty so that
7898 			 * vm_page_activate will notice that it really belongs
7899 			 * on the throttle queue and put it there
7900 			 */
7901 			SET_PAGE_DIRTY(m, FALSE);
7902 			dwp->dw_mask |= DW_vm_page_activate;
7903 		} else {
7904 			if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
7905 				dwp->dw_mask |= DW_vm_page_deactivate_internal;
7906 				clear_refmod |= VM_MEM_REFERENCED;
7907 			} else if (!VM_PAGE_PAGEABLE(m)) {
7908 				if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
7909 					dwp->dw_mask |= DW_vm_page_speculate;
7910 				} else if (m->vmp_reference) {
7911 					dwp->dw_mask |= DW_vm_page_activate;
7912 				} else {
7913 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
7914 					clear_refmod |= VM_MEM_REFERENCED;
7915 				}
7916 			}
7917 		}
7918 		if (upl->flags & UPL_ACCESS_BLOCKED) {
7919 			/*
7920 			 * We blocked access to the pages in this UPL.
7921 			 * Clear the "busy" bit on this page before we
7922 			 * wake up any waiter.
7923 			 */
7924 			dwp->dw_mask |= DW_clear_busy;
7925 		}
7926 		/*
7927 		 * Wake up any thread waiting for the page to no longer be "cleaning".
7928 		 */
7929 		dwp->dw_mask |= DW_PAGE_WAKEUP;
7930 
7931 commit_next_page:
7932 		if (clear_refmod) {
7933 			pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
7934 		}
7935 
7936 		target_offset += PAGE_SIZE_64;
7937 		xfer_size -= PAGE_SIZE;
7938 		entry++;
7939 
7940 		if (dwp->dw_mask) {
7941 			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
7942 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
7943 
7944 				if (dw_count >= dw_limit) {
7945 					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
7946 
7947 					dwp = dwp_start;
7948 					dw_count = 0;
7949 				}
7950 			} else {
7951 				if (dwp->dw_mask & DW_clear_busy) {
7952 					m->vmp_busy = FALSE;
7953 				}
7954 
7955 				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7956 					PAGE_WAKEUP(m);
7957 				}
7958 			}
7959 		}
7960 	}
7961 	if (dw_count) {
7962 		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
7963 		dwp = dwp_start;
7964 		dw_count = 0;
7965 	}
7966 
7967 	if (fast_path_possible) {
7968 		assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
7969 		assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
7970 
7971 		if (local_queue_count || unwired_count) {
7972 			if (local_queue_count) {
7973 				vm_page_t       first_target;
7974 				vm_page_queue_head_t    *target_queue;
7975 
7976 				if (throttle_page) {
7977 					target_queue = &vm_page_queue_throttled;
7978 				} else {
7979 					if (flags & UPL_COMMIT_INACTIVATE) {
7980 						if (shadow_object->internal) {
7981 							target_queue = &vm_page_queue_anonymous;
7982 						} else {
7983 							target_queue = &vm_page_queue_inactive;
7984 						}
7985 					} else {
7986 						target_queue = &vm_page_queue_active;
7987 					}
7988 				}
7989 				/*
7990 				 * Transfer the entire local queue to the appropriate regular LRU page queue.
7991 				 */
7992 				vm_page_lockspin_queues();
7993 
7994 				first_target = (vm_page_t) vm_page_queue_first(target_queue);
7995 
7996 				if (vm_page_queue_empty(target_queue)) {
7997 					target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7998 				} else {
7999 					first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
8000 				}
8001 
8002 				target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
8003 				first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
8004 				last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
8005 
8006 				/*
8007 				 * Adjust the global page counts.
8008 				 */
8009 				if (throttle_page) {
8010 					vm_page_throttled_count += local_queue_count;
8011 				} else {
8012 					if (flags & UPL_COMMIT_INACTIVATE) {
8013 						if (shadow_object->internal) {
8014 							vm_page_anonymous_count += local_queue_count;
8015 						}
8016 						vm_page_inactive_count += local_queue_count;
8017 
8018 						token_new_pagecount += local_queue_count;
8019 					} else {
8020 						vm_page_active_count += local_queue_count;
8021 					}
8022 
8023 					if (shadow_object->internal) {
8024 						vm_page_pageable_internal_count += local_queue_count;
8025 					} else {
8026 						vm_page_pageable_external_count += local_queue_count;
8027 					}
8028 				}
8029 			} else {
8030 				vm_page_lockspin_queues();
8031 			}
8032 			if (unwired_count) {
8033 				vm_page_wire_count -= unwired_count;
8034 				VM_CHECK_MEMORYSTATUS;
8035 			}
8036 			vm_page_unlock_queues();
8037 
8038 			VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
8039 		}
8040 	}
8041 	occupied = 1;
8042 
8043 	if (upl->flags & UPL_DEVICE_MEMORY) {
8044 		occupied = 0;
8045 	} else if (upl->flags & UPL_LITE) {
8046 		int     pg_num;
8047 		int     i;
8048 
8049 		occupied = 0;
8050 
8051 		if (!fast_path_full_commit) {
8052 			pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
8053 			pg_num = (pg_num + 31) >> 5;
8054 
8055 			for (i = 0; i < pg_num; i++) {
8056 				if (lite_list[i] != 0) {
8057 					occupied = 1;
8058 					break;
8059 				}
8060 			}
8061 		}
8062 	} else {
8063 		if (vm_page_queue_empty(&upl->map_object->memq)) {
8064 			occupied = 0;
8065 		}
8066 	}
8067 	if (occupied == 0) {
8068 		/*
8069 		 * If this UPL element belongs to a Vector UPL and is
8070 		 * empty, then this is the right function to deallocate
8071 		 * it. So go ahead and set the *empty variable. The flag
8072 		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view,
8073 		 * should be considered relevant for the Vector UPL and not
8074 		 * the internal UPLs.
8075 		 */
8076 		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8077 			*empty = TRUE;
8078 		}
8079 
8080 		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8081 			/*
8082 			 * this is not a paging object
8083 			 * so we need to drop the paging reference
8084 			 * that was taken when we created the UPL
8085 			 * against this object
8086 			 */
8087 			vm_object_activity_end(shadow_object);
8088 			vm_object_collapse(shadow_object, 0, TRUE);
8089 		} else {
8090 			/*
8091 			 * we donated the paging reference to
8092 			 * the map object... vm_pageout_object_terminate
8093 			 * will drop this reference
8094 			 */
8095 		}
8096 	}
8097 	VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
8098 	vm_object_unlock(shadow_object);
8099 	if (object != shadow_object) {
8100 		vm_object_unlock(object);
8101 	}
8102 
8103 	if (!isVectorUPL) {
8104 		upl_unlock(upl);
8105 	} else {
8106 		/*
8107 		 * If we completed our operations on a UPL that is
8108 		 * part of a Vectored UPL and if empty is TRUE, then
8109 		 * we should go ahead and deallocate this UPL element.
8110 		 * Then we check if this was the last of the UPL elements
8111 		 * within that Vectored UPL. If so, set empty to TRUE
8112 		 * so that in ubc_upl_commit_range or ubc_upl_commit, we
8113 		 * can go ahead and deallocate the Vector UPL too.
8114 		 */
8115 		if (*empty == TRUE) {
8116 			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
8117 			upl_deallocate(upl);
8118 		}
8119 		goto process_upl_to_commit;
8120 	}
8121 	if (pgpgout_count) {
8122 		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
8123 	}
8124 
8125 	kr = KERN_SUCCESS;
8126 done:
8127 	if (dwp_start && dwp_finish_ctx) {
8128 		vm_page_delayed_work_finish_ctx(dwp_start);
8129 		dwp_start = dwp = NULL;
8130 	}
8131 
8132 	return kr;
8133 }
8134 
8135 kern_return_t
8136 upl_abort_range(
8137 	upl_t                   upl,
8138 	upl_offset_t            offset,
8139 	upl_size_t              size,
8140 	int                     error,
8141 	boolean_t               *empty)
8142 {
8143 	upl_page_info_t         *user_page_list = NULL;
8144 	upl_size_t              xfer_size, subupl_size;
8145 	vm_object_t             shadow_object;
8146 	vm_object_t             object;
8147 	vm_object_offset_t      target_offset;
8148 	upl_offset_t            subupl_offset = offset;
8149 	int                     entry;
8150 	wpl_array_t             lite_list;
8151 	int                     occupied;
8152 	struct  vm_page_delayed_work    dw_array;
8153 	struct  vm_page_delayed_work    *dwp, *dwp_start;
8154 	bool                    dwp_finish_ctx = TRUE;
8155 	int                     dw_count;
8156 	int                     dw_limit;
8157 	int                     isVectorUPL = 0;
8158 	upl_t                   vector_upl = NULL;
8159 	vm_object_offset_t      obj_start, obj_end, obj_offset;
8160 	kern_return_t           kr = KERN_SUCCESS;
8161 
8162 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
8163 
8164 	dwp_start = dwp = NULL;
8165 
8166 	subupl_size = size;
8167 	*empty = FALSE;
8168 
8169 	if (upl == UPL_NULL) {
8170 		return KERN_INVALID_ARGUMENT;
8171 	}
8172 
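	/*
	 * For an I/O-wired UPL, unless the caller explicitly asked for the
	 * pages to be dumped, an abort is handled as a commit that frees
	 * any absent pages.
	 */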
8173 	if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
8174 		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
8175 	}
8176 
8177 	dw_count = 0;
8178 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
8179 	dwp_start = vm_page_delayed_work_get_ctx();
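	/*
	 * If a delayed-work context could not be allocated, fall back to a
	 * single on-stack entry; a dw_limit of 1 forces a flush after every
	 * page that queues delayed work.
	 */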
8180 	if (dwp_start == NULL) {
8181 		dwp_start = &dw_array;
8182 		dw_limit = 1;
8183 		dwp_finish_ctx = FALSE;
8184 	}
8185 
8186 	dwp = dwp_start;
8187 
8188 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
8189 		vector_upl = upl;
8190 		upl_lock(vector_upl);
8191 	} else {
8192 		upl_lock(upl);
8193 	}
8194 
8195 process_upl_to_abort:
8196 	if (isVectorUPL) {
8197 		size = subupl_size;
8198 		offset = subupl_offset;
8199 		if (size == 0) {
8200 			upl_unlock(vector_upl);
8201 			kr = KERN_SUCCESS;
8202 			goto done;
8203 		}
8204 		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
8205 		if (upl == NULL) {
8206 			upl_unlock(vector_upl);
8207 			kr = KERN_FAILURE;
8208 			goto done;
8209 		}
8210 		subupl_size -= size;
8211 		subupl_offset += size;
8212 	}
8213 
8214 	*empty = FALSE;
8215 
8216 #if UPL_DEBUG
8217 	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
8218 		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
8219 
8220 		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
8221 		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
8222 		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
8223 
8224 		upl->upl_commit_index++;
8225 	}
8226 #endif
8227 	if (upl->flags & UPL_DEVICE_MEMORY) {
8228 		xfer_size = 0;
8229 	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
8230 		xfer_size = size;
8231 	} else {
8232 		if (!isVectorUPL) {
8233 			upl_unlock(upl);
8234 		} else {
8235 			upl_unlock(vector_upl);
8236 		}
8237 		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
8238 		kr = KERN_FAILURE;
8239 		goto done;
8240 	}
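	/*
	 * Internal UPLs carry the upl_page_info_t array immediately after
	 * the upl structure, with the lite bitmap following it; external
	 * UPLs carry only the lite bitmap after the structure.
	 */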
8241 	if (upl->flags & UPL_INTERNAL) {
8242 		lite_list = (wpl_array_t)
8243 		    ((((uintptr_t)upl) + sizeof(struct upl))
8244 		    + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
8245 
8246 		user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
8247 	} else {
8248 		lite_list = (wpl_array_t)
8249 		    (((uintptr_t)upl) + sizeof(struct upl));
8250 	}
8251 	object = upl->map_object;
8252 
8253 	if (upl->flags & UPL_SHADOWED) {
8254 		vm_object_lock(object);
8255 		shadow_object = object->shadow;
8256 	} else {
8257 		shadow_object = object;
8258 	}
8259 
8260 	entry = offset / PAGE_SIZE;
8261 	target_offset = (vm_object_offset_t)offset;
8262 
8263 	if (upl->flags & UPL_KERNEL_OBJECT) {
8264 		vm_object_lock_shared(shadow_object);
8265 	} else {
8266 		vm_object_lock(shadow_object);
8267 	}
8268 
8269 	if (upl->flags & UPL_ACCESS_BLOCKED) {
8270 		assert(shadow_object->blocked_access);
8271 		shadow_object->blocked_access = FALSE;
8272 		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
8273 	}
8274 
8275 	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
8276 		panic("upl_abort_range: kernel_object being DUMPED");
8277 	}
8278 
8279 	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
8280 	obj_end = obj_start + xfer_size;
8281 	obj_start = vm_object_trunc_page(obj_start);
8282 	obj_end = vm_object_round_page(obj_end);
8283 	for (obj_offset = obj_start;
8284 	    obj_offset < obj_end;
8285 	    obj_offset += PAGE_SIZE) {
8286 		vm_page_t       t, m;
8287 		unsigned int    pg_num;
8288 		boolean_t       needed;
8289 
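		/*
		 * The page index is derived from a 64-bit offset; the assert
		 * below catches any truncation in the 32-bit cast.
		 */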
8290 		pg_num = (unsigned int) (target_offset / PAGE_SIZE);
8291 		assert(pg_num == target_offset / PAGE_SIZE);
8292 
8293 		needed = FALSE;
8294 
8295 		if (user_page_list) {
8296 			needed = user_page_list[pg_num].needed;
8297 		}
8298 
8299 		dwp->dw_mask = 0;
8300 		m = VM_PAGE_NULL;
8301 
8302 		if (upl->flags & UPL_LITE) {
8303 			if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
8304 				lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
8305 
8306 				if (!(upl->flags & UPL_KERNEL_OBJECT)) {
8307 					m = vm_page_lookup(shadow_object, obj_offset);
8308 				}
8309 			}
8310 		}
8311 		if (upl->flags & UPL_SHADOWED) {
8312 			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
8313 				t->vmp_free_when_done = FALSE;
8314 
8315 				VM_PAGE_FREE(t);
8316 
8317 				if (m == VM_PAGE_NULL) {
8318 					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
8319 				}
8320 			}
8321 		}
8322 		if ((upl->flags & UPL_KERNEL_OBJECT)) {
8323 			goto abort_next_page;
8324 		}
8325 
8326 		if (m != VM_PAGE_NULL) {
8327 			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8328 
8329 			if (m->vmp_absent) {
8330 				boolean_t must_free = TRUE;
8331 
8332 				/*
8333 				 * COPYOUT = FALSE case
8334 				 * check for error conditions which must
8335 				 * be passed back to the page's customer
8336 				 */
8337 				if (error & UPL_ABORT_RESTART) {
8338 					m->vmp_restart = TRUE;
8339 					m->vmp_absent = FALSE;
8340 					m->vmp_unusual = TRUE;
8341 					must_free = FALSE;
8342 				} else if (error & UPL_ABORT_UNAVAILABLE) {
8343 					m->vmp_restart = FALSE;
8344 					m->vmp_unusual = TRUE;
8345 					must_free = FALSE;
8346 				} else if (error & UPL_ABORT_ERROR) {
8347 					m->vmp_restart = FALSE;
8348 					m->vmp_absent = FALSE;
8349 					m->vmp_error = TRUE;
8350 					m->vmp_unusual = TRUE;
8351 					must_free = FALSE;
8352 				}
8353 				if (m->vmp_clustered && needed == FALSE) {
8354 					/*
8355 					 * This page was a part of a speculative
8356 					 * read-ahead initiated by the kernel
8357 					 * itself.  No one is expecting this
8358 					 * page and no one will clean up its
8359 					 * error state if it ever becomes valid
8360 					 * in the future.
8361 					 * We have to free it here.
8362 					 */
8363 					must_free = TRUE;
8364 				}
8365 				m->vmp_cleaning = FALSE;
8366 
8367 				if (m->vmp_overwriting && !m->vmp_busy) {
8368 					/*
8369 					 * this shouldn't happen since
8370 					 * this is an 'absent' page, but
8371 					 * it doesn't hurt to check for
8372 					 * the 'alternate' method of
8373 					 * stabilizing the page...
8374 					 * we will mark 'busy' to be cleared
8375 					 * in the following code which will
8376 					 * take care of the primary stabilization
8377 					 * method (i.e. setting 'busy' to TRUE)
8378 					 */
8379 					dwp->dw_mask |= DW_vm_page_unwire;
8380 				}
8381 				m->vmp_overwriting = FALSE;
8382 
8383 				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
8384 
8385 				if (must_free == TRUE) {
8386 					dwp->dw_mask |= DW_vm_page_free;
8387 				} else {
8388 					dwp->dw_mask |= DW_vm_page_activate;
8389 				}
8390 			} else {
8391 				/*
8392 				 * Handle the trusted pager throttle.
8393 				 */
8394 				if (m->vmp_laundry) {
8395 					dwp->dw_mask |= DW_vm_pageout_throttle_up;
8396 				}
8397 
8398 				if (upl->flags & UPL_ACCESS_BLOCKED) {
8399 					/*
8400 					 * We blocked access to the pages in this UPL.
8401 					 * Clear the "busy" bit and wake up any waiter
8402 					 * for this page.
8403 					 */
8404 					dwp->dw_mask |= DW_clear_busy;
8405 				}
8406 				if (m->vmp_overwriting) {
8407 					if (m->vmp_busy) {
8408 						dwp->dw_mask |= DW_clear_busy;
8409 					} else {
8410 						/*
8411 						 * deal with the 'alternate' method
8412 						 * of stabilizing the page...
8413 						 * we will either free the page
8414 						 * or mark 'busy' to be cleared
8415 						 * in the following code which will
8416 						 * take care of the primary stabilization
8417 						 * method (i.e. setting 'busy' to TRUE)
8418 						 */
8419 						dwp->dw_mask |= DW_vm_page_unwire;
8420 					}
8421 					m->vmp_overwriting = FALSE;
8422 				}
8423 				m->vmp_free_when_done = FALSE;
8424 				m->vmp_cleaning = FALSE;
8425 
8426 				if (error & UPL_ABORT_DUMP_PAGES) {
8427 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8428 
8429 					dwp->dw_mask |= DW_vm_page_free;
8430 				} else {
8431 					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
8432 						if (error & UPL_ABORT_REFERENCE) {
8433 							/*
8434 							 * we've been told to explicitly
8435 							 * reference this page... for
8436 							 * file I/O, this is done by
8437 							 * implementing an LRU on the inactive q
8438 							 */
8439 							dwp->dw_mask |= DW_vm_page_lru;
8440 						} else if (!VM_PAGE_PAGEABLE(m)) {
8441 							dwp->dw_mask |= DW_vm_page_deactivate_internal;
8442 						}
8443 					}
8444 					dwp->dw_mask |= DW_PAGE_WAKEUP;
8445 				}
8446 			}
8447 		}
8448 abort_next_page:
8449 		target_offset += PAGE_SIZE_64;
8450 		xfer_size -= PAGE_SIZE;
8451 		entry++;
8452 
8453 		if (dwp->dw_mask) {
8454 			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
8455 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
8456 
8457 				if (dw_count >= dw_limit) {
8458 					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8459 
8460 					dwp = dwp_start;
8461 					dw_count = 0;
8462 				}
8463 			} else {
8464 				if (dwp->dw_mask & DW_clear_busy) {
8465 					m->vmp_busy = FALSE;
8466 				}
8467 
8468 				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
8469 					PAGE_WAKEUP(m);
8470 				}
8471 			}
8472 		}
8473 	}
8474 	if (dw_count) {
8475 		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8476 		dwp = dwp_start;
8477 		dw_count = 0;
8478 	}
8479 
8480 	occupied = 1;
8481 
8482 	if (upl->flags & UPL_DEVICE_MEMORY) {
8483 		occupied = 0;
8484 	} else if (upl->flags & UPL_LITE) {
8485 		int     pg_num;
8486 		int     i;
8487 
8488 		pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
8489 		pg_num = (pg_num + 31) >> 5;
8490 		occupied = 0;
8491 
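		/*
		 * Any bit still set in the lite bitmap belongs to a page this
		 * abort did not cover, so the UPL is still occupied.
		 */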
8492 		for (i = 0; i < pg_num; i++) {
8493 			if (lite_list[i] != 0) {
8494 				occupied = 1;
8495 				break;
8496 			}
8497 		}
8498 	} else {
8499 		if (vm_page_queue_empty(&upl->map_object->memq)) {
8500 			occupied = 0;
8501 		}
8502 	}
8503 	if (occupied == 0) {
8504 		/*
8505 		 * If this UPL element belongs to a Vector UPL and is
8506 		 * empty, then this is the right function to deallocate
8507 		 * it. So go ahead and set the *empty variable. The flag
8508 		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
8509 		 * should be considered relevant for the Vector UPL and
8510 		 * not the internal UPLs.
8511 		 */
8512 		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8513 			*empty = TRUE;
8514 		}
8515 
8516 		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8517 			/*
8518 			 * this is not a paging object
8519 			 * so we need to drop the paging reference
8520 			 * that was taken when we created the UPL
8521 			 * against this object
8522 			 */
8523 			vm_object_activity_end(shadow_object);
8524 			vm_object_collapse(shadow_object, 0, TRUE);
8525 		} else {
8526 			/*
8527 			 * we donated the paging reference to
8528 			 * the map object... vm_pageout_object_terminate
8529 			 * will drop this reference
8530 			 */
8531 		}
8532 	}
8533 	vm_object_unlock(shadow_object);
8534 	if (object != shadow_object) {
8535 		vm_object_unlock(object);
8536 	}
8537 
8538 	if (!isVectorUPL) {
8539 		upl_unlock(upl);
8540 	} else {
8541 		/*
8542 		 * If we completed our operations on an UPL that is
8543 		 * part of a Vectored UPL and if empty is TRUE, then
8544 		 * we should go ahead and deallocate this UPL element.
8545 		 * Then we check if this was the last of the UPL elements
8546 		 * within that Vectored UPL. If so, set empty to TRUE
8547 		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
8548 		 * can go ahead and deallocate the Vector UPL too.
8549 		 */
8550 		if (*empty == TRUE) {
8551 			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
8552 			upl_deallocate(upl);
8553 		}
8554 		goto process_upl_to_abort;
8555 	}
8556 
8557 	kr = KERN_SUCCESS;
8558 
8559 done:
8560 	if (dwp_start && dwp_finish_ctx) {
8561 		vm_page_delayed_work_finish_ctx(dwp_start);
8562 		dwp_start = dwp = NULL;
8563 	}
8564 
8565 	return kr;
8566 }
8567 
8568 
8569 kern_return_t
8570 upl_abort(
8571 	upl_t   upl,
8572 	int     error)
8573 {
8574 	boolean_t       empty;
8575 
8576 	if (upl == UPL_NULL) {
8577 		return KERN_INVALID_ARGUMENT;
8578 	}
8579 
8580 	return upl_abort_range(upl, 0, upl->u_size, error, &empty);
8581 }
8582 
8583 
8584 /* an option on commit should be wire */
8585 kern_return_t
8586 upl_commit(
8587 	upl_t                   upl,
8588 	upl_page_info_t         *page_list,
8589 	mach_msg_type_number_t  count)
8590 {
8591 	boolean_t       empty;
8592 
8593 	if (upl == UPL_NULL) {
8594 		return KERN_INVALID_ARGUMENT;
8595 	}
8596 
8597 	return upl_commit_range(upl, 0, upl->u_size, 0,
8598 	           page_list, count, &empty);
8599 }
8600 
8601 
8602 void
8603 iopl_valid_data(
8604 	upl_t    upl,
8605 	vm_tag_t tag)
8606 {
8607 	vm_object_t     object;
8608 	vm_offset_t     offset;
8609 	vm_page_t       m, nxt_page = VM_PAGE_NULL;
8610 	upl_size_t      size;
8611 	int             wired_count = 0;
8612 
8613 	if (upl == NULL) {
8614 		panic("iopl_valid_data: NULL upl");
8615 	}
8616 	if (vector_upl_is_valid(upl)) {
8617 		panic("iopl_valid_data: vector upl");
8618 	}
8619 	if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
8620 		panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
8621 	}
8622 
8623 	object = upl->map_object;
8624 
8625 	if (object == kernel_object || object == compressor_object) {
8626 		panic("iopl_valid_data: object == kernel or compressor");
8627 	}
8628 
8629 	if (object->purgable == VM_PURGABLE_VOLATILE ||
8630 	    object->purgable == VM_PURGABLE_EMPTY) {
8631 		panic("iopl_valid_data: object %p purgable %d",
8632 		    object, object->purgable);
8633 	}
8634 
8635 	size = upl_adjusted_size(upl, PAGE_MASK);
8636 
8637 	vm_object_lock(object);
8638 	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
8639 
8640 	if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
8641 		nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
8642 	} else {
8643 		offset = (vm_offset_t)(upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset);
8644 	}
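	/*
	 * When the object is fully resident and exactly spans the UPL we can
	 * walk its page list directly; otherwise each page is looked up by
	 * offset.
	 */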
8645 
8646 	while (size) {
8647 		if (nxt_page != VM_PAGE_NULL) {
8648 			m = nxt_page;
8649 			nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
8650 		} else {
8651 			m = vm_page_lookup(object, offset);
8652 			offset += PAGE_SIZE;
8653 
8654 			if (m == VM_PAGE_NULL) {
8655 				panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
8656 			}
8657 		}
8658 		if (m->vmp_busy) {
8659 			if (!m->vmp_absent) {
8660 				panic("iopl_valid_data: busy page w/o absent");
8661 			}
8662 
8663 			if (m->vmp_pageq.next || m->vmp_pageq.prev) {
8664 				panic("iopl_valid_data: busy+absent page on page queue");
8665 			}
8666 			if (m->vmp_reusable) {
8667 				panic("iopl_valid_data: %p is reusable", m);
8668 			}
8669 
8670 			m->vmp_absent = FALSE;
8671 			m->vmp_dirty = TRUE;
8672 			assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
8673 			assert(m->vmp_wire_count == 0);
8674 			m->vmp_wire_count++;
8675 			assert(m->vmp_wire_count);
8676 			if (m->vmp_wire_count == 1) {
8677 				m->vmp_q_state = VM_PAGE_IS_WIRED;
8678 				wired_count++;
8679 			} else {
8680 				panic("iopl_valid_data: %p already wired", m);
8681 			}
8682 
8683 			PAGE_WAKEUP_DONE(m);
8684 		}
8685 		size -= PAGE_SIZE;
8686 	}
8687 	if (wired_count) {
8688 		VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
8689 		assert(object->resident_page_count >= object->wired_page_count);
8690 
8691 		/* no need to adjust purgeable accounting for this object: */
8692 		assert(object->purgable != VM_PURGABLE_VOLATILE);
8693 		assert(object->purgable != VM_PURGABLE_EMPTY);
8694 
8695 		vm_page_lockspin_queues();
8696 		vm_page_wire_count += wired_count;
8697 		vm_page_unlock_queues();
8698 	}
8699 	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
8700 	vm_object_unlock(object);
8701 }
8702 
8703 
8704 void
8705 vm_object_set_pmap_cache_attr(
8706 	vm_object_t             object,
8707 	upl_page_info_array_t   user_page_list,
8708 	unsigned int            num_pages,
8709 	boolean_t               batch_pmap_op)
8710 {
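	/*
	 * Apply the object's WIMG cache attributes to every page in the
	 * list; nothing to do when the default attribute is in effect.
	 */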
8711 	unsigned int    cache_attr = 0;
8712 
8713 	cache_attr = object->wimg_bits & VM_WIMG_MASK;
8714 	assert(user_page_list);
8715 	if (cache_attr != VM_WIMG_USE_DEFAULT) {
8716 		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
8717 	}
8718 }
8719 
8720 
8721 boolean_t       vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
8722 kern_return_t   vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
8723 
8724 
8725 
8726 boolean_t
8727 vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8728     wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
8729 {
8730 	vm_page_t       dst_page;
8731 	unsigned int    entry;
8732 	int             page_count;
8733 	int             delayed_unlock = 0;
8734 	boolean_t       retval = TRUE;
8735 	ppnum_t         phys_page;
8736 
8737 	vm_object_lock_assert_exclusive(object);
8738 	assert(object->purgable != VM_PURGABLE_VOLATILE);
8739 	assert(object->purgable != VM_PURGABLE_EMPTY);
8740 	assert(object->pager == NULL);
8741 	assert(object->copy == NULL);
8742 	assert(object->shadow == NULL);
8743 
8744 	page_count = object->resident_page_count;
8745 	dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
8746 
8747 	vm_page_lock_queues();
8748 
8749 	while (page_count--) {
8750 		if (dst_page->vmp_busy ||
8751 		    dst_page->vmp_fictitious ||
8752 		    dst_page->vmp_absent ||
8753 		    dst_page->vmp_error ||
8754 		    dst_page->vmp_cleaning ||
8755 		    dst_page->vmp_restart ||
8756 		    dst_page->vmp_laundry) {
8757 			retval = FALSE;
8758 			goto done;
8759 		}
8760 		if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
8761 			retval = FALSE;
8762 			goto done;
8763 		}
8764 		dst_page->vmp_reference = TRUE;
8765 
8766 		vm_page_wire(dst_page, tag, FALSE);
8767 
8768 		if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8769 			SET_PAGE_DIRTY(dst_page, FALSE);
8770 		}
8771 		entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
8772 		assert(entry >= 0 && entry < object->resident_page_count);
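		/*
		 * entry >> 5 selects the 32-bit word of the lite bitmap,
		 * entry & 31 the bit within that word.
		 */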
8773 		lite_list[entry >> 5] |= 1U << (entry & 31);
8774 
8775 		phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8776 
8777 		if (phys_page > upl->highest_page) {
8778 			upl->highest_page = phys_page;
8779 		}
8780 
8781 		if (user_page_list) {
8782 			user_page_list[entry].phys_addr = phys_page;
8783 			user_page_list[entry].absent    = dst_page->vmp_absent;
8784 			user_page_list[entry].dirty     = dst_page->vmp_dirty;
8785 			user_page_list[entry].free_when_done   = dst_page->vmp_free_when_done;
8786 			user_page_list[entry].precious  = dst_page->vmp_precious;
8787 			user_page_list[entry].device    = FALSE;
8788 			user_page_list[entry].speculative = FALSE;
8789 			user_page_list[entry].cs_validated = FALSE;
8790 			user_page_list[entry].cs_tainted = FALSE;
8791 			user_page_list[entry].cs_nx     = FALSE;
8792 			user_page_list[entry].needed    = FALSE;
8793 			user_page_list[entry].mark      = FALSE;
8794 		}
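		/*
		 * Periodically yield the page-queues lock so contending
		 * threads can make progress during a large wiring pass.
		 */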
8795 		if (delayed_unlock++ > 256) {
8796 			delayed_unlock = 0;
8797 			lck_mtx_yield(&vm_page_queue_lock);
8798 
8799 			VM_CHECK_MEMORYSTATUS;
8800 		}
8801 		dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
8802 	}
8803 done:
8804 	vm_page_unlock_queues();
8805 
8806 	VM_CHECK_MEMORYSTATUS;
8807 
8808 	return retval;
8809 }
8810 
8811 
8812 kern_return_t
8813 vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8814     wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset,
8815     int page_count, int* page_grab_count)
8816 {
8817 	vm_page_t       dst_page;
8818 	boolean_t       no_zero_fill = FALSE;
8819 	int             interruptible;
8820 	int             pages_wired = 0;
8821 	int             pages_inserted = 0;
8822 	int             entry = 0;
8823 	uint64_t        delayed_ledger_update = 0;
8824 	kern_return_t   ret = KERN_SUCCESS;
8825 	int             grab_options;
8826 	ppnum_t         phys_page;
8827 
8828 	vm_object_lock_assert_exclusive(object);
8829 	assert(object->purgable != VM_PURGABLE_VOLATILE);
8830 	assert(object->purgable != VM_PURGABLE_EMPTY);
8831 	assert(object->pager == NULL);
8832 	assert(object->copy == NULL);
8833 	assert(object->shadow == NULL);
8834 
8835 	if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
8836 		interruptible = THREAD_ABORTSAFE;
8837 	} else {
8838 		interruptible = THREAD_UNINT;
8839 	}
8840 
8841 	if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
8842 		no_zero_fill = TRUE;
8843 	}
8844 
8845 	grab_options = 0;
8846 #if CONFIG_SECLUDED_MEMORY
8847 	if (object->can_grab_secluded) {
8848 		grab_options |= VM_PAGE_GRAB_SECLUDED;
8849 	}
8850 #endif /* CONFIG_SECLUDED_MEMORY */
8851 
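	/*
	 * Grab a free page for each slot in the UPL, waiting (interruptibly
	 * if the caller asked for it) whenever the free list is exhausted.
	 */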
8852 	while (page_count--) {
8853 		while ((dst_page = vm_page_grab_options(grab_options))
8854 		    == VM_PAGE_NULL) {
8855 			OSAddAtomic(page_count, &vm_upl_wait_for_pages);
8856 
8857 			VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
8858 
8859 			if (vm_page_wait(interruptible) == FALSE) {
8860 				/*
8861 				 * interrupted case
8862 				 */
8863 				OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8864 
8865 				VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
8866 
8867 				ret = MACH_SEND_INTERRUPTED;
8868 				goto done;
8869 			}
8870 			OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8871 
8872 			VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
8873 		}
8874 		if (no_zero_fill == FALSE) {
8875 			vm_page_zero_fill(dst_page);
8876 		} else {
8877 			dst_page->vmp_absent = TRUE;
8878 		}
8879 
8880 		dst_page->vmp_reference = TRUE;
8881 
8882 		if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8883 			SET_PAGE_DIRTY(dst_page, FALSE);
8884 		}
8885 		if (dst_page->vmp_absent == FALSE) {
8886 			assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
8887 			assert(dst_page->vmp_wire_count == 0);
8888 			dst_page->vmp_wire_count++;
8889 			dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
8890 			assert(dst_page->vmp_wire_count);
8891 			pages_wired++;
8892 			PAGE_WAKEUP_DONE(dst_page);
8893 		}
8894 		pages_inserted++;
8895 
8896 		vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
8897 
8898 		lite_list[entry >> 5] |= 1U << (entry & 31);
8899 
8900 		phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8901 
8902 		if (phys_page > upl->highest_page) {
8903 			upl->highest_page = phys_page;
8904 		}
8905 
8906 		if (user_page_list) {
8907 			user_page_list[entry].phys_addr = phys_page;
8908 			user_page_list[entry].absent    = dst_page->vmp_absent;
8909 			user_page_list[entry].dirty     = dst_page->vmp_dirty;
8910 			user_page_list[entry].free_when_done    = FALSE;
8911 			user_page_list[entry].precious  = FALSE;
8912 			user_page_list[entry].device    = FALSE;
8913 			user_page_list[entry].speculative = FALSE;
8914 			user_page_list[entry].cs_validated = FALSE;
8915 			user_page_list[entry].cs_tainted = FALSE;
8916 			user_page_list[entry].cs_nx     = FALSE;
8917 			user_page_list[entry].needed    = FALSE;
8918 			user_page_list[entry].mark      = FALSE;
8919 		}
8920 		entry++;
8921 		*dst_offset += PAGE_SIZE_64;
8922 	}
8923 done:
8924 	if (pages_wired) {
8925 		vm_page_lockspin_queues();
8926 		vm_page_wire_count += pages_wired;
8927 		vm_page_unlock_queues();
8928 	}
8929 	if (pages_inserted) {
8930 		if (object->internal) {
8931 			OSAddAtomic(pages_inserted, &vm_page_internal_count);
8932 		} else {
8933 			OSAddAtomic(pages_inserted, &vm_page_external_count);
8934 		}
8935 	}
8936 	if (delayed_ledger_update) {
8937 		task_t          owner;
8938 		int             ledger_idx_volatile;
8939 		int             ledger_idx_nonvolatile;
8940 		int             ledger_idx_volatile_compressed;
8941 		int             ledger_idx_nonvolatile_compressed;
8942 		boolean_t       do_footprint;
8943 
8944 		owner = VM_OBJECT_OWNER(object);
8945 		assert(owner);
8946 
8947 		vm_object_ledger_tag_ledgers(object,
8948 		    &ledger_idx_volatile,
8949 		    &ledger_idx_nonvolatile,
8950 		    &ledger_idx_volatile_compressed,
8951 		    &ledger_idx_nonvolatile_compressed,
8952 		    &do_footprint);
8953 
8954 		/* more non-volatile bytes */
8955 		ledger_credit(owner->ledger,
8956 		    ledger_idx_nonvolatile,
8957 		    delayed_ledger_update);
8958 		if (do_footprint) {
8959 			/* more footprint */
8960 			ledger_credit(owner->ledger,
8961 			    task_ledgers.phys_footprint,
8962 			    delayed_ledger_update);
8963 		}
8964 	}
8965 
8966 	assert(page_grab_count);
8967 	*page_grab_count = pages_inserted;
8968 
8969 	return ret;
8970 }
8971 
8972 
8973 
8974 kern_return_t
8975 vm_object_iopl_request(
8976 	vm_object_t             object,
8977 	vm_object_offset_t      offset,
8978 	upl_size_t              size,
8979 	upl_t                   *upl_ptr,
8980 	upl_page_info_array_t   user_page_list,
8981 	unsigned int            *page_list_count,
8982 	upl_control_flags_t     cntrl_flags,
8983 	vm_tag_t                tag)
8984 {
8985 	vm_page_t               dst_page;
8986 	vm_object_offset_t      dst_offset;
8987 	upl_size_t              xfer_size;
8988 	upl_t                   upl = NULL;
8989 	unsigned int            entry;
8990 	wpl_array_t             lite_list = NULL;
8991 	int                     no_zero_fill = FALSE;
8992 	unsigned int            size_in_pages;
8993 	int                     page_grab_count = 0;
8994 	u_int32_t               psize;
8995 	kern_return_t           ret;
8996 	vm_prot_t               prot;
8997 	struct vm_object_fault_info fault_info = {};
8998 	struct  vm_page_delayed_work    dw_array;
8999 	struct  vm_page_delayed_work    *dwp, *dwp_start;
9000 	bool                    dwp_finish_ctx = TRUE;
9001 	int                     dw_count;
9002 	int                     dw_limit;
9003 	int                     dw_index;
9004 	boolean_t               caller_lookup;
9005 	int                     io_tracking_flag = 0;
9006 	int                     interruptible;
9007 	ppnum_t                 phys_page;
9008 
9009 	boolean_t               set_cache_attr_needed = FALSE;
9010 	boolean_t               free_wired_pages = FALSE;
9011 	boolean_t               fast_path_empty_req = FALSE;
9012 	boolean_t               fast_path_full_req = FALSE;
9013 
9014 #if DEVELOPMENT || DEBUG
9015 	task_t                  task = current_task();
9016 #endif /* DEVELOPMENT || DEBUG */
9017 
9018 	dwp_start = dwp = NULL;
9019 
9020 	vm_object_offset_t original_offset = offset;
9021 	upl_size_t original_size = size;
9022 
9023 //	DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags);
9024 
9025 	size = (upl_size_t)(vm_object_round_page(offset + size) - vm_object_trunc_page(offset));
9026 	offset = vm_object_trunc_page(offset);
9027 	if (size != original_size || offset != original_offset) {
9028 		DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags, object, original_offset, original_size, offset, size);
9029 	}
9030 
9031 	if (cntrl_flags & ~UPL_VALID_FLAGS) {
9032 		/*
9033 		 * For forward compatibility's sake,
9034 		 * reject any unknown flag.
9035 		 */
9036 		return KERN_INVALID_VALUE;
9037 	}
9038 	if (vm_lopage_needed == FALSE) {
9039 		cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
9040 	}
9041 
9042 	if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
9043 		if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
9044 			return KERN_INVALID_VALUE;
9045 		}
9046 
9047 		if (object->phys_contiguous) {
9048 			if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
9049 				return KERN_INVALID_ADDRESS;
9050 			}
9051 
9052 			if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
9053 				return KERN_INVALID_ADDRESS;
9054 			}
9055 		}
9056 	}
9057 	if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
9058 		no_zero_fill = TRUE;
9059 	}
9060 
9061 	if (cntrl_flags & UPL_COPYOUT_FROM) {
9062 		prot = VM_PROT_READ;
9063 	} else {
9064 		prot = VM_PROT_READ | VM_PROT_WRITE;
9065 	}
9066 
9067 	if ((!object->internal) && (object->paging_offset != 0)) {
9068 		panic("vm_object_iopl_request: external object with non-zero paging offset");
9069 	}
9070 
9071 
9072 	VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
9073 
9074 #if CONFIG_IOSCHED || UPL_DEBUG
9075 	if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) {
9076 		io_tracking_flag |= UPL_CREATE_IO_TRACKING;
9077 	}
9078 #endif
9079 
9080 #if CONFIG_IOSCHED
9081 	if (object->io_tracking) {
9082 		/* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
9083 		if (object != kernel_object) {
9084 			io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
9085 		}
9086 	}
9087 #endif
9088 
9089 	if (object->phys_contiguous) {
9090 		psize = PAGE_SIZE;
9091 	} else {
9092 		psize = size;
9093 
9094 		dw_count = 0;
9095 		dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
9096 		dwp_start = vm_page_delayed_work_get_ctx();
9097 		if (dwp_start == NULL) {
9098 			dwp_start = &dw_array;
9099 			dw_limit = 1;
9100 			dwp_finish_ctx = FALSE;
9101 		}
9102 
9103 		dwp = dwp_start;
9104 	}
9105 
9106 	if (cntrl_flags & UPL_SET_INTERNAL) {
9107 		upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9108 
9109 		user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9110 		lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
9111 		    ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
9112 		if (size == 0) {
9113 			user_page_list = NULL;
9114 			lite_list = NULL;
9115 		}
9116 	} else {
9117 		upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9118 
9119 		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
9120 		if (size == 0) {
9121 			lite_list = NULL;
9122 		}
9123 	}
9124 	if (user_page_list) {
9125 		user_page_list[0].device = FALSE;
9126 	}
9127 	*upl_ptr = upl;
9128 
9129 	if (cntrl_flags & UPL_NOZEROFILLIO) {
9130 		DTRACE_VM4(upl_nozerofillio,
9131 		    vm_object_t, object,
9132 		    vm_object_offset_t, offset,
9133 		    upl_size_t, size,
9134 		    upl_t, upl);
9135 	}
9136 
9137 	upl->map_object = object;
9138 	upl->u_offset = original_offset;
9139 	upl->u_size = original_size;
9140 
9141 	size_in_pages = size / PAGE_SIZE;
9142 
9143 	if (object == kernel_object &&
9144 	    !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
9145 		upl->flags |= UPL_KERNEL_OBJECT;
9146 #if UPL_DEBUG
9147 		vm_object_lock(object);
9148 #else
9149 		vm_object_lock_shared(object);
9150 #endif
9151 	} else {
9152 		vm_object_lock(object);
9153 		vm_object_activity_begin(object);
9154 	}
9155 	/*
9156 	 * paging in progress also protects the paging_offset
9157 	 */
9158 	upl->u_offset = original_offset + object->paging_offset;
9159 
9160 	if (cntrl_flags & UPL_BLOCK_ACCESS) {
9161 		/*
9162 		 * The user requested that access to the pages in this UPL
9163 	 * be blocked until the UPL is committed or aborted.
9164 		 */
9165 		upl->flags |= UPL_ACCESS_BLOCKED;
9166 	}
9167 
9168 #if CONFIG_IOSCHED || UPL_DEBUG
9169 	if ((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9170 		vm_object_activity_begin(object);
9171 		queue_enter(&object->uplq, upl, upl_t, uplq);
9172 	}
9173 #endif
9174 
9175 	if (object->phys_contiguous) {
9176 		if (upl->flags & UPL_ACCESS_BLOCKED) {
9177 			assert(!object->blocked_access);
9178 			object->blocked_access = TRUE;
9179 		}
9180 
9181 		vm_object_unlock(object);
9182 
9183 		/*
9184 		 * don't need any shadow mappings for this one
9185 		 * since it is already I/O memory
9186 		 */
9187 		upl->flags |= UPL_DEVICE_MEMORY;
9188 
9189 		upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
9190 
9191 		if (user_page_list) {
9192 			user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
9193 			user_page_list[0].device = TRUE;
9194 		}
9195 		if (page_list_count != NULL) {
9196 			if (upl->flags & UPL_INTERNAL) {
9197 				*page_list_count = 0;
9198 			} else {
9199 				*page_list_count = 1;
9200 			}
9201 		}
9202 
9203 		VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9204 #if DEVELOPMENT || DEBUG
9205 		if (task != NULL) {
9206 			ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9207 		}
9208 #endif /* DEVELOPMENT || DEBUG */
9209 		return KERN_SUCCESS;
9210 	}
9211 	if (object != kernel_object && object != compressor_object) {
9212 		/*
9213 		 * Protect user space from future COW operations
9214 		 */
9215 #if VM_OBJECT_TRACKING_OP_TRUESHARE
9216 		if (!object->true_share &&
9217 		    vm_object_tracking_inited) {
9218 			void *bt[VM_OBJECT_TRACKING_BTDEPTH];
9219 			int num = 0;
9220 
9221 			num = OSBacktrace(bt,
9222 			    VM_OBJECT_TRACKING_BTDEPTH);
9223 			btlog_add_entry(vm_object_tracking_btlog,
9224 			    object,
9225 			    VM_OBJECT_TRACKING_OP_TRUESHARE,
9226 			    bt,
9227 			    num);
9228 		}
9229 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
9230 
9231 		vm_object_lock_assert_exclusive(object);
9232 		object->true_share = TRUE;
9233 
9234 		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
9235 			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
9236 		}
9237 	}
9238 
9239 	if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
9240 	    object->copy != VM_OBJECT_NULL) {
9241 		/*
9242 		 * Honor copy-on-write obligations
9243 		 *
9244 		 * The caller is gathering these pages and
9245 		 * might modify their contents.  We need to
9246 		 * make sure that the copy object has its own
9247 		 * private copies of these pages before we let
9248 		 * the caller modify them.
9249 		 *
9250 		 * NOTE: someone else could map the original object
9251 		 * after we've done this copy-on-write here, and they
9252 		 * could then see an inconsistent picture of the memory
9253 		 * while it's being modified via the UPL.  To prevent this,
9254 		 * we would have to block access to these pages until the
9255 		 * UPL is released.  We could use the UPL_BLOCK_ACCESS
9256 		 * code path for that...
9257 		 */
9258 		vm_object_update(object,
9259 		    offset,
9260 		    size,
9261 		    NULL,
9262 		    NULL,
9263 		    FALSE,              /* should_return */
9264 		    MEMORY_OBJECT_COPY_SYNC,
9265 		    VM_PROT_NO_CHANGE);
9266 		VM_PAGEOUT_DEBUG(iopl_cow, 1);
9267 		VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
9268 	}
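	/*
	 * Fast-path candidates: requests that cover an entire object which
	 * has no pager, shadow or copy and is not in a volatile or empty
	 * purgeable state.  A fully resident object can be wired in place;
	 * a completely empty one is populated with freshly grabbed pages.
	 */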
9269 	if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
9270 	    object->purgable != VM_PURGABLE_VOLATILE &&
9271 	    object->purgable != VM_PURGABLE_EMPTY &&
9272 	    object->copy == NULL &&
9273 	    size == object->vo_size &&
9274 	    offset == 0 &&
9275 	    object->shadow == NULL &&
9276 	    object->pager == NULL) {
9277 		if (object->resident_page_count == size_in_pages) {
9278 			assert(object != compressor_object);
9279 			assert(object != kernel_object);
9280 			fast_path_full_req = TRUE;
9281 		} else if (object->resident_page_count == 0) {
9282 			assert(object != compressor_object);
9283 			assert(object != kernel_object);
9284 			fast_path_empty_req = TRUE;
9285 			set_cache_attr_needed = TRUE;
9286 		}
9287 	}
9288 
9289 	if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
9290 		interruptible = THREAD_ABORTSAFE;
9291 	} else {
9292 		interruptible = THREAD_UNINT;
9293 	}
9294 
9295 	entry = 0;
9296 
9297 	xfer_size = size;
9298 	dst_offset = offset;
9299 
9300 	if (fast_path_full_req) {
9301 		if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) {
9302 			goto finish;
9303 		}
9304 		/*
9305 		 * we couldn't complete the processing of this request on the fast path
9306 		 * so fall through to the slow path and finish up
9307 		 */
9308 	} else if (fast_path_empty_req) {
9309 		if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9310 			ret = KERN_MEMORY_ERROR;
9311 			goto return_err;
9312 		}
9313 		ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
9314 
9315 		if (ret) {
9316 			free_wired_pages = TRUE;
9317 			goto return_err;
9318 		}
9319 		goto finish;
9320 	}
9321 
9322 	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
9323 	fault_info.lo_offset = offset;
9324 	fault_info.hi_offset = offset + xfer_size;
9325 	fault_info.mark_zf_absent = TRUE;
9326 	fault_info.interruptible = interruptible;
9327 	fault_info.batch_pmap_op = TRUE;
9328 
9329 	while (xfer_size) {
9330 		vm_fault_return_t       result;
9331 
9332 		dwp->dw_mask = 0;
9333 
9334 		if (fast_path_full_req) {
9335 			/*
9336 			 * if we get here, it means that we ran into a page
9337 			 * state we couldn't handle in the fast path and
9338 			 * bailed out to the slow path... since the order
9339 			 * we look at pages is different between the 2 paths,
9340 			 * the following check is needed to determine whether
9341 			 * this page was already processed in the fast path
9342 			 */
9343 			if (lite_list[entry >> 5] & (1 << (entry & 31))) {
9344 				goto skip_page;
9345 			}
9346 		}
9347 		dst_page = vm_page_lookup(object, dst_offset);
9348 
9349 		if (dst_page == VM_PAGE_NULL ||
9350 		    dst_page->vmp_busy ||
9351 		    dst_page->vmp_error ||
9352 		    dst_page->vmp_restart ||
9353 		    dst_page->vmp_absent ||
9354 		    dst_page->vmp_fictitious) {
9355 			if (object == kernel_object) {
9356 				panic("vm_object_iopl_request: missing/bad page in kernel object");
9357 			}
9358 			if (object == compressor_object) {
9359 				panic("vm_object_iopl_request: missing/bad page in compressor object");
9360 			}
9361 
9362 			if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9363 				ret = KERN_MEMORY_ERROR;
9364 				goto return_err;
9365 			}
9366 			set_cache_attr_needed = TRUE;
9367 
9368 			/*
9369 			 * We just looked up the page and the result remains valid
9370 			 * until the object lock is released, so send it to
9371 			 * vm_fault_page() (as "dst_page"), to avoid having to
9372 			 * look it up again there.
9373 			 */
9374 			caller_lookup = TRUE;
9375 
9376 			do {
9377 				vm_page_t       top_page;
9378 				kern_return_t   error_code;
9379 
9380 				fault_info.cluster_size = xfer_size;
9381 
9382 				vm_object_paging_begin(object);
9383 
9384 				result = vm_fault_page(object, dst_offset,
9385 				    prot | VM_PROT_WRITE, FALSE,
9386 				    caller_lookup,
9387 				    &prot, &dst_page, &top_page,
9388 				    (int *)0,
9389 				    &error_code, no_zero_fill,
9390 				    FALSE, &fault_info);
9391 
9392 				/* our lookup is no longer valid at this point */
9393 				caller_lookup = FALSE;
9394 
9395 				switch (result) {
9396 				case VM_FAULT_SUCCESS:
9397 					page_grab_count++;
9398 
9399 					if (!dst_page->vmp_absent) {
9400 						PAGE_WAKEUP_DONE(dst_page);
9401 					} else {
9402 						/*
9403 						 * we only get back an absent page if we
9404 						 * requested that it not be zero-filled
9405 						 * because we are about to fill it via I/O
9406 						 *
9407 						 * absent pages should be left BUSY
9408 						 * to prevent them from being faulted
9409 						 * into an address space before we've
9410 						 * had a chance to complete the I/O on
9411 						 * them since they may contain info that
9412 						 * shouldn't be seen by the faulting task
9413 						 */
9414 					}
9415 					/*
9416 					 *	Release paging references and
9417 					 *	top-level placeholder page, if any.
9418 					 */
9419 					if (top_page != VM_PAGE_NULL) {
9420 						vm_object_t local_object;
9421 
9422 						local_object = VM_PAGE_OBJECT(top_page);
9423 
9424 						/*
9425 						 * comparing 2 packed pointers
9426 						 */
9427 						if (top_page->vmp_object != dst_page->vmp_object) {
9428 							vm_object_lock(local_object);
9429 							VM_PAGE_FREE(top_page);
9430 							vm_object_paging_end(local_object);
9431 							vm_object_unlock(local_object);
9432 						} else {
9433 							VM_PAGE_FREE(top_page);
9434 							vm_object_paging_end(local_object);
9435 						}
9436 					}
9437 					vm_object_paging_end(object);
9438 					break;
9439 
9440 				case VM_FAULT_RETRY:
9441 					vm_object_lock(object);
9442 					break;
9443 
9444 				case VM_FAULT_MEMORY_SHORTAGE:
9445 					OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
9446 
9447 					VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
9448 
9449 					if (vm_page_wait(interruptible)) {
9450 						OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9451 
9452 						VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
9453 						vm_object_lock(object);
9454 
9455 						break;
9456 					}
9457 					OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9458 
9459 					VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
9460 
9461 					OS_FALLTHROUGH;
9462 
9463 				case VM_FAULT_INTERRUPTED:
9464 					error_code = MACH_SEND_INTERRUPTED;
9465 					OS_FALLTHROUGH;
9466 				case VM_FAULT_MEMORY_ERROR:
9467 memory_error:
9468 					ret = (error_code ? error_code: KERN_MEMORY_ERROR);
9469 
9470 					vm_object_lock(object);
9471 					goto return_err;
9472 
9473 				case VM_FAULT_SUCCESS_NO_VM_PAGE:
9474 					/* success but no page: fail */
9475 					vm_object_paging_end(object);
9476 					vm_object_unlock(object);
9477 					goto memory_error;
9478 
9479 				default:
9480 					panic("vm_object_iopl_request: unexpected error"
9481 					    " 0x%x from vm_fault_page()\n", result);
9482 				}
9483 			} while (result != VM_FAULT_SUCCESS);
9484 		}
9485 		phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9486 
9487 		if (upl->flags & UPL_KERNEL_OBJECT) {
9488 			goto record_phys_addr;
9489 		}
9490 
9491 		if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9492 			dst_page->vmp_busy = TRUE;
9493 			goto record_phys_addr;
9494 		}
9495 
9496 		if (dst_page->vmp_cleaning) {
9497 			/*
9498 			 * Someone else is cleaning this page in place.
9499 			 * In theory, we should be able to proceed and use this
9500 			 * page but they'll probably end up clearing the "busy"
9501 			 * bit on it in upl_commit_range() but they didn't set
9502 			 * it, so they would clear our "busy" bit and open
9503 			 * us to race conditions.
9504 			 * We'd better wait for the cleaning to complete and
9505 			 * then try again.
9506 			 */
9507 			VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
9508 			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
9509 			continue;
9510 		}
9511 		if (dst_page->vmp_laundry) {
9512 			vm_pageout_steal_laundry(dst_page, FALSE);
9513 		}
9514 
9515 		if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
9516 		    phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
9517 			vm_page_t       low_page;
9518 			int             refmod;
9519 
9520 			/*
9521 			 * support devices that can't DMA above 32 bits
9522 			 * by substituting pages from a pool of low address
9523 			 * memory for any pages we find above the 4G mark
9524 			 * can't substitute if the page is already wired because
9525 			 * we don't know whether that physical address has been
9526 			 * handed out to some other 64 bit capable DMA device to use
9527 			 */
9528 			if (VM_PAGE_WIRED(dst_page)) {
9529 				ret = KERN_PROTECTION_FAILURE;
9530 				goto return_err;
9531 			}
9532 			low_page = vm_page_grablo();
9533 
9534 			if (low_page == VM_PAGE_NULL) {
9535 				ret = KERN_RESOURCE_SHORTAGE;
9536 				goto return_err;
9537 			}
9538 			/*
9539 			 * from here until the vm_page_replace completes
9540 			 * we mustn't drop the object lock... we don't
9541 			 * want anyone refaulting this page in and using
9542 			 * it after we disconnect it... we want the fault
9543 			 * to find the new page being substituted.
9544 			 */
9545 			if (dst_page->vmp_pmapped) {
9546 				refmod = pmap_disconnect(phys_page);
9547 			} else {
9548 				refmod = 0;
9549 			}
9550 
9551 			if (!dst_page->vmp_absent) {
9552 				vm_page_copy(dst_page, low_page);
9553 			}
9554 
9555 			low_page->vmp_reference = dst_page->vmp_reference;
9556 			low_page->vmp_dirty     = dst_page->vmp_dirty;
9557 			low_page->vmp_absent    = dst_page->vmp_absent;
9558 
9559 			if (refmod & VM_MEM_REFERENCED) {
9560 				low_page->vmp_reference = TRUE;
9561 			}
9562 			if (refmod & VM_MEM_MODIFIED) {
9563 				SET_PAGE_DIRTY(low_page, FALSE);
9564 			}
9565 
9566 			vm_page_replace(low_page, object, dst_offset);
9567 
9568 			dst_page = low_page;
9569 			/*
9570 			 * vm_page_grablo returned the page marked
9571 			 * BUSY... we don't need a PAGE_WAKEUP_DONE
9572 			 * here, because we've never dropped the object lock
9573 			 */
9574 			if (!dst_page->vmp_absent) {
9575 				dst_page->vmp_busy = FALSE;
9576 			}
9577 
9578 			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9579 		}
9580 		if (!dst_page->vmp_busy) {
9581 			dwp->dw_mask |= DW_vm_page_wire;
9582 		}
9583 
9584 		if (cntrl_flags & UPL_BLOCK_ACCESS) {
9585 			/*
9586 			 * Mark the page "busy" to block any future page fault
9587 			 * on this page in addition to wiring it.
9588 			 * We'll also remove the mapping
9589 			 * of all these pages before leaving this routine.
9590 			 */
9591 			assert(!dst_page->vmp_fictitious);
9592 			dst_page->vmp_busy = TRUE;
9593 		}
9594 		/*
9595 		 * expect the page to be used
9596 		 * page queues lock must be held to set 'reference'
9597 		 */
9598 		dwp->dw_mask |= DW_set_reference;
9599 
9600 		if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
9601 			SET_PAGE_DIRTY(dst_page, TRUE);
9602 			/*
9603 			 * Page belonging to a code-signed object is about to
9604 			 * be written. Mark it tainted and disconnect it from
9605 			 * all pmaps so processes have to fault it back in and
9606 			 * deal with the tainted bit.
9607 			 */
9608 			if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
9609 				dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
9610 				vm_page_iopl_tainted++;
9611 				if (dst_page->vmp_pmapped) {
9612 					int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
9613 					if (refmod & VM_MEM_REFERENCED) {
9614 						dst_page->vmp_reference = TRUE;
9615 					}
9616 				}
9617 			}
9618 		}
9619 		if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
9620 			pmap_sync_page_attributes_phys(phys_page);
9621 			dst_page->vmp_written_by_kernel = FALSE;
9622 		}
9623 
9624 record_phys_addr:
9625 		if (dst_page->vmp_busy) {
9626 			upl->flags |= UPL_HAS_BUSY;
9627 		}
9628 
9629 		lite_list[entry >> 5] |= 1U << (entry & 31);
9630 
9631 		if (phys_page > upl->highest_page) {
9632 			upl->highest_page = phys_page;
9633 		}
9634 
9635 		if (user_page_list) {
9636 			user_page_list[entry].phys_addr = phys_page;
9637 			user_page_list[entry].free_when_done    = dst_page->vmp_free_when_done;
9638 			user_page_list[entry].absent    = dst_page->vmp_absent;
9639 			user_page_list[entry].dirty     = dst_page->vmp_dirty;
9640 			user_page_list[entry].precious  = dst_page->vmp_precious;
9641 			user_page_list[entry].device    = FALSE;
9642 			user_page_list[entry].needed    = FALSE;
9643 			if (dst_page->vmp_clustered == TRUE) {
9644 				user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
9645 			} else {
9646 				user_page_list[entry].speculative = FALSE;
9647 			}
9648 			user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
9649 			user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
9650 			user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
9651 			user_page_list[entry].mark      = FALSE;
9652 		}
9653 		if (object != kernel_object && object != compressor_object) {
9654 			/*
9655 			 * someone is explicitly grabbing this page...
9656 			 * update clustered and speculative state
9657 			 *
9658 			 */
9659 			if (dst_page->vmp_clustered) {
9660 				VM_PAGE_CONSUME_CLUSTERED(dst_page);
9661 			}
9662 		}
9663 skip_page:
9664 		entry++;
9665 		dst_offset += PAGE_SIZE_64;
9666 		xfer_size -= PAGE_SIZE;
9667 
9668 		if (dwp->dw_mask) {
9669 			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
9670 
9671 			if (dw_count >= dw_limit) {
9672 				vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9673 
9674 				dwp = dwp_start;
9675 				dw_count = 0;
9676 			}
9677 		}
9678 	}
9679 	assert(entry == size_in_pages);
9680 
9681 	if (dw_count) {
9682 		vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9683 		dwp = dwp_start;
9684 		dw_count = 0;
9685 	}
9686 finish:
9687 	if (user_page_list && set_cache_attr_needed == TRUE) {
9688 		vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
9689 	}
9690 
9691 	if (page_list_count != NULL) {
9692 		if (upl->flags & UPL_INTERNAL) {
9693 			*page_list_count = 0;
9694 		} else if (*page_list_count > size_in_pages) {
9695 			*page_list_count = size_in_pages;
9696 		}
9697 	}
9698 	vm_object_unlock(object);
9699 
9700 	if (cntrl_flags & UPL_BLOCK_ACCESS) {
9701 		/*
9702 		 * We've marked all the pages "busy" so that future
9703 		 * page faults will block.
9704 		 * Now remove the mapping for these pages, so that they
9705 		 * can't be accessed without causing a page fault.
9706 		 */
9707 		vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
9708 		    PMAP_NULL,
9709 		    PAGE_SIZE,
9710 		    0, VM_PROT_NONE);
9711 		assert(!object->blocked_access);
9712 		object->blocked_access = TRUE;
9713 	}
9714 
9715 	VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9716 #if DEVELOPMENT || DEBUG
9717 	if (task != NULL) {
9718 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9719 	}
9720 #endif /* DEVELOPMENT || DEBUG */
9721 
9722 	if (dwp_start && dwp_finish_ctx) {
9723 		vm_page_delayed_work_finish_ctx(dwp_start);
9724 		dwp_start = dwp = NULL;
9725 	}
9726 
9727 	return KERN_SUCCESS;
9728 
9729 return_err:
9730 	dw_index = 0;
9731 
9732 	for (; offset < dst_offset; offset += PAGE_SIZE) {
9733 		boolean_t need_unwire;
9734 
9735 		dst_page = vm_page_lookup(object, offset);
9736 
9737 		if (dst_page == VM_PAGE_NULL) {
9738 			panic("vm_object_iopl_request: Wired page missing.");
9739 		}
9740 
9741 		/*
9742 		 * if we've already processed this page in an earlier
9743 		 * dw_do_work, we need to undo the wiring... we will
9744 		 * leave the dirty and reference bits on if they
9745 		 * were set, since we don't have a good way of knowing
9746 		 * what the previous state was and we won't get here
9747 		 * under any normal circumstances...  we will always
9748 		 * clear BUSY and wakeup any waiters via vm_page_free
9749 		 * or PAGE_WAKEUP_DONE
9750 		 */
9751 		need_unwire = TRUE;
9752 
9753 		if (dw_count) {
9754 			if ((dwp_start)[dw_index].dw_m == dst_page) {
9755 				/*
9756 				 * still in the deferred work list
9757 				 * which means we haven't yet called
9758 				 * vm_page_wire on this page
9759 				 */
9760 				need_unwire = FALSE;
9761 
9762 				dw_index++;
9763 				dw_count--;
9764 			}
9765 		}
9766 		vm_page_lock_queues();
9767 
9768 		if (dst_page->vmp_absent || free_wired_pages == TRUE) {
9769 			vm_page_free(dst_page);
9770 
9771 			need_unwire = FALSE;
9772 		} else {
9773 			if (need_unwire == TRUE) {
9774 				vm_page_unwire(dst_page, TRUE);
9775 			}
9776 
9777 			PAGE_WAKEUP_DONE(dst_page);
9778 		}
9779 		vm_page_unlock_queues();
9780 
9781 		if (need_unwire == TRUE) {
9782 			counter_inc(&vm_statistics_reactivations);
9783 		}
9784 	}
9785 #if UPL_DEBUG
9786 	upl->upl_state = 2;
9787 #endif
9788 	if (!(upl->flags & UPL_KERNEL_OBJECT)) {
9789 		vm_object_activity_end(object);
9790 		vm_object_collapse(object, 0, TRUE);
9791 	}
9792 	vm_object_unlock(object);
9793 	upl_destroy(upl);
9794 
9795 	VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
9796 #if DEVELOPMENT || DEBUG
9797 	if (task != NULL) {
9798 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9799 	}
9800 #endif /* DEVELOPMENT || DEBUG */
9801 
9802 	if (dwp_start && dwp_finish_ctx) {
9803 		vm_page_delayed_work_finish_ctx(dwp_start);
9804 		dwp_start = dwp = NULL;
9805 	}
9806 	return ret;
9807 }
9808 
9809 kern_return_t
9810 upl_transpose(
9811 	upl_t           upl1,
9812 	upl_t           upl2)
9813 {
9814 	kern_return_t           retval;
9815 	boolean_t               upls_locked;
9816 	vm_object_t             object1, object2;
9817 
9818 	/* LD: Should mapped UPLs be eligible for a transpose? */
9819 	if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
9820 		return KERN_INVALID_ARGUMENT;
9821 	}
9822 
9823 	upls_locked = FALSE;
9824 
9825 	/*
9826 	 * Since we need to lock both UPLs at the same time,
9827 	 * avoid deadlocks by always taking locks in the same order.
9828 	 */
9829 	if (upl1 < upl2) {
9830 		upl_lock(upl1);
9831 		upl_lock(upl2);
9832 	} else {
9833 		upl_lock(upl2);
9834 		upl_lock(upl1);
9835 	}
9836 	upls_locked = TRUE;     /* the UPLs will need to be unlocked */
9837 
9838 	object1 = upl1->map_object;
9839 	object2 = upl2->map_object;
9840 
9841 	if (upl1->u_offset != 0 || upl2->u_offset != 0 ||
9842 	    upl1->u_size != upl2->u_size) {
9843 		/*
9844 		 * We deal only with full objects, not subsets.
9845 		 * That's because we exchange the entire backing store info
9846 		 * for the objects: pager, resident pages, etc...  We can't do
9847 		 * only part of it.
9848 		 */
9849 		retval = KERN_INVALID_VALUE;
9850 		goto done;
9851 	}
9852 
9853 	/*
9854 	 * Transpose the VM objects' backing store.
9855 	 */
9856 	retval = vm_object_transpose(object1, object2,
9857 	    upl_adjusted_size(upl1, PAGE_MASK));
9858 
9859 	if (retval == KERN_SUCCESS) {
9860 		/*
9861 		 * Make each UPL point to the correct VM object, i.e. the
9862 		 * object holding the pages that the UPL refers to...
9863 		 */
9864 #if CONFIG_IOSCHED || UPL_DEBUG
9865 		if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9866 			vm_object_lock(object1);
9867 			vm_object_lock(object2);
9868 		}
9869 		if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9870 			queue_remove(&object1->uplq, upl1, upl_t, uplq);
9871 		}
9872 		if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9873 			queue_remove(&object2->uplq, upl2, upl_t, uplq);
9874 		}
9875 #endif
9876 		upl1->map_object = object2;
9877 		upl2->map_object = object1;
9878 
9879 #if CONFIG_IOSCHED || UPL_DEBUG
9880 		if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9881 			queue_enter(&object2->uplq, upl1, upl_t, uplq);
9882 		}
9883 		if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9884 			queue_enter(&object1->uplq, upl2, upl_t, uplq);
9885 		}
9886 		if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9887 			vm_object_unlock(object2);
9888 			vm_object_unlock(object1);
9889 		}
9890 #endif
9891 	}
9892 
9893 done:
9894 	/*
9895 	 * Cleanup.
9896 	 */
9897 	if (upls_locked) {
9898 		upl_unlock(upl1);
9899 		upl_unlock(upl2);
9900 		upls_locked = FALSE;
9901 	}
9902 
9903 	return retval;
9904 }
9905 
9906 void
9907 upl_range_needed(
9908 	upl_t           upl,
9909 	int             index,
9910 	int             count)
9911 {
9912 	upl_page_info_t *user_page_list;
9913 	int             size_in_pages;
9914 
9915 	if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
9916 		return;
9917 	}
9918 
9919 	size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
9920 
9921 	user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9922 
9923 	while (count-- && index < size_in_pages) {
9924 		user_page_list[index++].needed = TRUE;
9925 	}
9926 }
9927 
9928 
9929 /*
9930  * Reserve of virtual addresses in the kernel address space.
9931  * We need to map the physical pages in the kernel, so that we
9932  * can call the code-signing or slide routines with a kernel
9933  * virtual address.  We keep this pool of pre-allocated kernel
9934  * virtual addresses so that we don't have to scan the kernel's
9935  * virtual address space each time we need to work with
9936  * a physical page.
9937  */
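/*
 * Sizing note (illustrative, not authoritative): with VM_PAGING_NUM_PAGES
 * set to 64 below, the reserve covers 64 * PAGE_SIZE bytes of kernel
 * virtual address space -- e.g. 256KB with 4K pages or 1MB with 16K pages.
 * Slot i of the pool corresponds to the kernel virtual address
 * vm_paging_base_address + (i * PAGE_SIZE), which is how
 * vm_paging_map_object() and vm_paging_unmap_object() convert between pool
 * indices and addresses.
 */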
9938 SIMPLE_LOCK_DECLARE(vm_paging_lock, 0);
9939 #define VM_PAGING_NUM_PAGES     64
9940 vm_map_offset_t vm_paging_base_address = 0;
9941 boolean_t       vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
9942 int             vm_paging_max_index = 0;
9943 int             vm_paging_page_waiter = 0;
9944 int             vm_paging_page_waiter_total = 0;
9945 
9946 unsigned long   vm_paging_no_kernel_page = 0;
9947 unsigned long   vm_paging_objects_mapped = 0;
9948 unsigned long   vm_paging_pages_mapped = 0;
9949 unsigned long   vm_paging_objects_mapped_slow = 0;
9950 unsigned long   vm_paging_pages_mapped_slow = 0;
9951 
9952 __startup_func
9953 void
9954 vm_paging_map_init(void)
9955 {
9956 	kern_return_t   kr;
9957 	vm_map_offset_t page_map_offset;
9958 	vm_map_entry_t  map_entry;
9959 
9960 	assert(vm_paging_base_address == 0);
9961 
9962 	/*
9963 	 * Initialize our pool of pre-allocated kernel
9964 	 * virtual addresses.
9965 	 */
9966 	page_map_offset = 0;
9967 	kr = vm_map_find_space(kernel_map,
9968 	    &page_map_offset,
9969 	    VM_PAGING_NUM_PAGES * PAGE_SIZE,
9970 	    0,
9971 	    0,
9972 	    VM_MAP_KERNEL_FLAGS_NONE,
9973 	    VM_KERN_MEMORY_NONE,
9974 	    &map_entry);
9975 	if (kr != KERN_SUCCESS) {
9976 		panic("vm_paging_map_init: kernel_map full");
9977 	}
9978 	VME_OBJECT_SET(map_entry, kernel_object);
9979 	VME_OFFSET_SET(map_entry, page_map_offset);
9980 	map_entry->protection = VM_PROT_NONE;
9981 	map_entry->max_protection = VM_PROT_NONE;
9982 	map_entry->permanent = TRUE;
9983 	vm_object_reference(kernel_object);
9984 	vm_map_unlock(kernel_map);
9985 
9986 	assert(vm_paging_base_address == 0);
9987 	vm_paging_base_address = page_map_offset;
9988 }
9989 
9990 /*
9991  * vm_paging_map_object:
9992  *	Maps part of a VM object's pages in the kernel
9993  *      virtual address space, using the pre-allocated
9994  *	kernel virtual addresses, if possible.
9995  * Context:
9996  *      The VM object is locked.  This lock will get
9997  *      dropped and re-acquired though, so the caller
9998  *      must make sure the VM object is kept alive
9999  *	(by holding a VM map that has a reference
10000  *      on it, for example, or taking an extra reference).
10001  *      The page should also be kept busy to prevent
10002  *	it from being reclaimed.
10003  */
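/*
 * Typical call sequence (a sketch only; "my_object", "my_page" and
 * "my_offset", plus the surrounding locking, are assumptions for
 * illustration and not code from this file):
 *
 *	vm_map_size_t   ksize = PAGE_SIZE;
 *	vm_map_offset_t kaddr;
 *	boolean_t       need_unmap;
 *	kern_return_t   kr;
 *
 *	vm_object_lock(my_object);
 *	... make my_page busy ...
 *	kr = vm_paging_map_object(my_page, my_object, my_offset,
 *	    VM_PROT_READ, FALSE, &ksize, &kaddr, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... access the page's contents through kaddr ...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(my_object, kaddr, kaddr + ksize);
 *		}
 *	}
 */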
10004 kern_return_t
10005 vm_paging_map_object(
10006 	vm_page_t               page,
10007 	vm_object_t             object,
10008 	vm_object_offset_t      offset,
10009 	vm_prot_t               protection,
10010 	boolean_t               can_unlock_object,
10011 	vm_map_size_t           *size,          /* IN/OUT */
10012 	vm_map_offset_t         *address,       /* OUT */
10013 	boolean_t               *need_unmap)    /* OUT */
10014 {
10015 	kern_return_t           kr;
10016 	vm_map_offset_t         page_map_offset;
10017 	vm_map_size_t           map_size;
10018 	vm_object_offset_t      object_offset;
10019 	int                     i;
10020 
10021 	if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
10022 		/* use permanent 1-to-1 kernel mapping of physical memory ? */
10023 		*address = (vm_map_offset_t)
10024 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
10025 		*need_unmap = FALSE;
10026 		return KERN_SUCCESS;
10027 
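		/*
		 * NOTE: the unconditional "return KERN_SUCCESS" above means the
		 * pre-allocated-pool path below is not reached in this
		 * configuration; every single-page request is satisfied through
		 * the permanent 1-to-1 mapping instead.
		 */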
10028 		assert(page->vmp_busy);
10029 		/*
10030 		 * Use one of the pre-allocated kernel virtual addresses
10031 		 * and just enter the VM page in the kernel address space
10032 		 * at that virtual address.
10033 		 */
10034 		simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10035 
10036 		/*
10037 		 * Try and find an available kernel virtual address
10038 		 * from our pre-allocated pool.
10039 		 */
10040 		page_map_offset = 0;
10041 		for (;;) {
10042 			for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
10043 				if (vm_paging_page_inuse[i] == FALSE) {
10044 					page_map_offset =
10045 					    vm_paging_base_address +
10046 					    (i * PAGE_SIZE);
10047 					break;
10048 				}
10049 			}
10050 			if (page_map_offset != 0) {
10051 				/* found a space to map our page ! */
10052 				break;
10053 			}
10054 
10055 			if (can_unlock_object) {
10056 				/*
10057 				 * If we can afford to unlock the VM object,
10058 				 * let's take the slow path now...
10059 				 */
10060 				break;
10061 			}
10062 			/*
10063 			 * We can't afford to unlock the VM object, so
10064 			 * let's wait for a space to become available...
10065 			 */
10066 			vm_paging_page_waiter_total++;
10067 			vm_paging_page_waiter++;
10068 			kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
10069 			if (kr == THREAD_WAITING) {
10070 				simple_unlock(&vm_paging_lock);
10071 				kr = thread_block(THREAD_CONTINUE_NULL);
10072 				simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10073 			}
10074 			vm_paging_page_waiter--;
10075 			/* ... and try again */
10076 		}
10077 
10078 		if (page_map_offset != 0) {
10079 			/*
10080 			 * We found a kernel virtual address;
10081 			 * map the physical page to that virtual address.
10082 			 */
10083 			if (i > vm_paging_max_index) {
10084 				vm_paging_max_index = i;
10085 			}
10086 			vm_paging_page_inuse[i] = TRUE;
10087 			simple_unlock(&vm_paging_lock);
10088 
10089 			page->vmp_pmapped = TRUE;
10090 
10091 			/*
10092 			 * Keep the VM object locked over the PMAP_ENTER
10093 			 * and the actual use of the page by the kernel,
10094 			 * or this pmap mapping might get undone by a
10095 			 * vm_object_pmap_protect() call...
10096 			 */
10097 			PMAP_ENTER(kernel_pmap,
10098 			    page_map_offset,
10099 			    page,
10100 			    protection,
10101 			    VM_PROT_NONE,
10102 			    0,
10103 			    TRUE,
10104 			    kr);
10105 			assert(kr == KERN_SUCCESS);
10106 			vm_paging_objects_mapped++;
10107 			vm_paging_pages_mapped++;
10108 			*address = page_map_offset;
10109 			*need_unmap = TRUE;
10110 
10111 #if KASAN
10112 			kasan_notify_address(page_map_offset, PAGE_SIZE);
10113 #endif
10114 
10115 			/* all done and mapped, ready to use ! */
10116 			return KERN_SUCCESS;
10117 		}
10118 
10119 		/*
10120 		 * We ran out of pre-allocated kernel virtual
10121 		 * addresses.  Just map the page in the kernel
10122 		 * the slow and regular way.
10123 		 */
10124 		vm_paging_no_kernel_page++;
10125 		simple_unlock(&vm_paging_lock);
10126 	}
10127 
10128 	if (!can_unlock_object) {
10129 		*address = 0;
10130 		*size = 0;
10131 		*need_unmap = FALSE;
10132 		return KERN_NOT_SUPPORTED;
10133 	}
10134 
10135 	object_offset = vm_object_trunc_page(offset);
10136 	map_size = vm_map_round_page(*size,
10137 	    VM_MAP_PAGE_MASK(kernel_map));
10138 
10139 	/*
10140 	 * Try and map the required range of the object
10141 	 * in the kernel_map
10142 	 */
10143 
10144 	vm_object_reference_locked(object);     /* for the map entry */
10145 	vm_object_unlock(object);
10146 
10147 	kr = vm_map_enter(kernel_map,
10148 	    address,
10149 	    map_size,
10150 	    0,
10151 	    VM_FLAGS_ANYWHERE,
10152 	    VM_MAP_KERNEL_FLAGS_NONE,
10153 	    VM_KERN_MEMORY_NONE,
10154 	    object,
10155 	    object_offset,
10156 	    FALSE,
10157 	    protection,
10158 	    VM_PROT_ALL,
10159 	    VM_INHERIT_NONE);
10160 	if (kr != KERN_SUCCESS) {
10161 		*address = 0;
10162 		*size = 0;
10163 		*need_unmap = FALSE;
10164 		vm_object_deallocate(object);   /* for the map entry */
10165 		vm_object_lock(object);
10166 		return kr;
10167 	}
10168 
10169 	*size = map_size;
10170 
10171 	/*
10172 	 * Enter the mapped pages in the page table now.
10173 	 */
10174 	vm_object_lock(object);
10175 	/*
10176 	 * VM object must be kept locked from before PMAP_ENTER()
10177 	 * until after the kernel is done accessing the page(s).
10178 	 * Otherwise, the pmap mappings in the kernel could be
10179 	 * undone by a call to vm_object_pmap_protect().
10180 	 */
10181 
10182 	for (page_map_offset = 0;
10183 	    map_size != 0;
10184 	    map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
10185 		page = vm_page_lookup(object, offset + page_map_offset);
10186 		if (page == VM_PAGE_NULL) {
10187 			printf("vm_paging_map_object: no page !?");
10188 			vm_object_unlock(object);
10189 			kr = vm_map_remove(kernel_map, *address, *size,
10190 			    VM_MAP_REMOVE_NO_FLAGS);
10191 			assert(kr == KERN_SUCCESS);
10192 			*address = 0;
10193 			*size = 0;
10194 			*need_unmap = FALSE;
10195 			vm_object_lock(object);
10196 			return KERN_MEMORY_ERROR;
10197 		}
10198 		page->vmp_pmapped = TRUE;
10199 
10200 		PMAP_ENTER(kernel_pmap,
10201 		    *address + page_map_offset,
10202 		    page,
10203 		    protection,
10204 		    VM_PROT_NONE,
10205 		    0,
10206 		    TRUE,
10207 		    kr);
10208 		assert(kr == KERN_SUCCESS);
10209 #if KASAN
10210 		kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
10211 #endif
10212 	}
10213 
10214 	vm_paging_objects_mapped_slow++;
10215 	vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
10216 
10217 	*need_unmap = TRUE;
10218 
10219 	return KERN_SUCCESS;
10220 }
10221 
10222 /*
10223  * vm_paging_unmap_object:
10224  *	Unmaps part of a VM object's pages from the kernel
10225  *      virtual address space.
10226  * Context:
10227  *      The VM object is locked.  This lock will get
10228  *      dropped and re-acquired though.
10229  */
10230 void
10231 vm_paging_unmap_object(
10232 	vm_object_t     object,
10233 	vm_map_offset_t start,
10234 	vm_map_offset_t end)
10235 {
10236 	kern_return_t   kr;
10237 	int             i;
10238 
10239 	if ((vm_paging_base_address == 0) ||
10240 	    (start < vm_paging_base_address) ||
10241 	    (end > (vm_paging_base_address
10242 	    + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
10243 		/*
10244 		 * We didn't use our pre-allocated pool of
10245 		 * kernel virtual addresses.  Deallocate the
10246 		 * virtual memory.
10247 		 */
10248 		if (object != VM_OBJECT_NULL) {
10249 			vm_object_unlock(object);
10250 		}
10251 		kr = vm_map_remove(kernel_map, start, end,
10252 		    VM_MAP_REMOVE_NO_FLAGS);
10253 		if (object != VM_OBJECT_NULL) {
10254 			vm_object_lock(object);
10255 		}
10256 		assert(kr == KERN_SUCCESS);
10257 	} else {
10258 		/*
10259 		 * We used a kernel virtual address from our
10260 		 * pre-allocated pool.  Put it back in the pool
10261 		 * for next time.
10262 		 */
10263 		assert(end - start == PAGE_SIZE);
10264 		i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
10265 		assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
10266 
10267 		/* undo the pmap mapping */
10268 		pmap_remove(kernel_pmap, start, end);
10269 
10270 		simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10271 		vm_paging_page_inuse[i] = FALSE;
10272 		if (vm_paging_page_waiter) {
10273 			thread_wakeup(&vm_paging_page_waiter);
10274 		}
10275 		simple_unlock(&vm_paging_lock);
10276 	}
10277 }
10278 
10279 
10280 /*
10281  * page->vmp_object must be locked
10282  */
10283 void
10284 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
10285 {
10286 	if (!queues_locked) {
10287 		vm_page_lockspin_queues();
10288 	}
10289 
10290 	page->vmp_free_when_done = FALSE;
10291 	/*
10292 	 * need to drop the laundry count...
10293 	 * we may also need to remove it
10294 	 * from the I/O paging queue...
10295 	 * vm_pageout_throttle_up handles both cases
10296 	 *
10297 	 * the laundry and pageout_queue flags are cleared...
10298 	 */
10299 	vm_pageout_throttle_up(page);
10300 
10301 	if (!queues_locked) {
10302 		vm_page_unlock_queues();
10303 	}
10304 }
10305 
10306 upl_t
10307 vector_upl_create(vm_offset_t upl_offset)
10308 {
10309 	int i = 0;
10310 	upl_t   upl;
10311 	vector_upl_t vector_upl = kalloc_type(struct _vector_upl, Z_WAITOK);
10312 
10313 	upl = upl_create(0, UPL_VECTOR, 0);
10314 	upl->vector_upl = vector_upl;
10315 	upl->u_offset = upl_offset;
10316 	vector_upl->size = 0;
10317 	vector_upl->offset = upl_offset;
10318 	vector_upl->invalid_upls = 0;
10319 	vector_upl->num_upls = 0;
10320 	vector_upl->pagelist = NULL;
10321 
10322 	for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
10323 		vector_upl->upl_iostates[i].size = 0;
10324 		vector_upl->upl_iostates[i].offset = 0;
10325 	}
10326 	return upl;
10327 }
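/*
 * Rough lifecycle of a vectored UPL, pieced together from the routines in
 * this file (a sketch, not a prescriptive API contract):
 *
 *	upl = vector_upl_create(offset);
 *	vector_upl_set_subupl(upl, subupl, io_size);      // repeat per sub-UPL
 *	vector_upl_set_iostate(upl, subupl, offset, size);
 *	vector_upl_set_pagelist(upl);
 *	... I/O is issued; sub-UPLs are looked up with
 *	    vector_upl_subupl_byoffset()/byindex() and removed with
 *	    vector_upl_set_subupl(upl, subupl, 0) as they complete ...
 *	vector_upl_deallocate(upl);       // panics unless every sub-UPL is gone
 */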
10328 
10329 void
10330 vector_upl_deallocate(upl_t upl)
10331 {
10332 	if (upl) {
10333 		vector_upl_t vector_upl = upl->vector_upl;
10334 		if (vector_upl) {
10335 			if (vector_upl->invalid_upls != vector_upl->num_upls) {
10336 				panic("Deallocating non-empty Vectored UPL");
10337 			}
10338 			kfree_data(vector_upl->pagelist, sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));
10339 			vector_upl->invalid_upls = 0;
10340 			vector_upl->num_upls = 0;
10341 			vector_upl->pagelist = NULL;
10342 			vector_upl->size = 0;
10343 			vector_upl->offset = 0;
10344 			kfree_type(struct _vector_upl, vector_upl);
10345 			vector_upl = (vector_upl_t)0xfeedfeed;
10346 		} else {
10347 			panic("vector_upl_deallocate was passed a non-vectored upl");
10348 		}
10349 	} else {
10350 		panic("vector_upl_deallocate was passed a NULL upl");
10351 	}
10352 }
10353 
10354 boolean_t
10355 vector_upl_is_valid(upl_t upl)
10356 {
10357 	if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
10358 		vector_upl_t vector_upl = upl->vector_upl;
10359 		if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
10360 			return FALSE;
10361 		} else {
10362 			return TRUE;
10363 		}
10364 	}
10365 	return FALSE;
10366 }
10367 
10368 boolean_t
10369 vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
10370 {
10371 	if (vector_upl_is_valid(upl)) {
10372 		vector_upl_t vector_upl = upl->vector_upl;
10373 
10374 		if (vector_upl) {
10375 			if (subupl) {
10376 				if (io_size) {
10377 					if (io_size < PAGE_SIZE) {
10378 						io_size = PAGE_SIZE;
10379 					}
10380 					subupl->vector_upl = (void*)vector_upl;
10381 					vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
10382 					vector_upl->size += io_size;
10383 					upl->u_size += io_size;
10384 				} else {
10385 					uint32_t i = 0, invalid_upls = 0;
10386 					for (i = 0; i < vector_upl->num_upls; i++) {
10387 						if (vector_upl->upl_elems[i] == subupl) {
10388 							break;
10389 						}
10390 					}
10391 					if (i == vector_upl->num_upls) {
10392 						panic("Trying to remove sub-upl when none exists");
10393 					}
10394 
10395 					vector_upl->upl_elems[i] = NULL;
10396 					invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls,
10397 					    relaxed);
10398 					if (invalid_upls == vector_upl->num_upls) {
10399 						return TRUE;
10400 					} else {
10401 						return FALSE;
10402 					}
10403 				}
10404 			} else {
10405 				panic("vector_upl_set_subupl was passed a NULL upl element");
10406 			}
10407 		} else {
10408 			panic("vector_upl_set_subupl was passed a non-vectored upl");
10409 		}
10410 	} else {
10411 		panic("vector_upl_set_subupl was passed a NULL upl");
10412 	}
10413 
10414 	return FALSE;
10415 }
10416 
10417 void
10418 vector_upl_set_pagelist(upl_t upl)
10419 {
10420 	if (vector_upl_is_valid(upl)) {
10421 		uint32_t i = 0;
10422 		vector_upl_t vector_upl = upl->vector_upl;
10423 
10424 		if (vector_upl) {
10425 			vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
10426 
10427 			vector_upl->pagelist = kalloc_data(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE), Z_WAITOK);
10428 
10429 			for (i = 0; i < vector_upl->num_upls; i++) {
10430 				cur_upl_pagelist_size = sizeof(struct upl_page_info) * upl_adjusted_size(vector_upl->upl_elems[i], PAGE_MASK) / PAGE_SIZE;
10431 				bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
10432 				pagelist_size += cur_upl_pagelist_size;
10433 				if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) {
10434 					upl->highest_page = vector_upl->upl_elems[i]->highest_page;
10435 				}
10436 			}
10437 			assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
10438 		} else {
10439 			panic("vector_upl_set_pagelist was passed a non-vectored upl");
10440 		}
10441 	} else {
10442 		panic("vector_upl_set_pagelist was passed a NULL upl");
10443 	}
10444 }
10445 
10446 upl_t
10447 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
10448 {
10449 	if (vector_upl_is_valid(upl)) {
10450 		vector_upl_t vector_upl = upl->vector_upl;
10451 		if (vector_upl) {
10452 			if (index < vector_upl->num_upls) {
10453 				return vector_upl->upl_elems[index];
10454 			}
10455 		} else {
10456 			panic("vector_upl_subupl_byindex was passed a non-vectored upl");
10457 		}
10458 	}
10459 	return NULL;
10460 }
10461 
10462 upl_t
10463 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
10464 {
10465 	if (vector_upl_is_valid(upl)) {
10466 		uint32_t i = 0;
10467 		vector_upl_t vector_upl = upl->vector_upl;
10468 
10469 		if (vector_upl) {
10470 			upl_t subupl = NULL;
10471 			vector_upl_iostates_t subupl_state;
10472 
10473 			for (i = 0; i < vector_upl->num_upls; i++) {
10474 				subupl = vector_upl->upl_elems[i];
10475 				subupl_state = vector_upl->upl_iostates[i];
10476 				if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
10477 					/* We could have been passed an offset/size pair that belongs
10478 					 * to an UPL element that has already been committed/aborted.
10479 					 * If so, return NULL.
10480 					 */
10481 					if (subupl == NULL) {
10482 						return NULL;
10483 					}
10484 					if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
10485 						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
10486 						if (*upl_size > subupl_state.size) {
10487 							*upl_size = subupl_state.size;
10488 						}
10489 					}
10490 					if (*upl_offset >= subupl_state.offset) {
10491 						*upl_offset -= subupl_state.offset;
10492 					} else if (i) {
10493 						panic("Vector UPL offset miscalculation");
10494 					}
10495 					return subupl;
10496 				}
10497 			}
10498 		} else {
10499 			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL");
10500 		}
10501 	}
10502 	return NULL;
10503 }
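/*
 * Worked example for vector_upl_subupl_byoffset() (illustrative numbers):
 * with two sub-UPLs whose iostates are {offset 0, size 0x2000} and
 * {offset 0x2000, size 0x3000}, a caller passing *upl_offset = 0x2800 and
 * *upl_size = 0x1000 skips the first element (0x2800 > 0x1fff), matches the
 * second, leaves *upl_size at 0x1000 since the request fits within that
 * element, rebases *upl_offset to 0x800 relative to the matched sub-UPL,
 * and gets the second sub-UPL returned.
 */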
10504 
10505 void
10506 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
10507 {
10508 	*v_upl_submap = NULL;
10509 
10510 	if (vector_upl_is_valid(upl)) {
10511 		vector_upl_t vector_upl = upl->vector_upl;
10512 		if (vector_upl) {
10513 			*v_upl_submap = vector_upl->submap;
10514 			*submap_dst_addr = vector_upl->submap_dst_addr;
10515 		} else {
10516 			panic("vector_upl_get_submap was passed a non-vectored UPL");
10517 		}
10518 	} else {
10519 		panic("vector_upl_get_submap was passed a null UPL");
10520 	}
10521 }
10522 
10523 void
10524 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
10525 {
10526 	if (vector_upl_is_valid(upl)) {
10527 		vector_upl_t vector_upl = upl->vector_upl;
10528 		if (vector_upl) {
10529 			vector_upl->submap = submap;
10530 			vector_upl->submap_dst_addr = submap_dst_addr;
10531 		} else {
10532 			panic("vector_upl_get_submap was passed a non-vectored UPL");
10533 		}
10534 	} else {
10535 		panic("vector_upl_get_submap was passed a NULL UPL");
10536 	}
10537 }
10538 
10539 void
10540 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
10541 {
10542 	if (vector_upl_is_valid(upl)) {
10543 		uint32_t i = 0;
10544 		vector_upl_t vector_upl = upl->vector_upl;
10545 
10546 		if (vector_upl) {
10547 			for (i = 0; i < vector_upl->num_upls; i++) {
10548 				if (vector_upl->upl_elems[i] == subupl) {
10549 					break;
10550 				}
10551 			}
10552 
10553 			if (i == vector_upl->num_upls) {
10554 				panic("setting sub-upl iostate when none exists");
10555 			}
10556 
10557 			vector_upl->upl_iostates[i].offset = offset;
10558 			if (size < PAGE_SIZE) {
10559 				size = PAGE_SIZE;
10560 			}
10561 			vector_upl->upl_iostates[i].size = size;
10562 		} else {
10563 			panic("vector_upl_set_iostate was passed a non-vectored UPL");
10564 		}
10565 	} else {
10566 		panic("vector_upl_set_iostate was passed a NULL UPL");
10567 	}
10568 }
10569 
10570 void
10571 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
10572 {
10573 	if (vector_upl_is_valid(upl)) {
10574 		uint32_t i = 0;
10575 		vector_upl_t vector_upl = upl->vector_upl;
10576 
10577 		if (vector_upl) {
10578 			for (i = 0; i < vector_upl->num_upls; i++) {
10579 				if (vector_upl->upl_elems[i] == subupl) {
10580 					break;
10581 				}
10582 			}
10583 
10584 			if (i == vector_upl->num_upls) {
10585 				panic("getting sub-upl iostate when none exists");
10586 			}
10587 
10588 			*offset = vector_upl->upl_iostates[i].offset;
10589 			*size = vector_upl->upl_iostates[i].size;
10590 		} else {
10591 			panic("vector_upl_get_iostate was passed a non-vectored UPL");
10592 		}
10593 	} else {
10594 		panic("vector_upl_get_iostate was passed a NULL UPL");
10595 	}
10596 }
10597 
10598 void
10599 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
10600 {
10601 	if (vector_upl_is_valid(upl)) {
10602 		vector_upl_t vector_upl = upl->vector_upl;
10603 		if (vector_upl) {
10604 			if (index < vector_upl->num_upls) {
10605 				*offset = vector_upl->upl_iostates[index].offset;
10606 				*size = vector_upl->upl_iostates[index].size;
10607 			} else {
10608 				*offset = *size = 0;
10609 			}
10610 		} else {
10611 			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL");
10612 		}
10613 	} else {
10614 		panic("vector_upl_get_iostate_byindex was passed a NULL UPL");
10615 	}
10616 }
10617 
10618 upl_page_info_t *
10619 upl_get_internal_vectorupl_pagelist(upl_t upl)
10620 {
10621 	return ((vector_upl_t)(upl->vector_upl))->pagelist;
10622 }
10623 
10624 void *
10625 upl_get_internal_vectorupl(upl_t upl)
10626 {
10627 	return upl->vector_upl;
10628 }
10629 
10630 vm_size_t
10631 upl_get_internal_pagelist_offset(void)
10632 {
10633 	return sizeof(struct upl);
10634 }
10635 
10636 void
10637 upl_clear_dirty(
10638 	upl_t           upl,
10639 	boolean_t       value)
10640 {
10641 	if (value) {
10642 		upl->flags |= UPL_CLEAR_DIRTY;
10643 	} else {
10644 		upl->flags &= ~UPL_CLEAR_DIRTY;
10645 	}
10646 }
10647 
10648 void
10649 upl_set_referenced(
10650 	upl_t           upl,
10651 	boolean_t       value)
10652 {
10653 	upl_lock(upl);
10654 	if (value) {
10655 		upl->ext_ref_count++;
10656 	} else {
10657 		if (!upl->ext_ref_count) {
10658 			panic("upl_set_referenced not %p", upl);
10659 		}
10660 		upl->ext_ref_count--;
10661 	}
10662 	upl_unlock(upl);
10663 }
10664 
10665 #if CONFIG_IOSCHED
10666 void
10667 upl_set_blkno(
10668 	upl_t           upl,
10669 	vm_offset_t     upl_offset,
10670 	int             io_size,
10671 	int64_t         blkno)
10672 {
10673 	int i, j;
10674 	if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
10675 		return;
10676 	}
10677 
10678 	assert(upl->upl_reprio_info != 0);
10679 	for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
10680 		UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
10681 	}
10682 }
10683 #endif
10684 
10685 void inline
10686 memoryshot(unsigned int event, unsigned int control)
10687 {
10688 	if (vm_debug_events) {
10689 		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
10690 		    vm_page_active_count, vm_page_inactive_count,
10691 		    vm_page_free_count, vm_page_speculative_count,
10692 		    vm_page_throttled_count);
10693 	} else {
10694 		(void) event;
10695 		(void) control;
10696 	}
10697 }
10698 
10699 #ifdef MACH_BSD
10700 
10701 boolean_t
10702 upl_device_page(upl_page_info_t *upl)
10703 {
10704 	return UPL_DEVICE_PAGE(upl);
10705 }
10706 boolean_t
10707 upl_page_present(upl_page_info_t *upl, int index)
10708 {
10709 	return UPL_PAGE_PRESENT(upl, index);
10710 }
10711 boolean_t
10712 upl_speculative_page(upl_page_info_t *upl, int index)
10713 {
10714 	return UPL_SPECULATIVE_PAGE(upl, index);
10715 }
10716 boolean_t
10717 upl_dirty_page(upl_page_info_t *upl, int index)
10718 {
10719 	return UPL_DIRTY_PAGE(upl, index);
10720 }
10721 boolean_t
10722 upl_valid_page(upl_page_info_t *upl, int index)
10723 {
10724 	return UPL_VALID_PAGE(upl, index);
10725 }
10726 ppnum_t
10727 upl_phys_page(upl_page_info_t *upl, int index)
10728 {
10729 	return UPL_PHYS_PAGE(upl, index);
10730 }
10731 
10732 void
10733 upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
10734 {
10735 	upl[index].mark = v;
10736 }
10737 
10738 boolean_t
10739 upl_page_get_mark(upl_page_info_t *upl, int index)
10740 {
10741 	return upl[index].mark;
10742 }
10743 
10744 void
10745 vm_countdirtypages(void)
10746 {
10747 	vm_page_t m;
10748 	int dpages;
10749 	int pgopages;
10750 	int precpages;
10751 
10752 
10753 	dpages = 0;
10754 	pgopages = 0;
10755 	precpages = 0;
10756 
10757 	vm_page_lock_queues();
10758 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
10759 	do {
10760 		if (m == (vm_page_t)0) {
10761 			break;
10762 		}
10763 
10764 		if (m->vmp_dirty) {
10765 			dpages++;
10766 		}
10767 		if (m->vmp_free_when_done) {
10768 			pgopages++;
10769 		}
10770 		if (m->vmp_precious) {
10771 			precpages++;
10772 		}
10773 
10774 		assert(VM_PAGE_OBJECT(m) != kernel_object);
10775 		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10776 		if (m == (vm_page_t)0) {
10777 			break;
10778 		}
10779 	} while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
10780 	vm_page_unlock_queues();
10781 
10782 	vm_page_lock_queues();
10783 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
10784 	do {
10785 		if (m == (vm_page_t)0) {
10786 			break;
10787 		}
10788 
10789 		dpages++;
10790 		assert(m->vmp_dirty);
10791 		assert(!m->vmp_free_when_done);
10792 		assert(VM_PAGE_OBJECT(m) != kernel_object);
10793 		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10794 		if (m == (vm_page_t)0) {
10795 			break;
10796 		}
10797 	} while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
10798 	vm_page_unlock_queues();
10799 
10800 	vm_page_lock_queues();
10801 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
10802 	do {
10803 		if (m == (vm_page_t)0) {
10804 			break;
10805 		}
10806 
10807 		if (m->vmp_dirty) {
10808 			dpages++;
10809 		}
10810 		if (m->vmp_free_when_done) {
10811 			pgopages++;
10812 		}
10813 		if (m->vmp_precious) {
10814 			precpages++;
10815 		}
10816 
10817 		assert(VM_PAGE_OBJECT(m) != kernel_object);
10818 		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10819 		if (m == (vm_page_t)0) {
10820 			break;
10821 		}
10822 	} while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
10823 	vm_page_unlock_queues();
10824 
10825 	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
10826 
10827 	dpages = 0;
10828 	pgopages = 0;
10829 	precpages = 0;
10830 
10831 	vm_page_lock_queues();
10832 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
10833 
10834 	do {
10835 		if (m == (vm_page_t)0) {
10836 			break;
10837 		}
10838 		if (m->vmp_dirty) {
10839 			dpages++;
10840 		}
10841 		if (m->vmp_free_when_done) {
10842 			pgopages++;
10843 		}
10844 		if (m->vmp_precious) {
10845 			precpages++;
10846 		}
10847 
10848 		assert(VM_PAGE_OBJECT(m) != kernel_object);
10849 		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10850 		if (m == (vm_page_t)0) {
10851 			break;
10852 		}
10853 	} while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
10854 	vm_page_unlock_queues();
10855 
10856 	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
10857 }
10858 #endif /* MACH_BSD */
10859 
10860 
10861 #if CONFIG_IOSCHED
10862 int
10863 upl_get_cached_tier(upl_t  upl)
10864 {
10865 	assert(upl);
10866 	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
10867 		return upl->upl_priority;
10868 	}
10869 	return -1;
10870 }
10871 #endif /* CONFIG_IOSCHED */
10872 
10873 
10874 void
10875 upl_callout_iodone(upl_t upl)
10876 {
10877 	struct upl_io_completion *upl_ctx = upl->upl_iodone;
10878 
10879 	if (upl_ctx) {
10880 		void    (*iodone_func)(void *, int) = upl_ctx->io_done;
10881 
10882 		assert(upl_ctx->io_done);
10883 
10884 		(*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
10885 	}
10886 }
10887 
10888 void
10889 upl_set_iodone(upl_t upl, void *upl_iodone)
10890 {
10891 	upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
10892 }
10893 
10894 void
10895 upl_set_iodone_error(upl_t upl, int error)
10896 {
10897 	struct upl_io_completion *upl_ctx = upl->upl_iodone;
10898 
10899 	if (upl_ctx) {
10900 		upl_ctx->io_error = error;
10901 	}
10902 }
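/*
 * Sketch of how an I/O completion context is wired up (the callback and
 * context names are made up for illustration; the io_context / io_done /
 * io_error fields are the ones this file dereferences):
 *
 *	static void my_iodone(void *ctx, int error) { ... }
 *
 *	struct upl_io_completion io_ctx = {
 *		.io_context = my_state,
 *		.io_done    = my_iodone,
 *		.io_error   = 0,
 *	};
 *	upl_set_iodone(upl, &io_ctx);
 *	...
 *	upl_set_iodone_error(upl, EIO);   // recorded if something fails
 *	upl_callout_iodone(upl);          // invokes my_iodone(my_state, EIO)
 */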
10903 
10904 
10905 ppnum_t
10906 upl_get_highest_page(
10907 	upl_t                      upl)
10908 {
10909 	return upl->highest_page;
10910 }
10911 
10912 upl_size_t
10913 upl_get_size(
10914 	upl_t                      upl)
10915 {
10916 	return upl_adjusted_size(upl, PAGE_MASK);
10917 }
10918 
10919 upl_size_t
10920 upl_adjusted_size(
10921 	upl_t upl,
10922 	vm_map_offset_t pgmask)
10923 {
10924 	vm_object_offset_t start_offset, end_offset;
10925 
10926 	start_offset = trunc_page_mask_64(upl->u_offset, pgmask);
10927 	end_offset = round_page_mask_64(upl->u_offset + upl->u_size, pgmask);
10928 
10929 	return (upl_size_t)(end_offset - start_offset);
10930 }
10931 
10932 vm_object_offset_t
10933 upl_adjusted_offset(
10934 	upl_t upl,
10935 	vm_map_offset_t pgmask)
10936 {
10937 	return trunc_page_mask_64(upl->u_offset, pgmask);
10938 }
10939 
10940 vm_object_offset_t
10941 upl_get_data_offset(
10942 	upl_t upl)
10943 {
10944 	return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK);
10945 }
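/*
 * Worked example (illustrative numbers): for a UPL with u_offset = 0x1a00
 * and u_size = 0x400 on a 4K-page system (pgmask = 0xfff),
 * upl_adjusted_offset() truncates the start to 0x1000, upl_adjusted_size()
 * rounds the end (0x1e00) up to 0x2000 and reports 0x1000, and
 * upl_get_data_offset() returns 0xa00 -- the offset of the requested data
 * within that first page.
 */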
10946 
10947 upl_t
10948 upl_associated_upl(upl_t upl)
10949 {
10950 	return upl->associated_upl;
10951 }
10952 
10953 void
10954 upl_set_associated_upl(upl_t upl, upl_t associated_upl)
10955 {
10956 	upl->associated_upl = associated_upl;
10957 }
10958 
10959 struct vnode *
10960 upl_lookup_vnode(upl_t upl)
10961 {
10962 	if (!upl->map_object->internal) {
10963 		return vnode_pager_lookup_vnode(upl->map_object->pager);
10964 	} else {
10965 		return NULL;
10966 	}
10967 }
10968 
10969 #if UPL_DEBUG
10970 kern_return_t
10971 upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
10972 {
10973 	upl->ubc_alias1 = alias1;
10974 	upl->ubc_alias2 = alias2;
10975 	return KERN_SUCCESS;
10976 }
10977 int
10978 upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
10979 {
10980 	if (al) {
10981 		*al = upl->ubc_alias1;
10982 	}
10983 	if (al2) {
10984 		*al2 = upl->ubc_alias2;
10985 	}
10986 	return KERN_SUCCESS;
10987 }
10988 #endif /* UPL_DEBUG */
10989 
10990 #if VM_PRESSURE_EVENTS
10991 /*
10992  * Upward trajectory.
10993  */
10994 extern boolean_t vm_compressor_low_on_space(void);
10995 
10996 boolean_t
10997 VM_PRESSURE_NORMAL_TO_WARNING(void)
10998 {
10999 	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11000 		/* Available pages below our threshold */
11001 		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
11002 			/* No frozen processes to kill */
11003 			if (memorystatus_frozen_count == 0) {
11004 				/* Not enough suspended processes available. */
11005 				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
11006 					return TRUE;
11007 				}
11008 			}
11009 		}
11010 		return FALSE;
11011 	} else {
11012 		return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
11013 	}
11014 }
11015 
11016 boolean_t
11017 VM_PRESSURE_WARNING_TO_CRITICAL(void)
11018 {
11019 	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11020 		/* Available pages below our threshold */
11021 		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
11022 			return TRUE;
11023 		}
11024 		return FALSE;
11025 	} else {
11026 		return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
11027 	}
11028 }
11029 
11030 /*
11031  * Downward trajectory.
11032  */
11033 boolean_t
11034 VM_PRESSURE_WARNING_TO_NORMAL(void)
11035 {
11036 	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11037 		/* Available pages above our threshold */
11038 		unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
11039 		if (memorystatus_available_pages > target_threshold) {
11040 			return TRUE;
11041 		}
11042 		return FALSE;
11043 	} else {
11044 		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
11045 	}
11046 }
11047 
11048 boolean_t
11049 VM_PRESSURE_CRITICAL_TO_WARNING(void)
11050 {
11051 	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11052 		/* Available pages above our threshold */
11053 		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
11054 		if (memorystatus_available_pages > target_threshold) {
11055 			return TRUE;
11056 		}
11057 		return FALSE;
11058 	} else {
11059 		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
11060 	}
11061 }
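/*
 * Hysteresis example (illustrative numbers, non-compressor path): if
 * memorystatus_available_pages_pressure is 10000 pages, the system can
 * enter the warning level once available pages drop below 10000 (assuming
 * no frozen processes and too few suspended ones), but it does not report
 * a return to normal until they climb back above
 * 10000 + (15 * 10000) / 100 = 11500.  The 15% margin keeps the pressure
 * level from flapping around the threshold; the critical level uses the
 * same scheme against memorystatus_available_pages_critical.
 */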
11062 #endif /* VM_PRESSURE_EVENTS */
11063