1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
65
66 #include <stdint.h>
67 #include <ptrauth.h>
68
69 #include <debug.h>
70
71 #include <mach/mach_types.h>
72 #include <mach/memory_object.h>
73 #include <mach/mach_host_server.h>
74 #include <mach/upl.h>
75 #include <mach/vm_map.h>
76 #include <mach/vm_param.h>
77 #include <mach/vm_statistics.h>
78 #include <mach/sdt.h>
79
80 #include <kern/kern_types.h>
81 #include <kern/counter.h>
82 #include <kern/host_statistics.h>
83 #include <kern/machine.h>
84 #include <kern/misc_protos.h>
85 #include <kern/sched.h>
86 #include <kern/thread.h>
87 #include <kern/kalloc.h>
88 #include <kern/zalloc_internal.h>
89 #include <kern/policy_internal.h>
90 #include <kern/thread_group.h>
91
92 #include <os/log.h>
93
94 #include <sys/kdebug_triage.h>
95
96 #include <machine/vm_tuning.h>
97 #include <machine/commpage.h>
98
99 #include <vm/pmap.h>
100 #include <vm/vm_compressor_pager.h>
101 #include <vm/vm_fault.h>
102 #include <vm/vm_map_internal.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_protos.h> /* must be last */
107 #include <vm/memory_object.h>
108 #include <vm/vm_purgeable_internal.h>
109 #include <vm/vm_shared_region.h>
110 #include <vm/vm_compressor.h>
111
112 #include <san/kasan.h>
113
114 #if CONFIG_PHANTOM_CACHE
115 #include <vm/vm_phantom_cache.h>
116 #endif
117
118 #if UPL_DEBUG
119 #include <libkern/OSDebug.h>
120 #endif
121
122 extern int cs_debug;
123
124 #if CONFIG_MBUF_MCACHE
125 extern void mbuf_drain(boolean_t);
126 #endif /* CONFIG_MBUF_MCACHE */
127
128 #if VM_PRESSURE_EVENTS
129 #if CONFIG_JETSAM
130 extern unsigned int memorystatus_available_pages;
131 extern unsigned int memorystatus_available_pages_pressure;
132 extern unsigned int memorystatus_available_pages_critical;
133 #else /* CONFIG_JETSAM */
134 extern uint64_t memorystatus_available_pages;
135 extern uint64_t memorystatus_available_pages_pressure;
136 extern uint64_t memorystatus_available_pages_critical;
137 #endif /* CONFIG_JETSAM */
138
139 extern unsigned int memorystatus_frozen_count;
140 extern unsigned int memorystatus_suspended_count;
141 extern vm_pressure_level_t memorystatus_vm_pressure_level;
142
143 extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
144 extern uint32_t memorystatus_jetsam_fg_band_waiters;
145
146 void vm_pressure_response(void);
147 extern void consider_vm_pressure_events(void);
148
149 #define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
150 #endif /* VM_PRESSURE_EVENTS */
151
152 SECURITY_READ_ONLY_LATE(thread_t) vm_pageout_scan_thread;
153 SECURITY_READ_ONLY_LATE(thread_t) vm_pageout_gc_thread;
154 #if CONFIG_VPS_DYNAMIC_PRIO
155 TUNABLE(bool, vps_dynamic_priority_enabled, "vps_dynamic_priority_enabled", false);
156 #else
157 const bool vps_dynamic_priority_enabled = false;
158 #endif
159 boolean_t vps_yield_for_pgqlockwaiters = TRUE;
160
161 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
162 #if !XNU_TARGET_OS_OSX
163 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
164 #else /* !XNU_TARGET_OS_OSX */
165 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
166 #endif /* !XNU_TARGET_OS_OSX */
167 #endif
168
169 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
170 #define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
171 #endif
172
173 #ifndef VM_PAGE_LAUNDRY_MAX
174 #define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */
#endif /* VM_PAGE_LAUNDRY_MAX */
176
177 #ifndef VM_PAGEOUT_BURST_WAIT
178 #define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */
179 #endif /* VM_PAGEOUT_BURST_WAIT */
180
181 #ifndef VM_PAGEOUT_EMPTY_WAIT
182 #define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */
183 #endif /* VM_PAGEOUT_EMPTY_WAIT */
184
185 #ifndef VM_PAGEOUT_DEADLOCK_WAIT
186 #define VM_PAGEOUT_DEADLOCK_WAIT 100 /* milliseconds */
187 #endif /* VM_PAGEOUT_DEADLOCK_WAIT */
188
189 #ifndef VM_PAGEOUT_IDLE_WAIT
190 #define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
191 #endif /* VM_PAGEOUT_IDLE_WAIT */
192
193 #ifndef VM_PAGEOUT_SWAP_WAIT
194 #define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */
195 #endif /* VM_PAGEOUT_SWAP_WAIT */
196
197
198 #ifndef VM_PAGE_SPECULATIVE_TARGET
199 #define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
200 #endif /* VM_PAGE_SPECULATIVE_TARGET */
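/*
 * Illustrative arithmetic only (not used by the code): with
 * vm_page_speculative_percentage set to, say, 5, the macro above
 * reduces to (total) / (100 / 5) == (total) / 20, i.e. roughly 5%
 * of the pages may sit on the speculative queues.
 */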
201
202
203 /*
204 * To obtain a reasonable LRU approximation, the inactive queue
205 * needs to be large enough to give pages on it a chance to be
206 * referenced a second time. This macro defines the fraction
207 * of active+inactive pages that should be inactive.
208 * The pageout daemon uses it to update vm_page_inactive_target.
209 *
210 * If vm_page_free_count falls below vm_page_free_target and
211 * vm_page_inactive_count is below vm_page_inactive_target,
212 * then the pageout daemon starts running.
213 */
214
215 #ifndef VM_PAGE_INACTIVE_TARGET
216 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
217 #endif /* VM_PAGE_INACTIVE_TARGET */
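/*
 * Illustrative arithmetic only (not used by the code): with 400,000
 * active + inactive pages available, the default macro above yields
 * vm_page_inactive_target = 400,000 / 2 = 200,000 pages, i.e. about
 * half of the pageable pages are kept inactive so they get a chance
 * to be referenced a second time before being reclaimed.
 */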
218
219 /*
220 * Once the pageout daemon starts running, it keeps going
221 * until vm_page_free_count meets or exceeds vm_page_free_target.
222 */
223
224 #ifndef VM_PAGE_FREE_TARGET
225 #if !XNU_TARGET_OS_OSX
226 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
227 #else /* !XNU_TARGET_OS_OSX */
228 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
229 #endif /* !XNU_TARGET_OS_OSX */
230 #endif /* VM_PAGE_FREE_TARGET */
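/*
 * Illustrative arithmetic only (not used by the code):
 * VM_PAGE_FREE_TARGET(800000) evaluates to 15 + 800000 / 80 = 10,015
 * pages on macOS, and to 15 + 800000 / 100 = 8,015 pages on other
 * targets.
 */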
231
232
233 /*
234 * The pageout daemon always starts running once vm_page_free_count
235 * falls below vm_page_free_min.
236 */
237
238 #ifndef VM_PAGE_FREE_MIN
239 #if !XNU_TARGET_OS_OSX
240 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
241 #else /* !XNU_TARGET_OS_OSX */
242 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
243 #endif /* !XNU_TARGET_OS_OSX */
244 #endif /* VM_PAGE_FREE_MIN */
245
246 #if !XNU_TARGET_OS_OSX
247 #define VM_PAGE_FREE_RESERVED_LIMIT 100
248 #define VM_PAGE_FREE_MIN_LIMIT 1500
249 #define VM_PAGE_FREE_TARGET_LIMIT 2000
250 #else /* !XNU_TARGET_OS_OSX */
251 #define VM_PAGE_FREE_RESERVED_LIMIT 1700
252 #define VM_PAGE_FREE_MIN_LIMIT 3500
253 #define VM_PAGE_FREE_TARGET_LIMIT 4000
254 #endif /* !XNU_TARGET_OS_OSX */
255
256 /*
257 * When vm_page_free_count falls below vm_page_free_reserved,
258 * only vm-privileged threads can allocate pages. vm-privilege
259 * allows the pageout daemon and default pager (and any other
260 * associated threads needed for default pageout) to continue
261 * operation by dipping into the reserved pool of pages.
262 */
263
264 #ifndef VM_PAGE_FREE_RESERVED
265 #define VM_PAGE_FREE_RESERVED(n) \
266 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
267 #endif /* VM_PAGE_FREE_RESERVED */
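/*
 * Illustrative arithmetic only (not used by the code): with
 * VM_PAGE_LAUNDRY_MAX == 128, VM_PAGE_FREE_RESERVED(n) evaluates to
 * (6 * 128) + n = 768 + n pages of reserve for vm-privileged threads,
 * where 'n' is whatever extra slack the caller passes in.
 */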
268
269 /*
270 * When we dequeue pages from the inactive list, they are
271 * reactivated (ie, put back on the active queue) if referenced.
272 * However, it is possible to starve the free list if other
273 * processors are referencing pages faster than we can turn off
274 * the referenced bit. So we limit the number of reactivations
275 * we will make per call of vm_pageout_scan().
276 */
277 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
278
279 #ifndef VM_PAGE_REACTIVATE_LIMIT
280 #if !XNU_TARGET_OS_OSX
281 #define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
282 #else /* !XNU_TARGET_OS_OSX */
283 #define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
284 #endif /* !XNU_TARGET_OS_OSX */
285 #endif /* VM_PAGE_REACTIVATE_LIMIT */
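/*
 * Illustrative arithmetic only (not used by the code): on a macOS
 * configuration with 1,000,000 available pages, the macro above gives
 * MAX(1,000,000 / 20, 20000) = 50,000 reactivations allowed per pass
 * of vm_pageout_scan().
 */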
286 #define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
287
288 int vm_pageout_protect_realtime = true;
289
290 extern boolean_t hibernate_cleaning_in_progress;
291
292 struct pgo_iothread_state pgo_iothread_internal_state[MAX_COMPRESSOR_THREAD_COUNT];
293 struct pgo_iothread_state pgo_iothread_external_state;
294
295 #if VM_PRESSURE_EVENTS
296 void vm_pressure_thread(void);
297
298 boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
299 boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
300
301 boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
302 boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
303 #endif
304
305 static void vm_pageout_iothread_external(struct pgo_iothread_state *, wait_result_t);
306 static void vm_pageout_iothread_internal(struct pgo_iothread_state *, wait_result_t);
307 static void vm_pageout_adjust_eq_iothrottle(struct pgo_iothread_state *, boolean_t);
308
309 extern void vm_pageout_continue(void);
310 extern void vm_pageout_scan(void);
311
312 boolean_t vm_pageout_running = FALSE;
313
314 uint32_t vm_page_upl_tainted = 0;
315 uint32_t vm_page_iopl_tainted = 0;
316
317 #if XNU_TARGET_OS_OSX
318 static boolean_t vm_pageout_waiter = FALSE;
319 #endif /* XNU_TARGET_OS_OSX */
320
321
322 #if DEVELOPMENT || DEBUG
323 struct vm_pageout_debug vm_pageout_debug;
324 #endif
325 struct vm_pageout_vminfo vm_pageout_vminfo;
326 struct vm_pageout_state vm_pageout_state;
327 struct vm_config vm_config;
328
329 struct vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED;
330 struct vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED;
331 #if DEVELOPMENT || DEBUG
332 struct vm_pageout_queue vm_pageout_queue_benchmark VM_PAGE_PACKED_ALIGNED;
333 #endif /* DEVELOPMENT || DEBUG */
334
335 int vm_upl_wait_for_pages = 0;
336 vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
337
338 boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;
339
340 int vm_debug_events = 0;
341
342 LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout");
343
344 #if CONFIG_MEMORYSTATUS
345 extern void memorystatus_kill_on_vps_starvation(void);
346
347 uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
348 uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
349
350 #endif
351
352 #if __AMP__
353
354
355 /*
356 * Bind compressor threads to e-cores unless there are multiple non-e clusters
357 */
358 #if (MAX_CPU_CLUSTERS > 2)
359 #define VM_COMPRESSOR_EBOUND_DEFAULT false
360 #else
361 #define VM_COMPRESSOR_EBOUND_DEFAULT true
362 #endif
363
364 TUNABLE(bool, vm_compressor_ebound, "vmcomp_ecluster", VM_COMPRESSOR_EBOUND_DEFAULT);
365 int vm_pgo_pbound = 0;
366 extern void thread_bind_cluster_type(thread_t, char, bool);
367
368 #endif /* __AMP__ */
369
370
371 /*
372 * Routine: vm_pageout_object_terminate
373 * Purpose:
374 * Destroy the pageout_object, and perform all of the
375 * required cleanup actions.
376 *
377 * In/Out conditions:
378 * The object must be locked, and will be returned locked.
379 */
380 void
vm_pageout_object_terminate(
382 vm_object_t object)
383 {
384 vm_object_t shadow_object;
385
386 /*
387 * Deal with the deallocation (last reference) of a pageout object
388 * (used for cleaning-in-place) by dropping the paging references/
389 * freeing pages in the original object.
390 */
391
392 assert(object->pageout);
393 shadow_object = object->shadow;
394 vm_object_lock(shadow_object);
395
396 while (!vm_page_queue_empty(&object->memq)) {
397 vm_page_t p, m;
398 vm_object_offset_t offset;
399
400 p = (vm_page_t) vm_page_queue_first(&object->memq);
401
402 assert(p->vmp_private);
403 assert(p->vmp_free_when_done);
404 p->vmp_free_when_done = FALSE;
405 assert(!p->vmp_cleaning);
406 assert(!p->vmp_laundry);
407
408 offset = p->vmp_offset;
409 VM_PAGE_FREE(p);
410 p = VM_PAGE_NULL;
411
412 m = vm_page_lookup(shadow_object,
413 offset + object->vo_shadow_offset);
414
415 if (m == VM_PAGE_NULL) {
416 continue;
417 }
418
419 assert((m->vmp_dirty) || (m->vmp_precious) ||
420 (m->vmp_busy && m->vmp_cleaning));
421
422 /*
423 * Handle the trusted pager throttle.
424 * Also decrement the burst throttle (if external).
425 */
426 vm_page_lock_queues();
427 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
428 vm_pageout_throttle_up(m);
429 }
430
431 /*
432 * Handle the "target" page(s). These pages are to be freed if
433 * successfully cleaned. Target pages are always busy, and are
* wired exactly once. The initial target pages are not mapped
* (so cannot be referenced or modified), but converted target
436 * pages may have been modified between the selection as an
437 * adjacent page and conversion to a target.
438 */
439 if (m->vmp_free_when_done) {
440 assert(m->vmp_busy);
441 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
442 assert(m->vmp_wire_count == 1);
443 m->vmp_cleaning = FALSE;
444 m->vmp_free_when_done = FALSE;
445 /*
446 * Revoke all access to the page. Since the object is
447 * locked, and the page is busy, this prevents the page
448 * from being dirtied after the pmap_disconnect() call
449 * returns.
450 *
* Since the page is left "dirty" but "not modified", we
452 * can detect whether the page was redirtied during
453 * pageout by checking the modify state.
454 */
455 if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
456 SET_PAGE_DIRTY(m, FALSE);
457 } else {
458 m->vmp_dirty = FALSE;
459 }
460
461 if (m->vmp_dirty) {
462 vm_page_unwire(m, TRUE); /* reactivates */
463 counter_inc(&vm_statistics_reactivations);
464 PAGE_WAKEUP_DONE(m);
465 } else {
466 vm_page_free(m); /* clears busy, etc. */
467 }
468 vm_page_unlock_queues();
469 continue;
470 }
471 /*
472 * Handle the "adjacent" pages. These pages were cleaned in
473 * place, and should be left alone.
474 * If prep_pin_count is nonzero, then someone is using the
475 * page, so make it active.
476 */
477 if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
478 if (m->vmp_reference) {
479 vm_page_activate(m);
480 } else {
481 vm_page_deactivate(m);
482 }
483 }
484 if (m->vmp_overwriting) {
485 /*
486 * the (COPY_OUT_FROM == FALSE) request_page_list case
487 */
488 if (m->vmp_busy) {
489 /*
490 * We do not re-set m->vmp_dirty !
491 * The page was busy so no extraneous activity
492 * could have occurred. COPY_INTO is a read into the
493 * new pages. CLEAN_IN_PLACE does actually write
494 * out the pages but handling outside of this code
495 * will take care of resetting dirty. We clear the
496 * modify however for the Programmed I/O case.
497 */
498 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
499
500 m->vmp_busy = FALSE;
501 m->vmp_absent = FALSE;
502 } else {
503 /*
504 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
505 * Occurs when the original page was wired
506 * at the time of the list request
507 */
508 assert(VM_PAGE_WIRED(m));
509 vm_page_unwire(m, TRUE); /* reactivates */
510 }
511 m->vmp_overwriting = FALSE;
512 } else {
513 m->vmp_dirty = FALSE;
514 }
515 m->vmp_cleaning = FALSE;
516
517 /*
* Wake up any thread waiting for this page to no longer be in the "cleaning" state.
519 */
520 PAGE_WAKEUP(m);
521 vm_page_unlock_queues();
522 }
523 /*
524 * Account for the paging reference taken in vm_paging_object_allocate.
525 */
526 vm_object_activity_end(shadow_object);
527 vm_object_unlock(shadow_object);
528
529 assert(object->ref_count == 0);
530 assert(object->paging_in_progress == 0);
531 assert(object->activity_in_progress == 0);
532 assert(object->resident_page_count == 0);
533 return;
534 }
535
536 /*
537 * Routine: vm_pageclean_setup
538 *
* Purpose: set up a page to be cleaned (made non-dirty), but not
540 * necessarily flushed from the VM page cache.
541 * This is accomplished by cleaning in place.
542 *
543 * The page must not be busy, and new_object
544 * must be locked.
545 *
546 */
547 static void
vm_pageclean_setup(
549 vm_page_t m,
550 vm_page_t new_m,
551 vm_object_t new_object,
552 vm_object_offset_t new_offset)
553 {
554 assert(!m->vmp_busy);
555 #if 0
556 assert(!m->vmp_cleaning);
557 #endif
558
559 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
560
561 /*
562 * Mark original page as cleaning in place.
563 */
564 m->vmp_cleaning = TRUE;
565 SET_PAGE_DIRTY(m, FALSE);
566 m->vmp_precious = FALSE;
567
568 /*
569 * Convert the fictitious page to a private shadow of
570 * the real page.
571 */
572 assert(new_m->vmp_fictitious);
573 assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
574 new_m->vmp_fictitious = FALSE;
575 new_m->vmp_private = TRUE;
576 new_m->vmp_free_when_done = TRUE;
577 VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
578
579 vm_page_lockspin_queues();
580 vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
581 vm_page_unlock_queues();
582
583 vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
584 assert(!new_m->vmp_wanted);
585 new_m->vmp_busy = FALSE;
586 }
587
588 /*
589 * Routine: vm_pageout_initialize_page
590 * Purpose:
591 * Causes the specified page to be initialized in
592 * the appropriate memory object. This routine is used to push
593 * pages into a copy-object when they are modified in the
594 * permanent object.
595 *
596 * The page is moved to a temporary object and paged out.
597 *
598 * In/out conditions:
599 * The page in question must not be on any pageout queues.
600 * The object to which it belongs must be locked.
601 * The page must be busy, but not hold a paging reference.
602 *
603 * Implementation:
604 * Move this page to a completely new object.
605 */
606 void
vm_pageout_initialize_page(
608 vm_page_t m)
609 {
610 vm_object_t object;
611 vm_object_offset_t paging_offset;
612 memory_object_t pager;
613
614 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
615
616 object = VM_PAGE_OBJECT(m);
617
618 assert(m->vmp_busy);
619 assert(object->internal);
620
621 /*
622 * Verify that we really want to clean this page
623 */
624 assert(!m->vmp_absent);
625 assert(m->vmp_dirty);
626
627 /*
628 * Create a paging reference to let us play with the object.
629 */
630 paging_offset = m->vmp_offset + object->paging_offset;
631
632 if (m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
633 panic("reservation without pageout?"); /* alan */
634
635 VM_PAGE_FREE(m);
636 vm_object_unlock(object);
637
638 return;
639 }
640
641 /*
642 * If there's no pager, then we can't clean the page. This should
643 * never happen since this should be a copy object and therefore not
644 * an external object, so the pager should always be there.
645 */
646
647 pager = object->pager;
648
649 if (pager == MEMORY_OBJECT_NULL) {
650 panic("missing pager for copy object");
651
652 VM_PAGE_FREE(m);
653 return;
654 }
655
656 /*
657 * set the page for future call to vm_fault_list_request
658 */
659 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
660 SET_PAGE_DIRTY(m, FALSE);
661
662 /*
663 * keep the object from collapsing or terminating
664 */
665 vm_object_paging_begin(object);
666 vm_object_unlock(object);
667
668 /*
669 * Write the data to its pager.
670 * Note that the data is passed by naming the new object,
671 * not a virtual address; the pager interface has been
672 * manipulated to use the "internal memory" data type.
673 * [The object reference from its allocation is donated
674 * to the eventual recipient.]
675 */
676 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
677
678 vm_object_lock(object);
679 vm_object_paging_end(object);
680 }
681
682
683 /*
684 * vm_pageout_cluster:
685 *
686 * Given a page, queue it to the appropriate I/O thread,
687 * which will page it out and attempt to clean adjacent pages
688 * in the same operation.
689 *
690 * The object and queues must be locked. We will take a
691 * paging reference to prevent deallocation or collapse when we
692 * release the object lock back at the call site. The I/O thread
* is responsible for consuming this reference.
694 *
695 * The page must not be on any pageout queue.
696 */
697 #if DEVELOPMENT || DEBUG
698 vmct_stats_t vmct_stats;
699
700 int32_t vmct_active = 0;
701 uint64_t vm_compressor_epoch_start = 0;
702 uint64_t vm_compressor_epoch_stop = 0;
703
704 typedef enum vmct_state_t {
705 VMCT_IDLE,
706 VMCT_AWAKENED,
707 VMCT_ACTIVE,
708 } vmct_state_t;
709 vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
710 #endif
711
712
713
714 static void
vm_pageout_cluster_to_queue(vm_page_t m, struct vm_pageout_queue *q)
716 {
717 vm_object_t object = VM_PAGE_OBJECT(m);
718
719 VM_PAGE_CHECK(m);
720 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
721 vm_object_lock_assert_exclusive(object);
722
723 /*
724 * Make sure it's OK to page this out.
725 */
726 assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
727 assert(!m->vmp_cleaning && !m->vmp_laundry);
728 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
729
730 /*
731 * protect the object from collapse or termination
732 */
733 vm_object_activity_begin(object);
734
735
736 /*
737 * pgo_laundry count is tied to the laundry bit
738 */
739 m->vmp_laundry = TRUE;
740 q->pgo_laundry++;
741
742 m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
743 vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);
744
745 // the benchmark queue will be woken up independently by the benchmark itself
746 if (
747 object->internal == TRUE
748 #if DEVELOPMENT || DEBUG
749 && q != &vm_pageout_queue_benchmark
750 #endif
751 ) {
752 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
753 m->vmp_busy = TRUE;
754 // Wake up the first compressor thread. It will wake subsequent threads if necessary.
755 sched_cond_signal(&pgo_iothread_internal_state[0].pgo_wakeup, pgo_iothread_internal_state[0].pgo_iothread);
756 } else {
757 sched_cond_signal(&pgo_iothread_external_state.pgo_wakeup, pgo_iothread_external_state.pgo_iothread);
758 }
759 VM_PAGE_CHECK(m);
760 }
761
762 void
vm_pageout_cluster(vm_page_t m)
764 {
765 struct vm_pageout_queue *q;
766 vm_object_t object = VM_PAGE_OBJECT(m);
767 if (object->internal) {
768 q = &vm_pageout_queue_internal;
769 } else {
770 q = &vm_pageout_queue_external;
771 }
772 vm_pageout_cluster_to_queue(m, q);
773 }
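/*
 * Locking sketch for the contract documented above (illustrative only;
 * a hypothetical helper that is never compiled): the caller holds the
 * object lock and the page queue lock across the call, and the paging
 * reference taken inside vm_pageout_cluster_to_queue() is later
 * consumed by the I/O thread.
 */
#if 0
static void
vm_pageout_cluster_usage_sketch(vm_page_t m)
{
	vm_object_t object = VM_PAGE_OBJECT(m);

	vm_object_lock(object);
	vm_page_lockspin_queues();

	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_pageout_cluster(m);          /* hands the page to the pageout I/O thread */

	vm_page_unlock_queues();
	vm_object_unlock(object);
}
#endif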
774
775
776 /*
777 * A page is back from laundry or we are stealing it back from
778 * the laundering state. See if there are some pages waiting to
779 * go to laundry and if we can let some of them go now.
780 *
781 * Object and page queues must be locked.
782 */
783 void
vm_pageout_throttle_up(
785 vm_page_t m)
786 {
787 struct vm_pageout_queue *q;
788 vm_object_t m_object;
789
790 m_object = VM_PAGE_OBJECT(m);
791
792 assert(m_object != VM_OBJECT_NULL);
793 assert(!is_kernel_object(m_object));
794
795 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
796 vm_object_lock_assert_exclusive(m_object);
797
798 if (m_object->internal == TRUE) {
799 q = &vm_pageout_queue_internal;
800 } else {
801 q = &vm_pageout_queue_external;
802 }
803
804 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
805 vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
806 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
807
808 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
809
810 vm_object_activity_end(m_object);
811
812 VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
813 }
814 if (m->vmp_laundry == TRUE) {
815 m->vmp_laundry = FALSE;
816 q->pgo_laundry--;
817
818 if (q->pgo_throttled == TRUE) {
819 q->pgo_throttled = FALSE;
820 thread_wakeup((event_t) &q->pgo_laundry);
821 }
822 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
823 q->pgo_draining = FALSE;
824 thread_wakeup((event_t) (&q->pgo_laundry + 1));
825 }
826 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
827 }
828 }
829
830
831 static void
vm_pageout_throttle_up_batch(
833 struct vm_pageout_queue *q,
834 int batch_cnt)
835 {
836 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
837
838 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);
839
840 q->pgo_laundry -= batch_cnt;
841
842 if (q->pgo_throttled == TRUE) {
843 q->pgo_throttled = FALSE;
844 thread_wakeup((event_t) &q->pgo_laundry);
845 }
846 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
847 q->pgo_draining = FALSE;
848 thread_wakeup((event_t) (&q->pgo_laundry + 1));
849 }
850 }
851
852
853
854 /*
855 * VM memory pressure monitoring.
856 *
857 * vm_pageout_scan() keeps track of the number of pages it considers and
858 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
859 *
* record_memory_pressure() is called from compute_averages() every 1/8th of
* a second and moves "vm_pageout_stat_now" forward, to start accumulating the
* number of reclaimed pages in a new vm_pageout_stat[] bucket.
863 *
864 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
865 * The caller provides the number of seconds ("nsecs") worth of statistics
866 * it wants, up to 30 seconds.
867 * It computes the number of pages reclaimed in the past "nsecs" seconds and
868 * also returns the number of pages the system still needs to reclaim at this
869 * moment in time.
870 */
871 #if DEVELOPMENT || DEBUG
872 #define VM_PAGEOUT_STAT_SIZE (30 * 8) + 1
873 #else
874 #define VM_PAGEOUT_STAT_SIZE (1 * 8) + 1
875 #endif
876 struct vm_pageout_stat {
877 unsigned long vm_page_active_count;
878 unsigned long vm_page_speculative_count;
879 unsigned long vm_page_inactive_count;
880 unsigned long vm_page_anonymous_count;
881
882 unsigned long vm_page_free_count;
883 unsigned long vm_page_wire_count;
884 unsigned long vm_page_compressor_count;
885
886 unsigned long vm_page_pages_compressed;
887 unsigned long vm_page_pageable_internal_count;
888 unsigned long vm_page_pageable_external_count;
889 unsigned long vm_page_xpmapped_external_count;
890
891 unsigned int pages_grabbed;
892 unsigned int pages_freed;
893
894 unsigned int pages_compressed;
895 unsigned int pages_grabbed_by_compressor;
896 unsigned int failed_compressions;
897
898 unsigned int pages_evicted;
899 unsigned int pages_purged;
900
901 unsigned int considered;
902 unsigned int considered_bq_internal;
903 unsigned int considered_bq_external;
904
905 unsigned int skipped_external;
906 unsigned int skipped_internal;
907 unsigned int filecache_min_reactivations;
908
909 unsigned int freed_speculative;
910 unsigned int freed_cleaned;
911 unsigned int freed_internal;
912 unsigned int freed_external;
913
914 unsigned int cleaned_dirty_external;
915 unsigned int cleaned_dirty_internal;
916
917 unsigned int inactive_referenced;
918 unsigned int inactive_nolock;
919 unsigned int reactivation_limit_exceeded;
920 unsigned int forced_inactive_reclaim;
921
922 unsigned int throttled_internal_q;
923 unsigned int throttled_external_q;
924
925 unsigned int phantom_ghosts_found;
926 unsigned int phantom_ghosts_added;
927
928 unsigned int vm_page_realtime_count;
929 unsigned int forcereclaimed_sharedcache;
930 unsigned int forcereclaimed_realtime;
931 unsigned int protected_sharedcache;
932 unsigned int protected_realtime;
933 } vm_pageout_stats[VM_PAGEOUT_STAT_SIZE];
934
935 unsigned int vm_pageout_stat_now = 0;
936
937 #define VM_PAGEOUT_STAT_BEFORE(i) \
938 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
939 #define VM_PAGEOUT_STAT_AFTER(i) \
940 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
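/*
 * Example of the ring-buffer indexing above (illustrative only): on a
 * release build VM_PAGEOUT_STAT_SIZE is (1 * 8) + 1 == 9, so
 * VM_PAGEOUT_STAT_BEFORE(0) == 8 and VM_PAGEOUT_STAT_AFTER(8) == 0,
 * letting record_memory_pressure() advance vm_pageout_stat_now
 * indefinitely without indexing outside vm_pageout_stats[].
 */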
941
942 #if VM_PAGE_BUCKETS_CHECK
943 int vm_page_buckets_check_interval = 80; /* in eighths of a second */
944 #endif /* VM_PAGE_BUCKETS_CHECK */
945
946
947 void
948 record_memory_pressure(void);
949 void
record_memory_pressure(void)
951 {
952 unsigned int vm_pageout_next;
953
954 #if VM_PAGE_BUCKETS_CHECK
955 /* check the consistency of VM page buckets at regular interval */
956 static int counter = 0;
957 if ((++counter % vm_page_buckets_check_interval) == 0) {
958 vm_page_buckets_check();
959 }
960 #endif /* VM_PAGE_BUCKETS_CHECK */
961
962 vm_pageout_state.vm_memory_pressure =
963 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
964 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
965 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
966 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;
967
968 commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure );
969
970 /* move "now" forward */
971 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
972
973 bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));
974
975 vm_pageout_stat_now = vm_pageout_next;
976 }
977
978
979 /*
980 * IMPORTANT
981 * mach_vm_ctl_page_free_wanted() is called indirectly, via
982 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
983 * it must be safe in the restricted stackshot context. Locks and/or
984 * blocking are not allowable.
985 */
986 unsigned int
mach_vm_ctl_page_free_wanted(void)
988 {
989 unsigned int page_free_target, page_free_count, page_free_wanted;
990
991 page_free_target = vm_page_free_target;
992 page_free_count = vm_page_free_count;
993 if (page_free_target > page_free_count) {
994 page_free_wanted = page_free_target - page_free_count;
995 } else {
996 page_free_wanted = 0;
997 }
998
999 return page_free_wanted;
1000 }
1001
1002
1003 /*
1004 * IMPORTANT:
1005 * mach_vm_pressure_monitor() is called when taking a stackshot, with
1006 * wait_for_pressure FALSE, so that code path must remain safe in the
* restricted stackshot context. No blocking or locks are allowable
* on that code path.
1009 */
1010
1011 kern_return_t
mach_vm_pressure_monitor(
1013 boolean_t wait_for_pressure,
1014 unsigned int nsecs_monitored,
1015 unsigned int *pages_reclaimed_p,
1016 unsigned int *pages_wanted_p)
1017 {
1018 wait_result_t wr;
1019 unsigned int vm_pageout_then, vm_pageout_now;
1020 unsigned int pages_reclaimed;
1021 unsigned int units_of_monitor;
1022
1023 units_of_monitor = 8 * nsecs_monitored;
1024 /*
1025 * We don't take the vm_page_queue_lock here because we don't want
1026 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
1027 * thread when it's trying to reclaim memory. We don't need fully
1028 * accurate monitoring anyway...
1029 */
1030
1031 if (wait_for_pressure) {
1032 /* wait until there's memory pressure */
1033 while (vm_page_free_count >= vm_page_free_target) {
1034 wr = assert_wait((event_t) &vm_page_free_wanted,
1035 THREAD_INTERRUPTIBLE);
1036 if (wr == THREAD_WAITING) {
1037 wr = thread_block(THREAD_CONTINUE_NULL);
1038 }
1039 if (wr == THREAD_INTERRUPTED) {
1040 return KERN_ABORTED;
1041 }
1042 if (wr == THREAD_AWAKENED) {
1043 /*
1044 * The memory pressure might have already
1045 * been relieved but let's not block again
1046 * and let's report that there was memory
1047 * pressure at some point.
1048 */
1049 break;
1050 }
1051 }
1052 }
1053
1054 /* provide the number of pages the system wants to reclaim */
1055 if (pages_wanted_p != NULL) {
1056 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1057 }
1058
1059 if (pages_reclaimed_p == NULL) {
1060 return KERN_SUCCESS;
1061 }
1062
1063 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1064 vm_pageout_now = vm_pageout_stat_now;
1065 pages_reclaimed = 0;
1066 for (vm_pageout_then =
1067 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1068 vm_pageout_then != vm_pageout_now &&
1069 units_of_monitor-- != 0;
1070 vm_pageout_then =
1071 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1072 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
1073 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
1074 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
1075 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
1076 }
1077 *pages_reclaimed_p = pages_reclaimed;
1078
1079 return KERN_SUCCESS;
1080 }
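/*
 * Minimal usage sketch (illustrative only; a hypothetical helper that
 * is never compiled): poll the last 10 seconds of reclaim activity
 * without blocking, the way a stackshot-safe caller would, using the
 * routine defined above.
 */
#if 0
static void
vm_pressure_monitor_usage_sketch(void)
{
	unsigned int pages_reclaimed = 0;
	unsigned int pages_wanted = 0;

	/* wait_for_pressure == FALSE keeps this path free of blocking and locks */
	if (mach_vm_pressure_monitor(FALSE, 10,
	    &pages_reclaimed, &pages_wanted) == KERN_SUCCESS) {
		printf("reclaimed %u pages in the last ~10s, still want %u\n",
		    pages_reclaimed, pages_wanted);
	}
}
#endif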
1081
1082
1083
1084 #if DEVELOPMENT || DEBUG
1085
1086 static void
1087 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
1088
1089 /*
1090 * condition variable used to make sure there is
1091 * only a single sweep going on at a time
1092 */
1093 bool vm_pageout_disconnect_all_pages_active = false;
1094
1095 void
vm_pageout_disconnect_all_pages()
1097 {
1098 vm_page_lock_queues();
1099
1100 if (vm_pageout_disconnect_all_pages_active) {
1101 vm_page_unlock_queues();
1102 return;
1103 }
1104 vm_pageout_disconnect_all_pages_active = true;
1105
1106 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled,
1107 vm_page_throttled_count);
1108 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous,
1109 vm_page_anonymous_count);
1110 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_inactive,
1111 (vm_page_inactive_count - vm_page_anonymous_count));
1112 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active,
1113 vm_page_active_count);
1114 #ifdef CONFIG_SECLUDED_MEMORY
1115 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_secluded,
1116 vm_page_secluded_count);
1117 #endif /* CONFIG_SECLUDED_MEMORY */
1118 vm_page_unlock_queues();
1119
1120 vm_pageout_disconnect_all_pages_active = false;
1121 }
1122
1123 /* NB: assumes the page_queues lock is held on entry, returns with page queue lock held */
1124 void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
1126 {
1127 vm_page_t m;
1128 vm_object_t t_object = NULL;
1129 vm_object_t l_object = NULL;
1130 vm_object_t m_object = NULL;
1131 int delayed_unlock = 0;
1132 int try_failed_count = 0;
1133 int disconnected_count = 0;
1134 int paused_count = 0;
1135 int object_locked_count = 0;
1136
1137 KDBG((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS) |
1138 DBG_FUNC_START),
1139 q, qcount);
1140
1141 while (qcount && !vm_page_queue_empty(q)) {
1142 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1143
1144 m = (vm_page_t) vm_page_queue_first(q);
1145 m_object = VM_PAGE_OBJECT(m);
1146
1147 /*
1148 * check to see if we currently are working
1149 * with the same object... if so, we've
1150 * already got the lock
1151 */
1152 if (m_object != l_object) {
1153 /*
1154 * the object associated with candidate page is
1155 * different from the one we were just working
1156 * with... dump the lock if we still own it
1157 */
1158 if (l_object != NULL) {
1159 vm_object_unlock(l_object);
1160 l_object = NULL;
1161 }
1162 if (m_object != t_object) {
1163 try_failed_count = 0;
1164 }
1165
1166 /*
* Try to lock object; since we've already got the
1168 * page queues lock, we can only 'try' for this one.
1169 * if the 'try' fails, we need to do a mutex_pause
1170 * to allow the owner of the object lock a chance to
1171 * run...
1172 */
1173 if (!vm_object_lock_try_scan(m_object)) {
1174 if (try_failed_count > 20) {
1175 goto reenter_pg_on_q;
1176 }
1177 vm_page_unlock_queues();
1178 mutex_pause(try_failed_count++);
1179 vm_page_lock_queues();
1180 delayed_unlock = 0;
1181
1182 paused_count++;
1183
1184 t_object = m_object;
1185 continue;
1186 }
1187 object_locked_count++;
1188
1189 l_object = m_object;
1190 }
1191 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry ||
1192 m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m) ||
1193 m->vmp_free_when_done) {
1194 /*
1195 * put it back on the head of its queue
1196 */
1197 goto reenter_pg_on_q;
1198 }
1199 if (m->vmp_pmapped == TRUE) {
1200 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1201
1202 disconnected_count++;
1203 }
1204 reenter_pg_on_q:
1205 vm_page_queue_remove(q, m, vmp_pageq);
1206 vm_page_queue_enter(q, m, vmp_pageq);
1207
1208 qcount--;
1209 try_failed_count = 0;
1210
1211 if (delayed_unlock++ > 128) {
1212 if (l_object != NULL) {
1213 vm_object_unlock(l_object);
1214 l_object = NULL;
1215 }
1216 lck_mtx_yield(&vm_page_queue_lock);
1217 delayed_unlock = 0;
1218 }
1219 }
1220 if (l_object != NULL) {
1221 vm_object_unlock(l_object);
1222 l_object = NULL;
1223 }
1224
1225 KDBG((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS) |
1226 DBG_FUNC_END),
1227 q, disconnected_count, object_locked_count, paused_count);
1228 }
1229
1230 extern char* proc_best_name(struct proc* proc);
1231
1232 int
vm_toggle_task_selfdonate_pages(task_t task)
1234 {
1235 int state = 0;
1236 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
1237 printf("VM Donation mode is OFF on the system\n");
1238 return state;
1239 }
1240 if (task != kernel_task) {
1241 task_lock(task);
1242 if (!task->donates_own_pages) {
1243 printf("SELF DONATE for %s ON\n", proc_best_name(get_bsdtask_info(task)));
1244 task->donates_own_pages = true;
1245 state = 1;
1246 } else if (task->donates_own_pages) {
1247 printf("SELF DONATE for %s OFF\n", proc_best_name(get_bsdtask_info(task)));
1248 task->donates_own_pages = false;
1249 state = 0;
1250 }
1251 task_unlock(task);
1252 }
1253 return state;
1254 }
1255 #endif /* DEVELOPMENT || DEBUG */
1256
1257 void
vm_task_set_selfdonate_pages(task_t task, bool donate)
1259 {
1260 assert(vm_page_donate_mode != VM_PAGE_DONATE_DISABLED);
1261 assert(task != kernel_task);
1262
1263 task_lock(task);
1264 task->donates_own_pages = donate;
1265 task_unlock(task);
1266 }
1267
1268
1269
1270 static size_t
1271 vm_pageout_page_queue(vm_page_queue_head_t *, size_t, bool);
1272
1273 /*
1274 * condition variable used to make sure there is
1275 * only a single sweep going on at a time
1276 */
1277 boolean_t vm_pageout_anonymous_pages_active = FALSE;
1278
1279
1280 void
vm_pageout_anonymous_pages()
1282 {
1283 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
1284 vm_page_lock_queues();
1285
1286 if (vm_pageout_anonymous_pages_active == TRUE) {
1287 vm_page_unlock_queues();
1288 return;
1289 }
1290 vm_pageout_anonymous_pages_active = TRUE;
1291 vm_page_unlock_queues();
1292
1293 vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count, false);
1294 vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count, false);
1295 vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count, false);
1296
1297 if (VM_CONFIG_SWAP_IS_PRESENT) {
1298 vm_consider_swapping();
1299 }
1300
1301 vm_page_lock_queues();
1302 vm_pageout_anonymous_pages_active = FALSE;
1303 vm_page_unlock_queues();
1304 }
1305 }
1306
1307
1308 size_t
vm_pageout_page_queue(vm_page_queue_head_t *q, size_t qcount, bool perf_test)
1310 {
1311 vm_page_t m;
1312 vm_object_t t_object = NULL;
1313 vm_object_t l_object = NULL;
1314 vm_object_t m_object = NULL;
1315 int delayed_unlock = 0;
1316 int try_failed_count = 0;
1317 int refmod_state;
1318 int pmap_options;
1319 struct vm_pageout_queue *iq;
1320 ppnum_t phys_page;
1321 size_t pages_moved = 0;
1322
1323
1324 iq = &vm_pageout_queue_internal;
1325
1326 vm_page_lock_queues();
1327
1328 #if DEVELOPMENT || DEBUG
1329 if (perf_test) {
1330 iq = &vm_pageout_queue_benchmark;
1331 // ensure the benchmark queue isn't throttled
1332 iq->pgo_maxlaundry = (unsigned int) qcount;
1333 }
#endif /* DEVELOPMENT || DEBUG */
1335
1336 while (qcount && !vm_page_queue_empty(q)) {
1337 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1338
1339 if (VM_PAGE_Q_THROTTLED(iq)) {
1340 if (l_object != NULL) {
1341 vm_object_unlock(l_object);
1342 l_object = NULL;
1343 }
1344 iq->pgo_draining = TRUE;
1345
1346 assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
1347 vm_page_unlock_queues();
1348
1349 thread_block(THREAD_CONTINUE_NULL);
1350
1351 vm_page_lock_queues();
1352 delayed_unlock = 0;
1353 continue;
1354 }
1355 m = (vm_page_t) vm_page_queue_first(q);
1356 m_object = VM_PAGE_OBJECT(m);
1357
1358 /*
1359 * check to see if we currently are working
1360 * with the same object... if so, we've
1361 * already got the lock
1362 */
1363 if (m_object != l_object) {
1364 if (!m_object->internal) {
1365 goto reenter_pg_on_q;
1366 }
1367
1368 /*
1369 * the object associated with candidate page is
1370 * different from the one we were just working
1371 * with... dump the lock if we still own it
1372 */
1373 if (l_object != NULL) {
1374 vm_object_unlock(l_object);
1375 l_object = NULL;
1376 }
1377 if (m_object != t_object) {
1378 try_failed_count = 0;
1379 }
1380
1381 /*
* Try to lock object; since we've already got the
1383 * page queues lock, we can only 'try' for this one.
1384 * if the 'try' fails, we need to do a mutex_pause
1385 * to allow the owner of the object lock a chance to
1386 * run...
1387 */
1388 if (!vm_object_lock_try_scan(m_object)) {
1389 if (try_failed_count > 20) {
1390 goto reenter_pg_on_q;
1391 }
1392 vm_page_unlock_queues();
1393 mutex_pause(try_failed_count++);
1394 vm_page_lock_queues();
1395 delayed_unlock = 0;
1396
1397 t_object = m_object;
1398 continue;
1399 }
1400 l_object = m_object;
1401 }
1402 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_free_when_done) {
1403 /*
1404 * page is not to be cleaned
1405 * put it back on the head of its queue
1406 */
1407 goto reenter_pg_on_q;
1408 }
1409 phys_page = VM_PAGE_GET_PHYS_PAGE(m);
1410
1411 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
1412 refmod_state = pmap_get_refmod(phys_page);
1413
1414 if (refmod_state & VM_MEM_REFERENCED) {
1415 m->vmp_reference = TRUE;
1416 }
1417 if (refmod_state & VM_MEM_MODIFIED) {
1418 SET_PAGE_DIRTY(m, FALSE);
1419 }
1420 }
1421 if (m->vmp_reference == TRUE) {
1422 m->vmp_reference = FALSE;
1423 pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1424 goto reenter_pg_on_q;
1425 }
1426 if (m->vmp_pmapped == TRUE) {
1427 if (m->vmp_dirty || m->vmp_precious) {
1428 pmap_options = PMAP_OPTIONS_COMPRESSOR;
1429 } else {
1430 pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1431 }
1432 refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
1433 if (refmod_state & VM_MEM_MODIFIED) {
1434 SET_PAGE_DIRTY(m, FALSE);
1435 }
1436 }
1437
1438 if (!m->vmp_dirty && !m->vmp_precious) {
1439 vm_page_unlock_queues();
1440 VM_PAGE_FREE(m);
1441 vm_page_lock_queues();
1442 delayed_unlock = 0;
1443
1444 goto next_pg;
1445 }
1446 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1447 if (!m_object->pager_initialized) {
1448 vm_page_unlock_queues();
1449
1450 vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
1451
1452 if (!m_object->pager_initialized) {
1453 vm_object_compressor_pager_create(m_object);
1454 }
1455
1456 vm_page_lock_queues();
1457 delayed_unlock = 0;
1458 }
1459 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1460 goto reenter_pg_on_q;
1461 }
1462 /*
1463 * vm_object_compressor_pager_create will drop the object lock
1464 * which means 'm' may no longer be valid to use
1465 */
1466 continue;
1467 }
1468
1469 if (!perf_test) {
1470 /*
1471 * we've already factored out pages in the laundry which
1472 * means this page can't be on the pageout queue so it's
1473 * safe to do the vm_page_queues_remove
1474 */
1475 bool donate = (m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE);
1476 vm_page_queues_remove(m, TRUE);
1477 if (donate) {
1478 /*
1479 * The compressor needs to see this bit to know
1480 * where this page needs to land. Also if stolen,
1481 * this bit helps put the page back in the right
1482 * special queue where it belongs.
1483 */
1484 m->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
1485 }
1486 } else {
1487 vm_page_queue_remove(q, m, vmp_pageq);
1488 }
1489
1490 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1491
1492 vm_pageout_cluster_to_queue(m, iq);
1493
1494 pages_moved++;
1495 goto next_pg;
1496
1497 reenter_pg_on_q:
1498 vm_page_queue_remove(q, m, vmp_pageq);
1499 vm_page_queue_enter(q, m, vmp_pageq);
1500 next_pg:
1501 qcount--;
1502 try_failed_count = 0;
1503
1504 if (delayed_unlock++ > 128) {
1505 if (l_object != NULL) {
1506 vm_object_unlock(l_object);
1507 l_object = NULL;
1508 }
1509 lck_mtx_yield(&vm_page_queue_lock);
1510 delayed_unlock = 0;
1511 }
1512 }
1513 if (l_object != NULL) {
1514 vm_object_unlock(l_object);
1515 l_object = NULL;
1516 }
1517 vm_page_unlock_queues();
1518 return pages_moved;
1519 }
1520
1521
1522
1523 /*
1524 * function in BSD to apply I/O throttle to the pageout thread
1525 */
1526 extern void vm_pageout_io_throttle(void);
1527
1528 #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \
1529 MACRO_BEGIN \
1530 /* \
1531 * If a "reusable" page somehow made it back into \
1532 * the active queue, it's been re-used and is not \
1533 * quite re-usable. \
1534 * If the VM object was "all_reusable", consider it \
1535 * as "all re-used" instead of converting it to \
1536 * "partially re-used", which could be expensive. \
1537 */ \
1538 assert(VM_PAGE_OBJECT((m)) == (obj)); \
1539 if ((m)->vmp_reusable || \
1540 (obj)->all_reusable) { \
1541 vm_object_reuse_pages((obj), \
1542 (m)->vmp_offset, \
1543 (m)->vmp_offset + PAGE_SIZE_64, \
1544 FALSE); \
1545 } \
1546 MACRO_END
1547
1548
1549 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64
1550 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
1551
1552 #define FCS_IDLE 0
1553 #define FCS_DELAYED 1
1554 #define FCS_DEADLOCK_DETECTED 2
1555
1556 struct flow_control {
1557 int state;
1558 mach_timespec_t ts;
1559 };
1560
1561
1562 uint64_t vm_pageout_rejected_bq_internal = 0;
1563 uint64_t vm_pageout_rejected_bq_external = 0;
1564 uint64_t vm_pageout_skipped_bq_internal = 0;
1565 uint64_t vm_pageout_skipped_bq_external = 0;
1566
1567 #define ANONS_GRABBED_LIMIT 2
1568
1569
1570 #if 0
1571 static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
1572 #endif
1573 static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
1574
1575 #define VM_PAGEOUT_PB_NO_ACTION 0
1576 #define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
1577 #define VM_PAGEOUT_PB_THREAD_YIELD 2
1578
1579
1580 #if 0
1581 static void
1582 vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
1583 {
1584 if (*local_freeq) {
1585 vm_page_unlock_queues();
1586
1587 VM_DEBUG_CONSTANT_EVENT(
1588 vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1589 vm_page_free_count, 0, 0, 1);
1590
1591 vm_page_free_list(*local_freeq, TRUE);
1592
1593 VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1594 vm_page_free_count, *local_freed, 0, 1);
1595
1596 *local_freeq = NULL;
1597 *local_freed = 0;
1598
1599 vm_page_lock_queues();
1600 } else {
1601 lck_mtx_yield(&vm_page_queue_lock);
1602 }
1603 *delayed_unlock = 1;
1604 }
1605 #endif
1606
1607
1608 static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
1610 vm_page_t *local_freeq, int *local_freed, int action)
1611 {
1612 vm_page_unlock_queues();
1613
1614 if (*object != NULL) {
1615 vm_object_unlock(*object);
1616 *object = NULL;
1617 }
1618 if (*local_freeq) {
1619 vm_page_free_list(*local_freeq, TRUE);
1620
1621 *local_freeq = NULL;
1622 *local_freed = 0;
1623 }
1624 *delayed_unlock = 1;
1625
1626 switch (action) {
1627 case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
1628 vm_consider_waking_compactor_swapper();
1629 break;
1630 case VM_PAGEOUT_PB_THREAD_YIELD:
1631 thread_yield_internal(1);
1632 break;
1633 case VM_PAGEOUT_PB_NO_ACTION:
1634 default:
1635 break;
1636 }
1637 vm_page_lock_queues();
1638 }
1639
1640
1641 static struct vm_pageout_vminfo last;
1642
1643 uint64_t last_vm_page_pages_grabbed = 0;
1644
1645 extern uint32_t c_segment_pages_compressed;
1646
1647 extern uint64_t shared_region_pager_reclaimed;
1648 extern struct memory_object_pager_ops shared_region_pager_ops;
1649
1650 void
update_vm_info(void)
1652 {
1653 unsigned long tmp;
1654 uint64_t tmp64;
1655
1656 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
1657 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
1658 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
1659 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;
1660
1661 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
1662 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
1663 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;
1664
1665 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
1666 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
1667 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
1668 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
1669 vm_pageout_stats[vm_pageout_stat_now].vm_page_realtime_count = vm_page_realtime_count;
1670
1671 tmp = vm_pageout_vminfo.vm_pageout_considered_page;
1672 vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
1673 last.vm_pageout_considered_page = tmp;
1674
1675 tmp64 = vm_pageout_vminfo.vm_pageout_compressions;
1676 vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions);
1677 last.vm_pageout_compressions = tmp64;
1678
1679 tmp = vm_pageout_vminfo.vm_compressor_failed;
1680 vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
1681 last.vm_compressor_failed = tmp;
1682
1683 tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed;
1684 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed);
1685 last.vm_compressor_pages_grabbed = tmp64;
1686
1687 tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
1688 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
1689 last.vm_phantom_cache_found_ghost = tmp;
1690
1691 tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
1692 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
1693 last.vm_phantom_cache_added_ghost = tmp;
1694
1695 tmp64 = counter_load(&vm_page_grab_count);
1696 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed);
1697 last_vm_page_pages_grabbed = tmp64;
1698
1699 tmp = vm_pageout_vminfo.vm_page_pages_freed;
1700 vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
1701 last.vm_page_pages_freed = tmp;
1702
1703 if (vm_pageout_stats[vm_pageout_stat_now].considered) {
1704 tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
1705 vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
1706 last.vm_pageout_pages_evicted = tmp;
1707
1708 tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
1709 vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
1710 last.vm_pageout_pages_purged = tmp;
1711
1712 tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
1713 vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
1714 last.vm_pageout_freed_speculative = tmp;
1715
1716 tmp = vm_pageout_vminfo.vm_pageout_freed_external;
1717 vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
1718 last.vm_pageout_freed_external = tmp;
1719
1720 tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
1721 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
1722 last.vm_pageout_inactive_referenced = tmp;
1723
1724 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
1725 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
1726 last.vm_pageout_scan_inactive_throttled_external = tmp;
1727
1728 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
1729 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
1730 last.vm_pageout_inactive_dirty_external = tmp;
1731
1732 tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
1733 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
1734 last.vm_pageout_freed_cleaned = tmp;
1735
1736 tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
1737 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
1738 last.vm_pageout_inactive_nolock = tmp;
1739
1740 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
1741 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
1742 last.vm_pageout_scan_inactive_throttled_internal = tmp;
1743
1744 tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
1745 vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
1746 last.vm_pageout_skipped_external = tmp;
1747
1748 tmp = vm_pageout_vminfo.vm_pageout_skipped_internal;
1749 vm_pageout_stats[vm_pageout_stat_now].skipped_internal = (unsigned int)(tmp - last.vm_pageout_skipped_internal);
1750 last.vm_pageout_skipped_internal = tmp;
1751
1752 tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
1753 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
1754 last.vm_pageout_reactivation_limit_exceeded = tmp;
1755
1756 tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
1757 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
1758 last.vm_pageout_inactive_force_reclaim = tmp;
1759
1760 tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
1761 vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
1762 last.vm_pageout_freed_internal = tmp;
1763
1764 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
1765 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
1766 last.vm_pageout_considered_bq_internal = tmp;
1767
1768 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
1769 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
1770 last.vm_pageout_considered_bq_external = tmp;
1771
1772 tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
1773 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
1774 last.vm_pageout_filecache_min_reactivated = tmp;
1775
1776 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
1777 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
1778 last.vm_pageout_inactive_dirty_internal = tmp;
1779
1780 tmp = vm_pageout_vminfo.vm_pageout_forcereclaimed_sharedcache;
1781 vm_pageout_stats[vm_pageout_stat_now].forcereclaimed_sharedcache = (unsigned int)(tmp - last.vm_pageout_forcereclaimed_sharedcache);
1782 last.vm_pageout_forcereclaimed_sharedcache = tmp;
1783
1784 tmp = vm_pageout_vminfo.vm_pageout_forcereclaimed_realtime;
1785 vm_pageout_stats[vm_pageout_stat_now].forcereclaimed_realtime = (unsigned int)(tmp - last.vm_pageout_forcereclaimed_realtime);
1786 last.vm_pageout_forcereclaimed_realtime = tmp;
1787
1788 tmp = vm_pageout_vminfo.vm_pageout_protected_sharedcache;
1789 vm_pageout_stats[vm_pageout_stat_now].protected_sharedcache = (unsigned int)(tmp - last.vm_pageout_protected_sharedcache);
1790 last.vm_pageout_protected_sharedcache = tmp;
1791
1792 tmp = vm_pageout_vminfo.vm_pageout_protected_realtime;
1793 vm_pageout_stats[vm_pageout_stat_now].protected_realtime = (unsigned int)(tmp - last.vm_pageout_protected_realtime);
1794 last.vm_pageout_protected_realtime = tmp;
1795 }
1796
1797 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
1798 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
1799 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
1800 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
1801 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
1802 0);
1803
1804 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
1805 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
1806 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
1807 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
1808 0,
1809 0);
1810
1811 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
1812 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
1813 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
1814 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
1815 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
1816 0);
1817
1818 if (vm_pageout_stats[vm_pageout_stat_now].considered ||
1819 vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
1820 vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
1821 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
1822 vm_pageout_stats[vm_pageout_stat_now].considered,
1823 vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
1824 vm_pageout_stats[vm_pageout_stat_now].freed_external,
1825 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
1826 0);
1827
1828 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
1829 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
1830 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
1831 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
1832 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
1833 0);
1834
1835 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
1836 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
1837 vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
1838 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
1839 vm_pageout_stats[vm_pageout_stat_now].skipped_external,
1840 0);
1841
1842 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
1843 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
1844 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
1845 vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
1846 vm_pageout_stats[vm_pageout_stat_now].freed_internal,
1847 0);
1848
1849 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
1850 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
1851 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
1852 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
1853 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
1854 0);
1855
1856 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO10)) | DBG_FUNC_NONE,
1857 vm_pageout_stats[vm_pageout_stat_now].forcereclaimed_sharedcache,
1858 vm_pageout_stats[vm_pageout_stat_now].forcereclaimed_realtime,
1859 vm_pageout_stats[vm_pageout_stat_now].protected_sharedcache,
1860 vm_pageout_stats[vm_pageout_stat_now].protected_realtime,
1861 0);
1862 }
1863 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
1864 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
1865 vm_pageout_stats[vm_pageout_stat_now].pages_freed,
1866 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
1867 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
1868 0);
1869
1870 record_memory_pressure();
1871 }
1872
1873 extern boolean_t hibernation_vmqueues_inspection;
1874
1875 /*
1876 * Return values for functions called by vm_pageout_scan
1877 * that control its flow.
1878 *
1879 * PROCEED -- vm_pageout_scan will keep making forward progress.
1880 * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
1881 * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
1882 */
1883
1884 #define VM_PAGEOUT_SCAN_PROCEED (0)
1885 #define VM_PAGEOUT_SCAN_DONE_RETURN (1)
1886 #define VM_PAGEOUT_SCAN_NEXT_ITERATION (2)
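
/*
 * Illustrative sketch only (not compiled): the main loop in vm_pageout_scan
 * dispatches on these codes roughly as follows -- the real checks appear
 * further down in vm_pageout_scan() itself:
 *
 *	retval = vps_flow_control(...);
 *	if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION)
 *		continue;			// restart the main 'for' loop
 *	else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN)
 *		goto return_from_scan;		// page demand satisfied
 *	// VM_PAGEOUT_SCAN_PROCEED: fall through and keep scanning
 */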
1887
1888 /*
1889 * This function is called only from vm_pageout_scan and
1890 * it moves overflow secluded pages (one at a time) to the
1891 * batched 'local' free Q or active Q.
1892 */
1893 static void
1894 vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
1895 {
1896 #if CONFIG_SECLUDED_MEMORY
1897 /*
1898 * Deal with secluded_q overflow.
1899 */
1900 if (vm_page_secluded_count > vm_page_secluded_target) {
1901 vm_page_t secluded_page;
1902
1903 /*
1904 * SECLUDED_AGING_BEFORE_ACTIVE:
1905 * Excess secluded pages go to the active queue and
1906 * will later go to the inactive queue.
1907 */
1908 assert((vm_page_secluded_count_free +
1909 vm_page_secluded_count_inuse) ==
1910 vm_page_secluded_count);
1911 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
1912 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
1913
1914 vm_page_queues_remove(secluded_page, FALSE);
1915 assert(!secluded_page->vmp_fictitious);
1916 assert(!VM_PAGE_WIRED(secluded_page));
1917
1918 if (secluded_page->vmp_object == 0) {
1919 /* transfer to free queue */
1920 assert(secluded_page->vmp_busy);
1921 secluded_page->vmp_snext = *local_freeq;
1922 *local_freeq = secluded_page;
1923 *local_freed += 1;
1924 } else {
1925 /* transfer to head of active queue */
1926 vm_page_enqueue_active(secluded_page, FALSE);
1927 secluded_page = VM_PAGE_NULL;
1928 }
1929 }
1930 #else /* CONFIG_SECLUDED_MEMORY */
1931
1932 #pragma unused(local_freeq)
1933 #pragma unused(local_freed)
1934
1935 return;
1936
1937 #endif /* CONFIG_SECLUDED_MEMORY */
1938 }
1939
1940 /*
1941 * This function is called only from vm_pageout_scan and
1942 * it initializes the loop targets for vm_pageout_scan().
1943 */
1944 static void
1945 vps_init_page_targets(void)
1946 {
1947 /*
1948 * LD TODO: Other page targets should be calculated here too.
1949 */
1950 vm_page_anonymous_min = vm_page_inactive_target / 20;
1951
1952 if (vm_pageout_state.vm_page_speculative_percentage > 50) {
1953 vm_pageout_state.vm_page_speculative_percentage = 50;
1954 } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
1955 vm_pageout_state.vm_page_speculative_percentage = 1;
1956 }
1957
1958 vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1959 vm_page_inactive_count);
1960 }
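
/*
 * Example of the clamp above (illustrative numbers only): a configured
 * vm_page_speculative_percentage of 80 is reduced to 50 and a value of 0
 * is bumped to 1 before VM_PAGE_SPECULATIVE_TARGET is evaluated against
 * the active + inactive page counts.
 */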
1961
1962 /*
1963 * This function is called only from vm_pageout_scan and
1964 * it purges a single VM object at a time and will either
1965 * make vm_pageout_scan() restart the loop or keep moving forward.
1966 */
1967 static int
1968 vps_purge_object()
1969 {
1970 int force_purge;
1971
1972 assert(available_for_purge >= 0);
1973 force_purge = 0; /* no force-purging */
1974
1975 #if VM_PRESSURE_EVENTS
1976 vm_pressure_level_t pressure_level;
1977
1978 pressure_level = memorystatus_vm_pressure_level;
1979
1980 if (pressure_level > kVMPressureNormal) {
1981 if (pressure_level >= kVMPressureCritical) {
1982 force_purge = vm_pageout_state.memorystatus_purge_on_critical;
1983 } else if (pressure_level >= kVMPressureUrgent) {
1984 force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
1985 } else if (pressure_level >= kVMPressureWarning) {
1986 force_purge = vm_pageout_state.memorystatus_purge_on_warning;
1987 }
1988 }
1989 #endif /* VM_PRESSURE_EVENTS */
1990
1991 if (available_for_purge || force_purge) {
1992 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
1993
1994 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1995 if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
1996 VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
1997 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1998 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1999
2000 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2001 }
2002 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
2003 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
2004 }
2005
2006 return VM_PAGEOUT_SCAN_PROCEED;
2007 }
2008
2009 /*
2010 * This function is called only from vm_pageout_scan and
2011 * it will try to age the next speculative Q if the oldest
2012 * one is empty.
2013 */
2014 static int
2015 vps_age_speculative_queue(boolean_t force_speculative_aging)
2016 {
2017 #define DELAY_SPECULATIVE_AGE 1000
2018
2019 /*
2020 * try to pull pages from the aging bins...
2021 * see vm_page.h for an explanation of how
2022 * this mechanism works
2023 */
2024 boolean_t can_steal = FALSE;
2025 int num_scanned_queues;
2026 static int delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop. */
2027 mach_timespec_t ts;
2028 struct vm_speculative_age_q *aq;
2029 struct vm_speculative_age_q *sq;
2030
2031 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2032
2033 aq = &vm_page_queue_speculative[speculative_steal_index];
2034
2035 num_scanned_queues = 0;
2036 while (vm_page_queue_empty(&aq->age_q) &&
2037 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2038 speculative_steal_index++;
2039
2040 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2041 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
2042 }
2043
2044 aq = &vm_page_queue_speculative[speculative_steal_index];
2045 }
2046
2047 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
2048 /*
2049 * XXX We've scanned all the speculative
2050 * queues but still haven't found one
2051 * that is not empty, even though
2052 * vm_page_speculative_count is not 0.
2053 */
2054 if (!vm_page_queue_empty(&sq->age_q)) {
2055 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2056 }
2057 #if DEVELOPMENT || DEBUG
2058 panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
2059 #endif
2060 /* readjust... */
2061 vm_page_speculative_count = 0;
2062 /* ... and continue */
2063 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2064 }
2065
2066 if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
2067 can_steal = TRUE;
2068 } else {
2069 if (!delay_speculative_age) {
2070 mach_timespec_t ts_fully_aged;
2071
2072 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
2073 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
2074 * 1000 * NSEC_PER_USEC;
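
/*
 * Worked example (hypothetical values): with VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * of 10 and vm_page_speculative_q_age_ms of 1000, the product is 10000 ms,
 * so ts_fully_aged starts out as 10 s + 0 ns and is then advanced by the
 * bin's creation time (aq->age_ts) below.
 */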
2075
2076 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
2077
2078 clock_sec_t sec;
2079 clock_nsec_t nsec;
2080 clock_get_system_nanotime(&sec, &nsec);
2081 ts.tv_sec = (unsigned int) sec;
2082 ts.tv_nsec = nsec;
2083
2084 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
2085 can_steal = TRUE;
2086 } else {
2087 delay_speculative_age++;
2088 }
2089 } else {
2090 delay_speculative_age++;
2091 if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
2092 delay_speculative_age = 0;
2093 }
2094 }
2095 }
2096 if (can_steal == TRUE) {
2097 vm_page_speculate_ageit(aq);
2098 }
2099
2100 return VM_PAGEOUT_SCAN_PROCEED;
2101 }
2102
2103 /*
2104 * This function is called only from vm_pageout_scan and
2105 * it evicts a single VM object from the cache.
2106 */
2107 static inline int
2108 vps_object_cache_evict(vm_object_t *object_to_unlock)
2109 {
2110 static int cache_evict_throttle = 0;
2111 struct vm_speculative_age_q *sq;
2112
2113 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2114
2115 if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
2116 int pages_evicted;
2117
2118 if (*object_to_unlock != NULL) {
2119 vm_object_unlock(*object_to_unlock);
2120 *object_to_unlock = NULL;
2121 }
2122 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
2123
2124 pages_evicted = vm_object_cache_evict(100, 10);
2125
2126 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
2127
2128 if (pages_evicted) {
2129 vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
2130
2131 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
2132 vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
2133 memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
2134
2135 /*
2136 * we just freed up to 100 pages,
2137 * so go back to the top of the main loop
2138 * and re-evaluate the memory situation
2139 */
2140 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2141 } else {
2142 cache_evict_throttle = 1000;
2143 }
2144 }
2145 if (cache_evict_throttle) {
2146 cache_evict_throttle--;
2147 }
2148
2149 return VM_PAGEOUT_SCAN_PROCEED;
2150 }
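
/*
 * Note on the throttle above: an unproductive eviction attempt sets
 * cache_evict_throttle to 1000 and each subsequent call decrements it by
 * one, so roughly the next thousand passes through this function skip the
 * (comparatively expensive) vm_object_cache_evict() call.
 */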
2151
2152
2153 /*
2154 * This function is called only from vm_pageout_scan and
2155 * it calculates the filecache min. that needs to be maintained
2156 * as we start to steal pages.
2157 */
2158 static void
2159 vps_calculate_filecache_min(void)
2160 {
2161 int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
2162
2163 #if CONFIG_JETSAM
2164 /*
2165 * don't let the filecache_min fall below 15% of available memory
2166 * on systems with an active compressor that isn't nearing its
2167 * limits w/r to accepting new data
2168 *
2169 * on systems w/o the compressor/swapper, the filecache is always
2170 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2171 * since most (if not all) of the anonymous pages are in the
2172 * throttled queue (which isn't counted as available) which
2173 * effectively disables this filter
2174 */
2175 if (vm_compressor_low_on_space() || divisor == 0) {
2176 vm_pageout_state.vm_page_filecache_min = 0;
2177 } else {
2178 vm_pageout_state.vm_page_filecache_min =
2179 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2180 }
2181 #else
2182 if (vm_compressor_out_of_space() || divisor == 0) {
2183 vm_pageout_state.vm_page_filecache_min = 0;
2184 } else {
2185 /*
2186 * don't let the filecache_min fall below the specified critical level
2187 */
2188 vm_pageout_state.vm_page_filecache_min =
2189 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2190 }
2191 #endif
2192 if (vm_page_free_count < (vm_page_free_reserved / 4)) {
2193 vm_pageout_state.vm_page_filecache_min = 0;
2194 }
2195 }
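
/*
 * Worked example (illustrative divisor): with vm_page_filecache_min_divisor
 * set to 66, the expression (AVAILABLE_NON_COMPRESSED_MEMORY * 10) / 66
 * keeps roughly 10/66, i.e. ~15%, of the available non-compressed pool as
 * the filecache floor, matching the 15% figure in the CONFIG_JETSAM
 * comment above.
 */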
2196
2197 /*
2198 * This function is called only from vm_pageout_scan and
2199 * it updates the flow control time to detect if vm_pageout_scan
2200 * isn't making progress.
2201 */
2202 static void
2203 vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
2204 {
2205 mach_timespec_t ts;
2206 clock_sec_t sec;
2207 clock_nsec_t nsec;
2208
2209 ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
2210 ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
2211 clock_get_system_nanotime(&sec, &nsec);
2212 flow_control->ts.tv_sec = (unsigned int) sec;
2213 flow_control->ts.tv_nsec = nsec;
2214 ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
2215
2216 flow_control->state = FCS_DELAYED;
2217
2218 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
2219 }
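
/*
 * Example of the millisecond-to-timespec split above (hypothetical value):
 * a vm_pageout_deadlock_wait of 1500 ms becomes tv_sec = 1 and
 * tv_nsec = 500 * 1000 * NSEC_PER_USEC (500,000,000 ns), which is then
 * added to the current system time to form the deadlock deadline.
 */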
2220
2221 /*
2222 * This function is called only from vm_pageout_scan and
2223 * it is the flow control logic of VM pageout scan which
2224 * controls if it should block and for how long.
2225 * Any blocking of vm_pageout_scan happens ONLY in this function.
2226 */
2227 static int
2228 vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
2229 vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
2230 {
2231 boolean_t exceeded_burst_throttle = FALSE;
2232 unsigned int msecs = 0;
2233 uint32_t inactive_external_count;
2234 mach_timespec_t ts;
2235 struct vm_pageout_queue *iq;
2236 struct vm_pageout_queue *eq;
2237 struct vm_speculative_age_q *sq;
2238
2239 iq = &vm_pageout_queue_internal;
2240 eq = &vm_pageout_queue_external;
2241 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2242
2243 /*
2244 * Sometimes we have to pause:
2245 * 1) No inactive pages - nothing to do.
2246 * 2) Loop control - no acceptable pages found on the inactive queue
2247 * within the last vm_pageout_burst_inactive_throttle iterations
2248 * 3) Flow control - default pageout queue is full
2249 */
2250 if (vm_page_queue_empty(&vm_page_queue_inactive) &&
2251 vm_page_queue_empty(&vm_page_queue_anonymous) &&
2252 vm_page_queue_empty(&vm_page_queue_cleaned) &&
2253 vm_page_queue_empty(&sq->age_q)) {
2254 VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
2255 msecs = vm_pageout_state.vm_pageout_empty_wait;
2256 } else if (inactive_burst_count >=
2257 MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
2258 (vm_page_inactive_count +
2259 vm_page_speculative_count))) {
2260 VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
2261 msecs = vm_pageout_state.vm_pageout_burst_wait;
2262
2263 exceeded_burst_throttle = TRUE;
2264 } else if (VM_PAGE_Q_THROTTLED(iq) &&
2265 VM_DYNAMIC_PAGING_ENABLED()) {
2266 clock_sec_t sec;
2267 clock_nsec_t nsec;
2268
2269 switch (flow_control->state) {
2270 case FCS_IDLE:
2271 if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
2272 vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2273 /*
2274 * since the compressor is running independently of vm_pageout_scan
2275 * let's not wait for it just yet... as long as we have a healthy supply
2276 * of filecache pages to work with, let's keep stealing those.
2277 */
2278 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2279
2280 if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
2281 (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2282 *anons_grabbed = ANONS_GRABBED_LIMIT;
2283 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
2284 return VM_PAGEOUT_SCAN_PROCEED;
2285 }
2286 }
2287
2288 vps_flow_control_reset_deadlock_timer(flow_control);
2289 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2290
2291 break;
2292
2293 case FCS_DELAYED:
2294 clock_get_system_nanotime(&sec, &nsec);
2295 ts.tv_sec = (unsigned int) sec;
2296 ts.tv_nsec = nsec;
2297
2298 if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
2299 /*
2300 * the pageout thread for the default pager is potentially
2301 * deadlocked since the
2302 * default pager queue has been throttled for more than the
2303 * allowable time... we need to move some clean pages or dirty
2304 * pages belonging to the external pagers if they aren't throttled
2305 * vm_page_free_wanted represents the number of threads currently
2306 * blocked waiting for pages... we'll move one page for each of
2307 * these plus a fixed amount to break the logjam... once we're done
2308 * moving this number of pages, we'll re-enter the FCS_DELAYED state
2309 * with a new timeout target since we have no way of knowing
2310 * whether we've broken the deadlock except through observation
2311 * of the queue associated with the default pager... we need to
2312 * stop moving pages and allow the system to run to see what
2313 * state it settles into.
2314 */
2315
2316 *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
2317 vm_page_free_wanted + vm_page_free_wanted_privileged;
2318 VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
2319 flow_control->state = FCS_DEADLOCK_DETECTED;
2320 thread_wakeup(VM_PAGEOUT_GC_EVENT);
2321 return VM_PAGEOUT_SCAN_PROCEED;
2322 }
2323 /*
2324 * just resniff instead of trying
2325 * to compute a new delay time... we're going to be
2326 * awakened immediately upon a laundry completion,
2327 * so we won't wait any longer than necessary
2328 */
2329 msecs = vm_pageout_state.vm_pageout_idle_wait;
2330 break;
2331
2332 case FCS_DEADLOCK_DETECTED:
2333 if (*vm_pageout_deadlock_target) {
2334 return VM_PAGEOUT_SCAN_PROCEED;
2335 }
2336
2337 vps_flow_control_reset_deadlock_timer(flow_control);
2338 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2339
2340 break;
2341 }
2342 } else {
2343 /*
2344 * No need to pause...
2345 */
2346 return VM_PAGEOUT_SCAN_PROCEED;
2347 }
2348
2349 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2350
2351 vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
2352 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2353
2354 if (vm_page_free_count >= vm_page_free_target) {
2355 /*
2356 * we're here because
2357 * 1) someone else freed up some pages while we had
2358 * the queues unlocked above
2359 * and we've hit one of the 3 conditions that
2360 * cause us to pause the pageout scan thread
2361 *
2362 * since we already have enough free pages,
2363 * let's avoid stalling and return normally
2364 *
2365 * before we return, make sure the pageout I/O threads
2366 * are running throttled in case there are still requests
2367 * in the laundry... since we have enough free pages
2368 * we don't need the laundry to be cleaned in a timely
2369 * fashion... so let's avoid interfering with foreground
2370 * activity
2371 *
2372 * we don't want to hold vm_page_queue_free_lock when
2373 * calling vm_pageout_adjust_eq_iothrottle (since it
2374 * may cause other locks to be taken), we do the initial
2375 * check outside of the lock. Once we take the lock,
2376 * we recheck the condition since it may have changed.
2377 * if it has, no problem, we will make the threads
2378 * non-throttled before actually blocking
2379 */
2380 vm_pageout_adjust_eq_iothrottle(&pgo_iothread_external_state, TRUE);
2381 }
2382 vm_free_page_lock();
2383
2384 if (vm_page_free_count >= vm_page_free_target &&
2385 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2386 return VM_PAGEOUT_SCAN_DONE_RETURN;
2387 }
2388 vm_free_page_unlock();
2389
2390 if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
2391 /*
2392 * we're most likely about to block due to one of
2393 * the 3 conditions that cause vm_pageout_scan to
2394 * not be able to make forward progress w/r
2395 * to providing new pages to the free queue,
2396 * so unthrottle the I/O threads in case we
2397 * have laundry to be cleaned... it needs
2398 * to be completed ASAP.
2399 *
2400 * even if we don't block, we want the io threads
2401 * running unthrottled since the sum of free +
2402 * clean pages is still under our free target
2403 */
2404 vm_pageout_adjust_eq_iothrottle(&pgo_iothread_external_state, FALSE);
2405 }
2406 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
2407 /*
2408 * if we get here we're below our free target and
2409 * we're stalling due to a full laundry queue or
2410 * we don't have any inactive pages other than
2411 * those in the clean queue...
2412 * however, we have pages on the clean queue that
2413 * can be moved to the free queue, so let's not
2414 * stall the pageout scan
2415 */
2416 flow_control->state = FCS_IDLE;
2417 return VM_PAGEOUT_SCAN_PROCEED;
2418 }
2419 if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
2420 flow_control->state = FCS_IDLE;
2421 return VM_PAGEOUT_SCAN_PROCEED;
2422 }
2423
2424 VM_CHECK_MEMORYSTATUS;
2425
2426 if (flow_control->state != FCS_IDLE) {
2427 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
2428 }
2429
2430 iq->pgo_throttled = TRUE;
2431 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
2432
2433 vm_page_unlock_queues();
2434
2435 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2436
2437 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
2438 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2439 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
2440
2441 thread_block(THREAD_CONTINUE_NULL);
2442
2443 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
2444 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2445 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
2446
2447 vm_page_lock_queues();
2448
2449 iq->pgo_throttled = FALSE;
2450
2451 vps_init_page_targets();
2452
2453 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2454 }
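
/*
 * Summary of the flow_control state machine driven above:
 *
 *	FCS_IDLE -> FCS_DELAYED: the internal pageout queue is throttled,
 *	    so arm the deadlock timer and wait.
 *	FCS_DELAYED -> FCS_DEADLOCK_DETECTED: the deadline expired, so set a
 *	    relief target (vm_pageout_deadlock_target) and wake the GC thread.
 *	FCS_DEADLOCK_DETECTED -> FCS_DELAYED: the relief target has drained,
 *	    so re-arm the timer.
 *	back to FCS_IDLE: clean pages are available or the internal queue is
 *	    no longer throttled.
 */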
2455
2456 extern boolean_t vm_darkwake_mode;
2457 /*
2458 * This function is called only from vm_pageout_scan and
2459 * it will find and return the most appropriate page to be
2460 * reclaimed.
2461 */
2462 static int
2463 vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
2464 boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
2465 {
2466 vm_page_t m = NULL;
2467 vm_object_t m_object = VM_OBJECT_NULL;
2468 uint32_t inactive_external_count;
2469 struct vm_speculative_age_q *sq;
2470 struct vm_pageout_queue *iq;
2471 int retval = VM_PAGEOUT_SCAN_PROCEED;
2472
2473 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2474 iq = &vm_pageout_queue_internal;
2475
2476 *is_page_from_bg_q = FALSE;
2477
2478 m = NULL;
2479 m_object = VM_OBJECT_NULL;
2480
2481 if (VM_DYNAMIC_PAGING_ENABLED()) {
2482 assert(vm_page_throttled_count == 0);
2483 assert(vm_page_queue_empty(&vm_page_queue_throttled));
2484 }
2485
2486 /*
2487 * Try for a clean-queue inactive page.
2488 * These are pages that vm_pageout_scan tried to steal earlier, but
2489 * were dirty and had to be cleaned. Pick them up now that they are clean.
2490 */
2491 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2492 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2493
2494 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
2495
2496 goto found_page;
2497 }
2498
2499 /*
2500 * The next most eligible pages are ones we paged in speculatively,
2501 * but which have not yet been touched and have been aged out.
2502 */
2503 if (!vm_page_queue_empty(&sq->age_q)) {
2504 m = (vm_page_t) vm_page_queue_first(&sq->age_q);
2505
2506 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
2507
2508 if (!m->vmp_dirty || force_anonymous == FALSE) {
2509 goto found_page;
2510 } else {
2511 m = NULL;
2512 }
2513 }
2514
2515 #if !CONFIG_JETSAM
2516 if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED) {
2517 if (vm_page_donate_queue_ripe && !vm_page_queue_empty(&vm_page_queue_donate)) {
2518 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_donate);
2519 assert(m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE);
2520 goto found_page;
2521 }
2522 }
2523 #endif /* !CONFIG_JETSAM */
2524
2525 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
2526 vm_object_t bg_m_object = NULL;
2527
2528 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2529
2530 bg_m_object = VM_PAGE_OBJECT(m);
2531
2532 if (!VM_PAGE_PAGEABLE(m) || (vm_darkwake_mode && m->vmp_busy)) {
2533 /*
2534 * This page is on the background queue
2535 * but not on a pageable queue OR is busy during
2536 * darkwake mode when the target is artificially lowered.
2537 * If it is busy during darkwake mode, and we don't skip it,
2538 * we will just swing back around and try again with the same
2539 * queue and might hit the same page or its neighbor in a
2540 * similar state. Both of these are transient states and will
2541 * get resolved, but, at this point let's ignore this page.
2542 */
2543 if (vm_darkwake_mode && m->vmp_busy) {
2544 if (bg_m_object->internal) {
2545 vm_pageout_skipped_bq_internal++;
2546 } else {
2547 vm_pageout_skipped_bq_external++;
2548 }
2549 }
2550 } else if (force_anonymous == FALSE || bg_m_object->internal) {
2551 if (bg_m_object->internal &&
2552 (VM_PAGE_Q_THROTTLED(iq) ||
2553 vm_compressor_out_of_space() == TRUE ||
2554 vm_page_free_count < (vm_page_free_reserved / 4))) {
2555 vm_pageout_skipped_bq_internal++;
2556 } else {
2557 *is_page_from_bg_q = TRUE;
2558
2559 if (bg_m_object->internal) {
2560 vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
2561 } else {
2562 vm_pageout_vminfo.vm_pageout_considered_bq_external++;
2563 }
2564 goto found_page;
2565 }
2566 }
2567 }
2568
2569 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2570
2571 if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
2572 (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2573 *grab_anonymous = TRUE;
2574 *anons_grabbed = 0;
2575
2576 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2577 vm_pageout_vminfo.vm_pageout_skipped_external++;
2578 } else {
2579 if (vm_page_free_count < (COMPRESSOR_FREE_RESERVED_LIMIT * 2)) {
2580 /*
2581 * No swap and we are in dangerously low levels of free memory.
2582 * If we keep going ahead with anonymous pages, we are going to run into a situation
2583 * where the compressor will be stuck waiting for free pages (if it isn't already).
2584 *
2585 * So, pick a file backed page...
2586 */
2587 *grab_anonymous = FALSE;
2588 *anons_grabbed = ANONS_GRABBED_LIMIT;
2589 vm_pageout_vminfo.vm_pageout_skipped_internal++;
2590 }
2591 }
2592 goto want_anonymous;
2593 }
2594 *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
2595
2596 #if CONFIG_JETSAM
2597 /* If the file-backed pool has accumulated
2598 * significantly more pages than the jetsam
2599 * threshold, prefer to reclaim those
2600 * inline to minimise compute overhead of reclaiming
2601 * anonymous pages.
2602 * This calculation does not account for the CPU local
2603 * external page queues, as those are expected to be
2604 * much smaller relative to the global pools.
2605 */
2606
2607 struct vm_pageout_queue *eq = &vm_pageout_queue_external;
2608
2609 if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
2610 if (vm_page_pageable_external_count >
2611 vm_pageout_state.vm_page_filecache_min) {
2612 if ((vm_page_pageable_external_count *
2613 vm_pageout_memorystatus_fb_factor_dr) >
2614 (memorystatus_available_pages_critical *
2615 vm_pageout_memorystatus_fb_factor_nr)) {
2616 *grab_anonymous = FALSE;
2617
2618 VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
2619 }
2620 }
2621 if (*grab_anonymous) {
2622 VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
2623 }
2624 }
2625 #endif /* CONFIG_JETSAM */
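
/*
 * In other words (rearranging the CONFIG_JETSAM comparison above),
 * file-backed pages are preferred over anonymous ones roughly whenever
 * vm_page_pageable_external_count exceeds memorystatus_available_pages_critical
 * scaled by (vm_pageout_memorystatus_fb_factor_nr /
 * vm_pageout_memorystatus_fb_factor_dr).
 */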
2626
2627 want_anonymous:
2628 if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
2629 if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
2630 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2631
2632 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
2633 *anons_grabbed = 0;
2634
2635 if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
2636 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2637 if ((++(*reactivated_this_call) % 100)) {
2638 vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
2639
2640 vm_page_activate(m);
2641 counter_inc(&vm_statistics_reactivations);
2642 #if DEVELOPMENT || DEBUG
2643 if (*is_page_from_bg_q == TRUE) {
2644 if (m_object->internal) {
2645 vm_pageout_rejected_bq_internal++;
2646 } else {
2647 vm_pageout_rejected_bq_external++;
2648 }
2649 }
2650 #endif /* DEVELOPMENT || DEBUG */
2651 vm_pageout_state.vm_pageout_inactive_used++;
2652
2653 m = NULL;
2654 retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
2655
2656 goto found_page;
2657 }
2658
2659 /*
2660 * steal 1 of the file backed pages even if
2661 * we are under the limit that has been set
2662 * for a healthy filecache
2663 */
2664 }
2665 }
2666 goto found_page;
2667 }
2668 }
2669 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2670 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2671
2672 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
2673 *anons_grabbed += 1;
2674
2675 goto found_page;
2676 }
2677
2678 m = NULL;
2679
2680 found_page:
2681 *victim_page = m;
2682
2683 return retval;
2684 }
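
/*
 * Victim selection order implemented above, from most to least preferred:
 *
 *	1. inactive cleaned queue (pages laundered on an earlier pass)
 *	2. aged speculative queue
 *	3. donate queue (non-jetsam configs, when ripe)
 *	4. background queue, when it is over its target
 *	5. file-backed inactive queue vs. anonymous queue, chosen by the
 *	   grab_anonymous / anons_grabbed / filecache-minimum logic above
 */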
2685
2686 /*
2687 * This function is called only from vm_pageout_scan and
2688 * it will put a page back on the active/inactive queue
2689 * if we can't reclaim it for some reason.
2690 */
2691 static void
2692 vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
2693 {
2694 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2695 vm_page_enqueue_inactive(m, FALSE);
2696 } else {
2697 vm_page_activate(m);
2698 }
2699
2700 #if DEVELOPMENT || DEBUG
2701 vm_object_t m_object = VM_PAGE_OBJECT(m);
2702
2703 if (page_from_bg_q == TRUE) {
2704 if (m_object->internal) {
2705 vm_pageout_rejected_bq_internal++;
2706 } else {
2707 vm_pageout_rejected_bq_external++;
2708 }
2709 }
2710 #endif /* DEVELOPMENT || DEBUG */
2711 }
2712
2713 /*
2714 * This function is called only from vm_pageout_scan and
2715 * it will try to grab the victim page's VM object (m_object)
2716 * which differs from the previous victim page's object (object).
2717 */
2718 static int
2719 vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
2720 {
2721 struct vm_speculative_age_q *sq;
2722
2723 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2724
2725 /*
2726 * the object associated with candidate page is
2727 * different from the one we were just working
2728 * with... dump the lock if we still own it
2729 */
2730 if (*object != NULL) {
2731 vm_object_unlock(*object);
2732 *object = NULL;
2733 }
2734 /*
2735 * Try to lock object; since we've already got the
2736 * page queues lock, we can only 'try' for this one.
2737 * if the 'try' fails, we need to do a mutex_pause
2738 * to allow the owner of the object lock a chance to
2739 * run... otherwise, we're likely to trip over this
2740 * object in the same state as we work our way through
2741 * the queue... clumps of pages associated with the same
2742 * object are fairly typical on the inactive and active queues
2743 */
2744 if (!vm_object_lock_try_scan(m_object)) {
2745 vm_page_t m_want = NULL;
2746
2747 vm_pageout_vminfo.vm_pageout_inactive_nolock++;
2748
2749 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2750 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
2751 }
2752
2753 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
2754
2755 m->vmp_reference = FALSE;
2756
2757 if (!m_object->object_is_shared_cache) {
2758 /*
2759 * don't apply this optimization if this is the shared cache
2760 * object, it's too easy to get rid of very hot and important
2761 * pages...
2762 * m->vmp_object must be stable since we hold the page queues lock...
2763 * we can update the scan_collisions field sans the object lock
2764 * since it is a separate field and this is the only spot that does
2765 * a read-modify-write operation and it is never executed concurrently...
2766 * we can asynchronously set this field to 0 when creating a UPL, so it
2767 * is possible for the value to be a bit non-deterministic, but that's ok
2768 * since it's only used as a hint
2769 */
2770 m_object->scan_collisions = 1;
2771 }
2772 if (page_from_bg_q) {
2773 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2774 } else if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2775 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2776 } else if (!vm_page_queue_empty(&sq->age_q)) {
2777 m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
2778 } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
2779 !vm_page_queue_empty(&vm_page_queue_inactive)) {
2780 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2781 } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2782 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2783 }
2784
2785 /*
2786 * this is the next object we're going to be interested in
2787 * try to make sure it's available after the mutex_pause
2788 * returns control
2789 */
2790 if (m_want) {
2791 vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
2792 }
2793
2794 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
2795
2796 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2797 } else {
2798 *object = m_object;
2799 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2800 }
2801
2802 return VM_PAGEOUT_SCAN_PROCEED;
2803 }
2804
2805 /*
2806 * This function is called only from vm_pageout_scan and
2807 * it notices that pageout scan may be rendered ineffective
2808 * due to a FS deadlock and will jetsam a process if possible.
2809 * If jetsam isn't supported, it'll move the page to the active
2810 * queue to try and get some different pages pushed onwards so
2811 * we can try to get out of this scenario.
2812 */
2813 static void
2814 vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
2815 boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
2816 {
2817 struct vm_pageout_queue *eq;
2818 vm_object_t cur_object = VM_OBJECT_NULL;
2819
2820 cur_object = *object;
2821
2822 eq = &vm_pageout_queue_external;
2823
2824 if (cur_object->internal == FALSE) {
2825 /*
2826 * we need to break up the following potential deadlock case...
2827 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2828 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2829 * c) Most of the pages in the inactive queue belong to this file.
2830 *
2831 * we are potentially in this deadlock because...
2832 * a) the external pageout queue is throttled
2833 * b) we're done with the active queue and moved on to the inactive queue
2834 * c) we've got a dirty external page
2835 *
2836 * since we don't know the reason for the external pageout queue being throttled we
2837 * must suspect that we are deadlocked, so move the current page onto the active queue
2838 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2839 *
2840 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2841 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2842 * pool the next time we select a victim page... if we can make enough new free pages,
2843 * the deadlock will break, the external pageout queue will empty and it will no longer
2844 * be throttled
2845 *
2846 * if we have jetsam configured, keep a count of the pages reactivated this way so
2847 * that we can try to find clean pages in the active/inactive queues before
2848 * deciding to jetsam a process
2849 */
2850 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
2851
2852 vm_page_check_pageable_safe(m);
2853 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
2854 vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
2855 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
2856 vm_page_active_count++;
2857 vm_page_pageable_external_count++;
2858
2859 vm_pageout_adjust_eq_iothrottle(&pgo_iothread_external_state, FALSE);
2860
2861 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2862
2863 #pragma unused(force_anonymous)
2864
2865 *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
2866
2867 if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
2868 *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2869 /*
2870 * Possible deadlock scenario so request jetsam action
2871 */
2872 memorystatus_kill_on_vps_starvation();
2873 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_NONE,
2874 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2875 }
2876 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2877
2878 #pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
2879
2880 *force_anonymous = TRUE;
2881 #endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2882 } else {
2883 vm_page_activate(m);
2884 counter_inc(&vm_statistics_reactivations);
2885
2886 #if DEVELOPMENT || DEBUG
2887 if (is_page_from_bg_q == TRUE) {
2888 if (cur_object->internal) {
2889 vm_pageout_rejected_bq_internal++;
2890 } else {
2891 vm_pageout_rejected_bq_external++;
2892 }
2893 }
2894 #endif /* DEVELOPMENT || DEBUG */
2895
2896 vm_pageout_state.vm_pageout_inactive_used++;
2897 }
2898 }
2899
2900
2901 void
2902 vm_page_balance_inactive(int max_to_move)
2903 {
2904 vm_page_t m;
2905
2906 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2907
2908 if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
2909 /*
2910 * It is likely that the hibernation code path is
2911 * dealing with these very queues as we are about
2912 * to move pages around in/from them and completely
2913 * change the linkage of the pages.
2914 *
2915 * And so we skip the rebalancing of these queues.
2916 */
2917 return;
2918 }
2919 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2920 vm_page_inactive_count +
2921 vm_page_speculative_count);
2922
2923 while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
2924 VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
2925
2926 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
2927
2928 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
2929 assert(!m->vmp_laundry);
2930 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
2931 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
2932
2933 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2934
2935 /*
2936 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
2937 *
2938 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
2939 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
2940 * new reference happens. If no further references happen on the page after that remote TLB flushes
2941 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
2942 * by pageout_scan, which is just fine since the last reference would have happened quite far
2943 * in the past (TLB caches don't hang around for very long), and of course could just as easily
2944 * have happened before we moved the page
2945 */
2946 if (m->vmp_pmapped == TRUE) {
2947 /*
2948 * We might be holding the page queue lock as a
2949 * spin lock and clearing the "referenced" bit could
2950 * take a while if there are lots of mappings of
2951 * that page, so make sure we acquire the lock as
2952 * a mutex to avoid a spinlock timeout.
2953 */
2954 vm_page_lockconvert_queues();
2955 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
2956 }
2957
2958 /*
2959 * The page might be absent or busy,
2960 * but vm_page_deactivate can handle that.
2961 * FALSE indicates that we don't want a H/W clear reference
2962 */
2963 vm_page_deactivate_internal(m, FALSE);
2964 }
2965 }
2966
2967 /*
2968 * vm_pageout_scan does the dirty work for the pageout daemon.
2969 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
2970 * held and vm_page_free_wanted == 0.
2971 */
2972 void
2973 vm_pageout_scan(void)
2974 {
2975 unsigned int loop_count = 0;
2976 unsigned int inactive_burst_count = 0;
2977 unsigned int reactivated_this_call;
2978 unsigned int reactivate_limit;
2979 vm_page_t local_freeq = NULL;
2980 int local_freed = 0;
2981 int delayed_unlock;
2982 int delayed_unlock_limit = 0;
2983 int refmod_state = 0;
2984 int vm_pageout_deadlock_target = 0;
2985 struct vm_pageout_queue *iq;
2986 struct vm_pageout_queue *eq;
2987 struct vm_speculative_age_q *sq;
2988 struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
2989 boolean_t inactive_throttled = FALSE;
2990 vm_object_t object = NULL;
2991 uint32_t inactive_reclaim_run;
2992 boolean_t grab_anonymous = FALSE;
2993 boolean_t force_anonymous = FALSE;
2994 boolean_t force_speculative_aging = FALSE;
2995 int anons_grabbed = 0;
2996 int page_prev_q_state = 0;
2997 boolean_t page_from_bg_q = FALSE;
2998 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
2999 vm_object_t m_object = VM_OBJECT_NULL;
3000 int retval = 0;
3001 boolean_t lock_yield_check = FALSE;
3002
3003
3004 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
3005 vm_pageout_vminfo.vm_pageout_freed_speculative,
3006 vm_pageout_state.vm_pageout_inactive_clean,
3007 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
3008 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
3009
3010 flow_control.state = FCS_IDLE;
3011 iq = &vm_pageout_queue_internal;
3012 eq = &vm_pageout_queue_external;
3013 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
3014
3015 /* Ask the pmap layer to return any pages it no longer needs. */
3016 pmap_release_pages_fast();
3017
3018 vm_page_lock_queues();
3019
3020 delayed_unlock = 1;
3021
3022 /*
3023 * Calculate the max number of referenced pages on the inactive
3024 * queue that we will reactivate.
3025 */
3026 reactivated_this_call = 0;
3027 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
3028 vm_page_inactive_count);
3029 inactive_reclaim_run = 0;
3030
3031 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
3032
3033 /*
3034 * We must limit the rate at which we send pages to the pagers
3035 * so that we don't tie up too many pages in the I/O queues.
3036 * We implement a throttling mechanism using the laundry count
3037 * to limit the number of pages outstanding to the default
3038 * and external pagers. We can bypass the throttles and look
3039 * for clean pages if the pageout queues don't drain in a timely
3040 * fashion since this may indicate that the pageout paths are
3041 * stalled waiting for memory, which only we can provide.
3042 */
3043
3044 vps_init_page_targets();
3045 assert(object == NULL);
3046 assert(delayed_unlock != 0);
3047
3048 for (;;) {
3049 vm_page_t m;
3050
3051 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
3052
3053 if (lock_yield_check) {
3054 lock_yield_check = FALSE;
3055
3056 if (delayed_unlock++ > delayed_unlock_limit) {
3057 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
3058 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
3059 } else if (vm_pageout_scan_wants_object) {
3060 vm_page_unlock_queues();
3061 mutex_pause(0);
3062 vm_page_lock_queues();
3063 } else if (vps_yield_for_pgqlockwaiters && lck_mtx_yield(&vm_page_queue_lock)) {
3064 VM_PAGEOUT_DEBUG(vm_pageout_yield_for_free_pages, 1);
3065 }
3066 }
3067
3068 if (vm_upl_wait_for_pages < 0) {
3069 vm_upl_wait_for_pages = 0;
3070 }
3071
3072 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
3073
3074 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
3075 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
3076 }
3077
3078 vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
3079
3080 assert(delayed_unlock);
3081
3082 /*
3083 * maintain our balance
3084 */
3085 vm_page_balance_inactive(1);
3086
3087
3088 /**********************************************************************
3089 * above this point we're playing with the active and secluded queues
3090 * below this point we're playing with the throttling mechanisms
3091 * and the inactive queue
3092 **********************************************************************/
3093
3094 if (vm_page_free_count + local_freed >= vm_page_free_target) {
3095 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
3096
3097 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
3098 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
3099 /*
3100 * make sure the pageout I/O threads are running
3101 * throttled in case there are still requests
3102 * in the laundry... since we have met our targets
3103 * we don't need the laundry to be cleaned in a timely
3104 * fashion... so let's avoid interfering with foreground
3105 * activity
3106 */
3107 vm_pageout_adjust_eq_iothrottle(&pgo_iothread_external_state, TRUE);
3108
3109 vm_free_page_lock();
3110
3111 if ((vm_page_free_count >= vm_page_free_target) &&
3112 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
3113 /*
3114 * done - we have met our target *and*
3115 * there is no one waiting for a page.
3116 */
3117 return_from_scan:
3118 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
3119
3120 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
3121 vm_pageout_state.vm_pageout_inactive,
3122 vm_pageout_state.vm_pageout_inactive_used, 0, 0);
3123 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
3124 vm_pageout_vminfo.vm_pageout_freed_speculative,
3125 vm_pageout_state.vm_pageout_inactive_clean,
3126 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
3127 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
3128
3129 return;
3130 }
3131 vm_free_page_unlock();
3132 }
3133
3134 /*
3135 * Before anything, we check if we have any ripe volatile
3136 * objects around. If so, try to purge the first object.
3137 * If the purge fails, fall through to reclaim a page instead.
3138 * If the purge succeeds, go back to the top and reevaluate
3139 * the new memory situation.
3140 */
3141 retval = vps_purge_object();
3142
3143 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3144 /*
3145 * Success
3146 */
3147 if (object != NULL) {
3148 vm_object_unlock(object);
3149 object = NULL;
3150 }
3151
3152 lock_yield_check = FALSE;
3153 continue;
3154 }
3155
3156 /*
3157 * If our 'aged' queue is empty and we have some speculative pages
3158 * in the other queues, let's go through and see if we need to age
3159 * them.
3160 *
3161 * If we succeeded in aging a speculative Q, or everything simply
3162 * looks normal w.r.t. queue age and queue counts, we keep going onward.
3163 *
3164 * If, for some reason, we seem to have a mismatch between the spec.
3165 * page count and the page queues, we reset those variables and
3166 * restart the loop (LD TODO: Track this better?).
3167 */
3168 if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
3169 retval = vps_age_speculative_queue(force_speculative_aging);
3170
3171 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3172 lock_yield_check = FALSE;
3173 continue;
3174 }
3175 }
3176 force_speculative_aging = FALSE;
3177
3178 /*
3179 * Check to see if we need to evict objects from the cache.
3180 *
3181 * Note: 'object' here doesn't have anything to do with
3182 * the eviction part. We just need to make sure we have dropped
3183 * any object lock we might be holding if we need to go down
3184 * into the eviction logic.
3185 */
3186 retval = vps_object_cache_evict(&object);
3187
3188 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3189 lock_yield_check = FALSE;
3190 continue;
3191 }
3192
3193
3194 /*
3195 * Calculate our filecache_min that will affect the loop
3196 * going forward.
3197 */
3198 vps_calculate_filecache_min();
3199
3200 /*
3201 * LD TODO: Use a structure to hold all state variables for a single
3202 * vm_pageout_scan iteration and pass that structure to this function instead.
3203 */
3204 retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
3205 &delayed_unlock, &local_freeq, &local_freed,
3206 &vm_pageout_deadlock_target, inactive_burst_count);
3207
3208 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3209 if (loop_count >= vm_page_inactive_count) {
3210 loop_count = 0;
3211 }
3212
3213 inactive_burst_count = 0;
3214
3215 assert(object == NULL);
3216 assert(delayed_unlock != 0);
3217
3218 lock_yield_check = FALSE;
3219 continue;
3220 } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
3221 goto return_from_scan;
3222 }
3223
3224 flow_control.state = FCS_IDLE;
3225
3226 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
3227 vm_pageout_inactive_external_forced_reactivate_limit);
3228 loop_count++;
3229 inactive_burst_count++;
3230 vm_pageout_state.vm_pageout_inactive++;
3231
3232 /*
3233 * Choose a victim.
3234 */
3235
3236 m = NULL;
3237 retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);
3238
3239 if (m == NULL) {
3240 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3241 inactive_burst_count = 0;
3242
3243 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3244 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3245 }
3246
3247 lock_yield_check = TRUE;
3248 continue;
3249 }
3250
3251 /*
3252 * if we've gotten here, we have no victim page.
3253 * check to see if we've not finished balancing the queues
3254 * or we have a page on the aged speculative queue that we
3255 * skipped due to force_anonymous == TRUE... or we have
3256 * speculative pages that we can prematurely age... if we're in
3257 * one of these cases we'll keep going, else panic
3258 */
3259 force_anonymous = FALSE;
3260 VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
3261
3262 if (!vm_page_queue_empty(&sq->age_q)) {
3263 lock_yield_check = TRUE;
3264 continue;
3265 }
3266
3267 if (vm_page_speculative_count) {
3268 force_speculative_aging = TRUE;
3269 lock_yield_check = TRUE;
3270 continue;
3271 }
3272 panic("vm_pageout: no victim");
3273
3274 /* NOTREACHED */
3275 }
3276
3277 assert(VM_PAGE_PAGEABLE(m));
3278 m_object = VM_PAGE_OBJECT(m);
3279 force_anonymous = FALSE;
3280
3281 page_prev_q_state = m->vmp_q_state;
3282 /*
3283 * we just found this page on one of our queues...
3284 * it can't also be on the pageout queue, so safe
3285 * to call vm_page_queues_remove
3286 */
3287 bool donate = (m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE);
3288 vm_page_queues_remove(m, TRUE);
3289 if (donate) {
3290 /*
3291 * The compressor needs to see this bit to know
3292 * where this page needs to land. Also if stolen,
3293 * this bit helps put the page back in the right
3294 * special queue where it belongs.
3295 */
3296 m->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3297 }
3298
3299 assert(!m->vmp_laundry);
3300 assert(!m->vmp_private);
3301 assert(!m->vmp_fictitious);
3302 assert(!is_kernel_object(m_object));
3303 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
3304
3305 vm_pageout_vminfo.vm_pageout_considered_page++;
3306
3307 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
3308
3309 /*
3310 * check to see if we currently are working
3311 * with the same object... if so, we've
3312 * already got the lock
3313 */
3314 if (m_object != object) {
3315 boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
3316
3317 /*
3318 * vps_switch_object() will always drop the 'object' lock first
3319 * and then try to acquire the 'm_object' lock. So 'object' has to point to
3320 * either 'm_object' or NULL.
3321 */
3322 retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
3323
3324 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3325 lock_yield_check = TRUE;
3326 continue;
3327 }
3328 }
3329 assert(m_object == object);
3330 assert(VM_PAGE_OBJECT(m) == m_object);
3331
3332 if (m->vmp_busy) {
3333 /*
3334 * Somebody is already playing with this page.
3335 * Put it back on the appropriate queue
3336 *
3337 */
3338 VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
3339
3340 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3341 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
3342 }
3343
3344 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
3345
3346 lock_yield_check = TRUE;
3347 continue;
3348 }
3349
3350 /*
3351 * if (m->vmp_cleaning && !m->vmp_free_when_done)
3352 * If already cleaning this page in place
3353 * just leave it off the paging queues.
3354 * We can leave the page mapped, and upl_commit_range
3355 * will put it on the clean queue.
3356 *
3357 * if (m->vmp_free_when_done && !m->vmp_cleaning)
3358 * an msync INVALIDATE is in progress...
3359 * this page has been marked for destruction
3360 * after it has been cleaned,
3361 * but not yet gathered into a UPL
3362 * where 'cleaning' will be set...
3363 * just leave it off the paging queues
3364 *
3365 * if (m->vmp_free_when_done && m->vmp_cleaning)
3366 * an msync INVALIDATE is in progress
3367 * and the UPL has already gathered this page...
3368 * just leave it off the paging queues
3369 */
3370 if (m->vmp_free_when_done || m->vmp_cleaning) {
3371 lock_yield_check = TRUE;
3372 continue;
3373 }
3374
3375
3376 /*
3377 * If it's absent, in error or the object is no longer alive,
3378 * we can reclaim the page... in the no longer alive case,
3379 * there are 2 states the page can be in that preclude us
3380 * from reclaiming it - busy or cleaning - that we've already
3381 * dealt with
3382 */
3383 if (m->vmp_absent || VMP_ERROR_GET(m) || !object->alive ||
3384 (!object->internal && object->pager == MEMORY_OBJECT_NULL)) {
3385 if (m->vmp_absent) {
3386 VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
3387 } else if (!object->alive ||
3388 (!object->internal &&
3389 object->pager == MEMORY_OBJECT_NULL)) {
3390 VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
3391 } else {
3392 VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
3393 }
3394 reclaim_page:
3395 if (vm_pageout_deadlock_target) {
3396 VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
3397 vm_pageout_deadlock_target--;
3398 }
3399
3400 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
3401
3402 if (object->internal) {
3403 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
3404 } else {
3405 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
3406 }
3407 assert(!m->vmp_cleaning);
3408 assert(!m->vmp_laundry);
3409
3410 if (!object->internal &&
3411 object->pager != NULL &&
3412 object->pager->mo_pager_ops == &shared_region_pager_ops) {
3413 shared_region_pager_reclaimed++;
3414 }
3415
3416 m->vmp_busy = TRUE;
3417
3418 /*
3419 * remove page from object here since we're already
3420 * behind the object lock... defer the rest of the work
3421 * we'd normally do in vm_page_free_prepare_object
3422 * until 'vm_page_free_list' is called
3423 */
3424 if (m->vmp_tabled) {
3425 vm_page_remove(m, TRUE);
3426 }
3427
3428 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
3429 m->vmp_snext = local_freeq;
3430 local_freeq = m;
3431 local_freed++;
3432
3433 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3434 vm_pageout_vminfo.vm_pageout_freed_speculative++;
3435 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3436 vm_pageout_vminfo.vm_pageout_freed_cleaned++;
3437 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
3438 vm_pageout_vminfo.vm_pageout_freed_internal++;
3439 } else {
3440 vm_pageout_vminfo.vm_pageout_freed_external++;
3441 }
3442
3443 inactive_burst_count = 0;
3444
3445 lock_yield_check = TRUE;
3446 continue;
3447 }
3448 if (object->vo_copy == VM_OBJECT_NULL) {
3449 /*
3450 * No one else can have any interest in this page.
3451 * If this is an empty purgable object, the page can be
3452 * reclaimed even if dirty.
3453 * If the page belongs to a volatile purgable object, we
3454 * reactivate it if the compressor isn't active.
3455 */
3456 if (object->purgable == VM_PURGABLE_EMPTY) {
3457 if (m->vmp_pmapped == TRUE) {
3458 /* unmap the page */
3459 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3460 if (refmod_state & VM_MEM_MODIFIED) {
3461 SET_PAGE_DIRTY(m, FALSE);
3462 }
3463 }
3464 if (m->vmp_dirty || m->vmp_precious) {
3465 /* we saved the cost of cleaning this page ! */
3466 vm_page_purged_count++;
3467 }
3468 goto reclaim_page;
3469 }
3470
3471 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
3472 /*
3473 * With the VM compressor, the cost of
3474 * reclaiming a page is much lower (no I/O),
3475 * so if we find a "volatile" page, it's better
3476 * to let it get compressed rather than letting
3477 * it occupy a full page until it gets purged.
3478 * So no need to check for "volatile" here.
3479 */
3480 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
3481 /*
3482 * Avoid cleaning a "volatile" page which might
3483 * be purged soon.
3484 */
3485
3486 /* if it's wired, we can't put it on our queue */
3487 assert(!VM_PAGE_WIRED(m));
3488
3489 /* just stick it back on! */
3490 reactivated_this_call++;
3491
3492 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3493 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
3494 }
3495
3496 goto reactivate_page;
3497 }
3498 }
3499 /*
3500 * If it's being used, reactivate.
3501 * (Fictitious pages are either busy or absent.)
3502 * First, update the reference and dirty bits
3503 * to make sure the page is unreferenced.
3504 */
3505 refmod_state = -1;
3506
3507 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
3508 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3509
3510 if (refmod_state & VM_MEM_REFERENCED) {
3511 m->vmp_reference = TRUE;
3512 }
3513 if (refmod_state & VM_MEM_MODIFIED) {
3514 SET_PAGE_DIRTY(m, FALSE);
3515 }
3516 }
3517
3518 if (m->vmp_reference || m->vmp_dirty) {
3519 /* deal with a rogue "reusable" page */
3520 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
3521 }
3522
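/*
 * Derive the floor for externally mapped ("xpmapped") file pages from the
 * current external page count: (vm_page_external_count * 10) / divisor.
 * For example, a divisor of 40 yields a floor of 25% of
 * vm_page_external_count; a divisor of 0 disables the floor entirely.
 */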
3523 if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
3524 vm_pageout_state.vm_page_xpmapped_min = 0;
3525 } else {
3526 vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
3527 }
3528
3529 if (!m->vmp_no_cache &&
3530 page_from_bg_q == FALSE &&
3531 (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
3532 (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
3533 /*
3534 * The page we pulled off the inactive list has
3535 * been referenced. It is possible for other
3536 * processors to be touching pages faster than we
3537 * can clear the referenced bit and traverse the
3538 * inactive queue, so we limit the number of
3539 * reactivations.
3540 */
3541 if (++reactivated_this_call >= reactivate_limit &&
3542 !object->object_is_shared_cache &&
3543 !((m->vmp_realtime ||
3544 object->for_realtime) &&
3545 vm_pageout_protect_realtime)) {
3546 vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
3547 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
3548 vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
3549 if (object->object_is_shared_cache) {
3550 vm_pageout_vminfo.vm_pageout_forcereclaimed_sharedcache++;
3551 } else if (m->vmp_realtime ||
3552 object->for_realtime) {
3553 vm_pageout_vminfo.vm_pageout_forcereclaimed_realtime++;
3554 }
3555 } else {
3556 uint32_t isinuse;
3557
3558 if (reactivated_this_call >= reactivate_limit) {
3559 if (object->object_is_shared_cache) {
3560 vm_pageout_vminfo.vm_pageout_protected_sharedcache++;
3561 } else if ((m->vmp_realtime ||
3562 object->for_realtime) &&
3563 vm_pageout_protect_realtime) {
3564 vm_pageout_vminfo.vm_pageout_protected_realtime++;
3565 }
3566 }
3567 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3568 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
3569 }
3570
3571 vm_pageout_vminfo.vm_pageout_inactive_referenced++;
3572 reactivate_page:
3573 if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
3574 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
3575 /*
3576 * no explicit mappings of this object exist
3577 * and it's not open via the filesystem
3578 */
3579 vm_page_deactivate(m);
3580 VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
3581 } else {
3582 /*
3583 * The page was/is being used, so put back on active list.
3584 */
3585 vm_page_activate(m);
3586 counter_inc(&vm_statistics_reactivations);
3587 inactive_burst_count = 0;
3588 }
3589 #if DEVELOPMENT || DEBUG
3590 if (page_from_bg_q == TRUE) {
3591 if (m_object->internal) {
3592 vm_pageout_rejected_bq_internal++;
3593 } else {
3594 vm_pageout_rejected_bq_external++;
3595 }
3596 }
3597 #endif /* DEVELOPMENT || DEBUG */
3598
3599 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3600 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3601 }
3602 vm_pageout_state.vm_pageout_inactive_used++;
3603
3604 lock_yield_check = TRUE;
3605 continue;
3606 }
3607 /*
3608 * Make sure we call pmap_get_refmod() if it
3609 * wasn't already called just above, to update
3610 * the dirty bit.
3611 */
3612 if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
3613 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3614 if (refmod_state & VM_MEM_MODIFIED) {
3615 SET_PAGE_DIRTY(m, FALSE);
3616 }
3617 }
3618 }
3619
3620 /*
3621 * we've got a candidate page to steal...
3622 *
3623 * m->vmp_dirty is up to date courtesy of the
3624 * preceding check for m->vmp_reference... if
3625 * we get here, then m->vmp_reference had to be
3626 * FALSE (or possibly "reactivate_limit" was
3627 * exceeded), but in either case we called
3628 * pmap_get_refmod() and updated both
3629 * m->vmp_reference and m->vmp_dirty
3630 *
3631 * if it's dirty or precious we need to
3632 * see if the target queue is throttled...
3633 * if it is, we need to skip over it by moving it back
3634 * to the end of the inactive queue
3635 */
3636
3637 inactive_throttled = FALSE;
3638
3639 if (m->vmp_dirty || m->vmp_precious) {
3640 if (object->internal) {
3641 if (VM_PAGE_Q_THROTTLED(iq)) {
3642 inactive_throttled = TRUE;
3643 }
3644 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3645 inactive_throttled = TRUE;
3646 }
3647 }
3648 throttle_inactive:
3649 if (!VM_DYNAMIC_PAGING_ENABLED() &&
3650 object->internal && m->vmp_dirty &&
3651 (object->purgable == VM_PURGABLE_DENY ||
3652 object->purgable == VM_PURGABLE_NONVOLATILE ||
3653 object->purgable == VM_PURGABLE_VOLATILE)) {
3654 vm_page_check_pageable_safe(m);
3655 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3656 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
3657 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
3658 vm_page_throttled_count++;
3659
3660 VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
3661
3662 inactive_burst_count = 0;
3663
3664 lock_yield_check = TRUE;
3665 continue;
3666 }
3667 if (inactive_throttled == TRUE) {
3668 vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
3669 &force_anonymous, page_from_bg_q);
3670
3671 inactive_burst_count = 0;
3672
3673 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3674 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3675 }
3676
3677 lock_yield_check = TRUE;
3678 continue;
3679 }
3680
3681 /*
3682 * we've got a page that we can steal...
3683 * eliminate all mappings and make sure
3684 * we have the up-to-date modified state
3685 *
3686 * if we need to do a pmap_disconnect then we
3687 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3688 * provides the true state atomically... the
3689 * page was still mapped up to the pmap_disconnect
3690 * and may have been dirtied at the last microsecond
3691 *
3692 * Note that if 'pmapped' is FALSE then the page is not
3693 * and has not been in any map, so there is no point calling
3694 * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
3695 * of likely usage of the page.
3696 */
3697 if (m->vmp_pmapped == TRUE) {
3698 int pmap_options;
3699
3700 /*
3701 * Don't count this page as going into the compressor
3702 * if any of these are true:
3703 * 1) compressed pager isn't enabled
3704 * 2) Freezer enabled device with compressed pager
3705 * backend (exclusive use) i.e. most of the VM system
3706 * (including vm_pageout_scan) has no knowledge of
3707 * the compressor
3708 * 3) This page belongs to a file and hence will not be
3709 * sent into the compressor
3710 */
3711 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
3712 object->internal == FALSE) {
3713 pmap_options = 0;
3714 } else if (m->vmp_dirty || m->vmp_precious) {
3715 /*
3716 * VM knows that this page is dirty (or
3717 * precious) and needs to be compressed
3718 * rather than freed.
3719 * Tell the pmap layer to count this page
3720 * as "compressed".
3721 */
3722 pmap_options = PMAP_OPTIONS_COMPRESSOR;
3723 } else {
3724 /*
3725 * VM does not know if the page needs to
3726 * be preserved but the pmap layer might tell
3727 * us if any mapping has "modified" it.
3728 * Let the pmap layer count this page
3729 * as compressed if and only if it has been
3730 * modified.
3731 */
3732 pmap_options =
3733 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
3734 }
3735 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
3736 pmap_options,
3737 NULL);
3738 if (refmod_state & VM_MEM_MODIFIED) {
3739 SET_PAGE_DIRTY(m, FALSE);
3740 }
3741 }
3742
3743 /*
3744 * reset our count of pages that have been reclaimed
3745 * since the last page was 'stolen'
3746 */
3747 inactive_reclaim_run = 0;
3748
3749 /*
3750 * If it's clean and not precious, we can free the page.
3751 */
3752 if (!m->vmp_dirty && !m->vmp_precious) {
3753 vm_pageout_state.vm_pageout_inactive_clean++;
3754
3755 /*
3756 * OK, at this point we have found a page we are going to free.
3757 */
3758 #if CONFIG_PHANTOM_CACHE
3759 if (!object->internal) {
3760 vm_phantom_cache_add_ghost(m);
3761 }
3762 #endif
3763 goto reclaim_page;
3764 }
3765
3766 /*
3767 * The page may have been dirtied since the last check
3768 * for a throttled target queue (which may have been skipped
3769 * if the page was clean then). With the dirty page
3770 * disconnected here, we can make one final check.
3771 */
3772 if (object->internal) {
3773 if (VM_PAGE_Q_THROTTLED(iq)) {
3774 inactive_throttled = TRUE;
3775 }
3776 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3777 inactive_throttled = TRUE;
3778 }
3779
3780 if (inactive_throttled == TRUE) {
3781 goto throttle_inactive;
3782 }
3783
3784 #if VM_PRESSURE_EVENTS
3785 #if CONFIG_JETSAM
3786
3787 /*
3788 * If Jetsam is enabled, then the sending
3789 * of memory pressure notifications is handled
3790 * from the same thread that takes care of high-water
3791 * and other jetsams i.e. the memorystatus_thread.
3792 */
3793
3794 #else /* CONFIG_JETSAM */
3795
3796 vm_pressure_response();
3797
3798 #endif /* CONFIG_JETSAM */
3799 #endif /* VM_PRESSURE_EVENTS */
3800
3801 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3802 VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
3803 }
3804
3805 if (object->internal) {
3806 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
3807 } else {
3808 vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
3809 }
3810
3811 /*
3812 * internal pages will go to the compressor...
3813 * external pages will go to the appropriate pager to be cleaned
3814 * and upon completion will end up on 'vm_page_queue_cleaned' which
3815 * is a preferred queue to steal from
3816 */
3817 vm_pageout_cluster(m);
3818 inactive_burst_count = 0;
3819
3820 /*
3821 * back to top of pageout scan loop
3822 */
3823 }
3824 }
3825
3826
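/*
 * vm_page_free_reserve:
 *
 * Grow the reserved free-page pool by 'pages' (plus the compressor's own
 * reserve when a compressor is present), clamped to its limit, and then
 * recompute the derived thresholds: vm_page_free_min, vm_page_free_target
 * and vm_page_throttle_limit.
 */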
3827 void
3828 vm_page_free_reserve(
3829 int pages)
3830 {
3831 int free_after_reserve;
3832
3833 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3834 if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
3835 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
3836 } else {
3837 vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
3838 }
3839 } else {
3840 if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
3841 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
3842 } else {
3843 vm_page_free_reserved += pages;
3844 }
3845 }
3846 free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
3847
3848 vm_page_free_min = vm_page_free_reserved +
3849 VM_PAGE_FREE_MIN(free_after_reserve);
3850
3851 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
3852 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
3853 }
3854
3855 vm_page_free_target = vm_page_free_reserved +
3856 VM_PAGE_FREE_TARGET(free_after_reserve);
3857
3858 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
3859 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
3860 }
3861
3862 if (vm_page_free_target < vm_page_free_min + 5) {
3863 vm_page_free_target = vm_page_free_min + 5;
3864 }
3865
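/*
 * The throttle limit works out to half of the free target
 * (target - target/2, i.e. rounded up when the target is odd).
 */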
3866 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
3867 }
3868
3869 /*
3870 * vm_pageout is the high level pageout daemon.
3871 */
3872
3873 void
3874 vm_pageout_continue(void)
3875 {
3876 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
3877 VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
3878
3879 vm_free_page_lock();
3880 vm_pageout_running = TRUE;
3881 vm_free_page_unlock();
3882
3883 vm_pageout_scan();
3884 /*
3885 * we hold both the vm_page_queue_free_lock
3886 * and the vm_page_queues_lock at this point
3887 */
3888 assert(vm_page_free_wanted == 0);
3889 assert(vm_page_free_wanted_privileged == 0);
3890 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
3891
3892 vm_pageout_running = FALSE;
3893 #if XNU_TARGET_OS_OSX
3894 if (vm_pageout_waiter) {
3895 vm_pageout_waiter = FALSE;
3896 thread_wakeup((event_t)&vm_pageout_waiter);
3897 }
3898 #endif /* XNU_TARGET_OS_OSX */
3899
3900 vm_free_page_unlock();
3901 vm_page_unlock_queues();
3902
3903 thread_block((thread_continue_t)vm_pageout_continue);
3904 /*NOTREACHED*/
3905 }
3906
3907 #if XNU_TARGET_OS_OSX
3908 kern_return_t
3909 vm_pageout_wait(uint64_t deadline)
3910 {
3911 kern_return_t kr;
3912
3913 vm_free_page_lock();
3914 for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
3915 vm_pageout_waiter = TRUE;
3916 if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
3917 &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
3918 (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
3919 kr = KERN_OPERATION_TIMED_OUT;
3920 }
3921 }
3922 vm_free_page_unlock();
3923
3924 return kr;
3925 }
3926 #endif /* XNU_TARGET_OS_OSX */
3927
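/*
 * vm_pageout_iothread_external_continue:
 *
 * Body of the external pageout I/O thread.  It drains the external
 * pageout queue, re-looks each page up under its object lock and hands
 * the data to the object's pager via memory_object_data_return(),
 * throttling itself between requests.
 */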
3928 OS_NORETURN
3929 static void
3930 vm_pageout_iothread_external_continue(struct pgo_iothread_state *ethr, __unused wait_result_t w)
3931 {
3932 vm_page_t m = NULL;
3933 vm_object_t object;
3934 vm_object_offset_t offset;
3935 memory_object_t pager;
3936 struct vm_pageout_queue *q = ethr->q;
3937
3938 /* On systems with a compressor, the external IO thread clears its
3939 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3940 * creation)
3941 */
3942 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3943 current_thread()->options &= ~TH_OPT_VMPRIV;
3944 }
3945
3946 sched_cond_ack(&(ethr->pgo_wakeup));
3947
3948 while (true) {
3949 vm_page_lockspin_queues();
3950
3951 while (!vm_page_queue_empty(&q->pgo_pending)) {
3952 q->pgo_busy = TRUE;
3953 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3954
3955 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3956 VM_PAGE_CHECK(m);
3957 /*
3958 * grab a snapshot of the object and offset this
3959 * page is tabled in so that we can relookup this
3960 * page after we've taken the object lock - these
3961 * fields are stable while we hold the page queues lock
3962 * but as soon as we drop it, there is nothing to keep
3963 * this page in this object... we hold an activity_in_progress
3964 * on this object which will keep it from terminating
3965 */
3966 object = VM_PAGE_OBJECT(m);
3967 offset = m->vmp_offset;
3968
3969 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3970 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3971
3972 vm_page_unlock_queues();
3973
3974 vm_object_lock(object);
3975
3976 m = vm_page_lookup(object, offset);
3977
3978 if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
3979 !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
3980 /*
3981 * it's either the same page that someone else has
3982 * started cleaning (or it's finished cleaning or
3983 * been put back on the pageout queue), or
3984 * the page has been freed or we have found a
3985 * new page at this offset... in all of these cases
3986 * we merely need to release the activity_in_progress
3987 * we took when we put the page on the pageout queue
3988 */
3989 vm_object_activity_end(object);
3990 vm_object_unlock(object);
3991
3992 vm_page_lockspin_queues();
3993 continue;
3994 }
3995 pager = object->pager;
3996
3997 if (pager == MEMORY_OBJECT_NULL) {
3998 /*
3999 * This pager has been destroyed by either
4000 * memory_object_destroy or vm_object_destroy, and
4001 * so there is nowhere for the page to go.
4002 */
4003 if (m->vmp_free_when_done) {
4004 /*
4005 * Just free the page... VM_PAGE_FREE takes
4006 * care of cleaning up all the state...
4007 * including doing the vm_pageout_throttle_up
4008 */
4009 VM_PAGE_FREE(m);
4010 } else {
4011 vm_page_lockspin_queues();
4012
4013 vm_pageout_throttle_up(m);
4014 vm_page_activate(m);
4015
4016 vm_page_unlock_queues();
4017
4018 /*
4019 * And we are done with it.
4020 */
4021 }
4022 vm_object_activity_end(object);
4023 vm_object_unlock(object);
4024
4025 vm_page_lockspin_queues();
4026 continue;
4027 }
4028 #if 0
4029 /*
4030 * we don't hold the page queue lock
4031 * so this check isn't safe to make
4032 */
4033 VM_PAGE_CHECK(m);
4034 #endif
4035 /*
4036 * give back the activity_in_progress reference we
4037 * took when we queued up this page and replace it
4038 * with a paging_in_progress reference that will
4039 * also keep the paging offset from changing and
4040 * prevent the object from terminating
4041 */
4042 vm_object_activity_end(object);
4043 vm_object_paging_begin(object);
4044 vm_object_unlock(object);
4045
4046 /*
4047 * Send the data to the pager.
4048 * any pageout clustering happens there
4049 */
4050 memory_object_data_return(pager,
4051 m->vmp_offset + object->paging_offset,
4052 PAGE_SIZE,
4053 NULL,
4054 NULL,
4055 FALSE,
4056 FALSE,
4057 0);
4058
4059 vm_object_lock(object);
4060 vm_object_paging_end(object);
4061 vm_object_unlock(object);
4062
4063 vm_pageout_io_throttle();
4064
4065 vm_page_lockspin_queues();
4066 }
4067 q->pgo_busy = FALSE;
4068
4069 vm_page_unlock_queues();
4070 sched_cond_wait_parameter(&(ethr->pgo_wakeup), THREAD_UNINT, (thread_continue_t)vm_pageout_iothread_external_continue, ethr);
4071 }
4072 /*NOTREACHED*/
4073 }
4074
4075
4076 #define MAX_FREE_BATCH 32
4077 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
4078 * this thread.
4079 */
4080
4081
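/*
 * vm_pageout_iothread_internal_continue:
 *
 * Body of a compressor thread.  It pulls batches of pages off the
 * internal pageout queue, compresses each one via
 * vm_pageout_compress_page(), and returns the compressed pages to the
 * free list in batches of up to MAX_FREE_BATCH.
 */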
4082 OS_NORETURN
4083 static void
4084 vm_pageout_iothread_internal_continue(struct pgo_iothread_state *cq, __unused wait_result_t w)
4085 {
4086 struct vm_pageout_queue *q;
4087 vm_page_t m = NULL;
4088 boolean_t pgo_draining;
4089 vm_page_t local_q;
4090 int local_cnt;
4091 vm_page_t local_freeq = NULL;
4092 int local_freed = 0;
4093 int local_batch_size;
4094 #if DEVELOPMENT || DEBUG
4095 int ncomps = 0;
4096 boolean_t marked_active = FALSE;
4097 int num_pages_processed = 0;
4098 #endif
4099 void *chead = NULL;
4100
4101 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
4102
4103 sched_cond_ack(&(cq->pgo_wakeup));
4104
4105 q = cq->q;
4106
4107 while (true) {
4108 #if DEVELOPMENT || DEBUG
4109 bool benchmark_accounting = false;
4110 /*
4111 * If we're running the compressor perf test, only process the benchmark pages.
4112 * We'll get back to our regular queue once the benchmark is done
4113 */
4114 if (compressor_running_perf_test) {
4115 q = cq->benchmark_q;
4116 if (!vm_page_queue_empty(&q->pgo_pending)) {
4117 benchmark_accounting = true;
4118 } else {
4119 q = cq->q;
4120 benchmark_accounting = false;
4121 }
4122 }
4123 #endif /* DEVELOPMENT || DEBUG */
4124
4125 #if __AMP__
4126 if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
4127 local_batch_size = (q->pgo_maxlaundry >> 3);
4128 local_batch_size = MAX(local_batch_size, 16);
4129 } else {
4130 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
4131 }
4132 #else
4133 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
4134 #endif
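/*
 * Batch sizing above, roughly: each compressor thread claims at most
 * pgo_maxlaundry / (vm_compressor_thread_count * 2) pages per pass, so
 * with, say, two compressor threads a single pass takes at most a
 * quarter of the queue's maximum laundry.
 */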
4135
4136 #if RECORD_THE_COMPRESSED_DATA
4137 if (q->pgo_laundry) {
4138 c_compressed_record_init();
4139 }
4140 #endif
4141 while (true) {
4142 int pages_left_on_q = 0;
4143
4144 local_cnt = 0;
4145 local_q = NULL;
4146
4147 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
4148
4149 vm_page_lock_queues();
4150 #if DEVELOPMENT || DEBUG
4151 if (marked_active == FALSE) {
4152 vmct_active++;
4153 vmct_state[cq->id] = VMCT_ACTIVE;
4154 marked_active = TRUE;
4155 if (vmct_active == 1) {
4156 vm_compressor_epoch_start = mach_absolute_time();
4157 }
4158 }
4159 #endif
4160 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
4161
4162 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
4163
4164 while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
4165 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
4166 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
4167 VM_PAGE_CHECK(m);
4168
4169 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
4170 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
4171 m->vmp_laundry = FALSE;
4172
4173 m->vmp_snext = local_q;
4174 local_q = m;
4175 local_cnt++;
4176 }
4177 if (local_q == NULL) {
4178 break;
4179 }
4180
4181 q->pgo_busy = TRUE;
4182
4183 if ((pgo_draining = q->pgo_draining) == FALSE) {
4184 vm_pageout_throttle_up_batch(q, local_cnt);
4185 pages_left_on_q = q->pgo_laundry;
4186 } else {
4187 pages_left_on_q = q->pgo_laundry - local_cnt;
4188 }
4189
4190 vm_page_unlock_queues();
4191
4192 #if !RECORD_THE_COMPRESSED_DATA
4193 if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
4194 // wake up the next compressor thread
4195 sched_cond_signal(&pgo_iothread_internal_state[cq->id + 1].pgo_wakeup,
4196 pgo_iothread_internal_state[cq->id + 1].pgo_iothread);
4197 }
4198 #endif
4199 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
4200
4201 while (local_q) {
4202 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
4203
4204 m = local_q;
4205 local_q = m->vmp_snext;
4206 m->vmp_snext = NULL;
4207
4208 /*
4209 * Technically we need the pageq locks to manipulate this field.
4210 * However, this page has been removed from all queues and is only
4211 * known to this compressor thread dealing with this local queue.
4212 *
4213 * TODO LIONEL: Add a second localq that is the early localq and
4214 * put special pages like this one on that queue in the block above
4215 * under the pageq lock to avoid this 'works but not clean' logic.
4216 */
4217 void *donate_queue_head;
4218 #if XNU_TARGET_OS_OSX
4219 donate_queue_head = &cq->current_early_swapout_chead;
4220 #else /* XNU_TARGET_OS_OSX */
4221 donate_queue_head = &cq->current_late_swapout_chead;
4222 #endif /* XNU_TARGET_OS_OSX */
4223 if (m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) {
4224 m->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
4225 chead = donate_queue_head;
4226 } else {
4227 chead = &cq->current_regular_swapout_chead;
4228 }
4229
4230 if (vm_pageout_compress_page(chead, cq->scratch_buf, m) == KERN_SUCCESS) {
4231 #if DEVELOPMENT || DEBUG
4232 ncomps++;
4233 #endif
4234 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
4235
4236 m->vmp_snext = local_freeq;
4237 local_freeq = m;
4238 local_freed++;
4239
4240 if (local_freed >= MAX_FREE_BATCH) {
4241 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4242
4243 vm_page_free_list(local_freeq, TRUE);
4244
4245 local_freeq = NULL;
4246 local_freed = 0;
4247 }
4248 }
4249 #if DEVELOPMENT || DEBUG
4250 num_pages_processed++;
4251 #endif /* DEVELOPMENT || DEBUG */
4252 #if !CONFIG_JETSAM
4253 while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4254 kern_return_t wait_result;
4255 int need_wakeup = 0;
4256
4257 if (local_freeq) {
4258 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4259
4260 vm_page_free_list(local_freeq, TRUE);
4261 local_freeq = NULL;
4262 local_freed = 0;
4263
4264 continue;
4265 }
4266 vm_free_page_lock_spin();
4267
4268 if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4269 if (vm_page_free_wanted_privileged++ == 0) {
4270 need_wakeup = 1;
4271 }
4272 wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
4273
4274 vm_free_page_unlock();
4275
4276 if (need_wakeup) {
4277 thread_wakeup((event_t)&vm_page_free_wanted);
4278 }
4279
4280 if (wait_result == THREAD_WAITING) {
4281 thread_block(THREAD_CONTINUE_NULL);
4282 }
4283 } else {
4284 vm_free_page_unlock();
4285 }
4286 }
4287 #endif
4288 }
4289 if (local_freeq) {
4290 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4291
4292 vm_page_free_list(local_freeq, TRUE);
4293 local_freeq = NULL;
4294 local_freed = 0;
4295 }
4296 if (pgo_draining == TRUE) {
4297 vm_page_lockspin_queues();
4298 vm_pageout_throttle_up_batch(q, local_cnt);
4299 vm_page_unlock_queues();
4300 }
4301 }
4302 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
4303
4304 /*
4305 * queue lock is held and our q is empty
4306 */
4307 q->pgo_busy = FALSE;
4308 #if DEVELOPMENT || DEBUG
4309 if (marked_active == TRUE) {
4310 vmct_active--;
4311 vmct_state[cq->id] = VMCT_IDLE;
4312
4313 if (vmct_active == 0) {
4314 vm_compressor_epoch_stop = mach_absolute_time();
4315 assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
4316 "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
4317 vm_compressor_epoch_start, vm_compressor_epoch_stop);
4318 /* This interval includes intervals where one or more
4319 * compressor threads were pre-empted
4320 */
4321 vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
4322 }
4323 }
4324 if (compressor_running_perf_test && benchmark_accounting) {
4325 /*
4326 * We could turn ON compressor_running_perf_test while still processing
4327 * regular non-benchmark pages. We shouldn't count them here else we
4328 * could overshoot. We might also still be populating that benchmark Q
4329 * and be under pressure. So we will go back to the regular queues. And
4330 * benchmark accounting will be off for that case too.
4331 */
4332 compressor_perf_test_pages_processed += num_pages_processed;
4333 thread_wakeup(&compressor_perf_test_pages_processed);
4334 }
4335 #endif
4336 vm_page_unlock_queues();
4337 #if DEVELOPMENT || DEBUG
4338 if (__improbable(vm_compressor_time_thread)) {
4339 vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
4340 vmct_stats.vmct_pages[cq->id] += ncomps;
4341 vmct_stats.vmct_iterations[cq->id]++;
4342 if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
4343 vmct_stats.vmct_maxpages[cq->id] = ncomps;
4344 }
4345 if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
4346 vmct_stats.vmct_minpages[cq->id] = ncomps;
4347 }
4348 }
4349 #endif
4350
4351 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
4352 #if DEVELOPMENT || DEBUG
4353 if (compressor_running_perf_test && benchmark_accounting) {
4354 /*
4355 * We've been exclusively compressing pages from the benchmark queue,
4356 * do 1 pass over the internal queue before blocking.
4357 */
4358 continue;
4359 }
4360 #endif
4361
4362 sched_cond_wait_parameter(&(cq->pgo_wakeup), THREAD_UNINT, (thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
4363 }
4364 /*NOTREACHED*/
4365 }
4366
4367
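/*
 * vm_pageout_compress_page:
 *
 * Hand one page to the compressor pager (creating the pager first if the
 * object doesn't have one yet).  On success the page is removed from its
 * object and the compression is charged to the object's owner where
 * appropriate; on failure the page is reactivated.  Returns the status of
 * the vm_compressor_pager_put() attempt.
 */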
4368 kern_return_t
4369 vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
4370 {
4371 vm_object_t object;
4372 memory_object_t pager;
4373 int compressed_count_delta;
4374 kern_return_t retval;
4375
4376 object = VM_PAGE_OBJECT(m);
4377
4378 assert(!m->vmp_free_when_done);
4379 assert(!m->vmp_laundry);
4380
4381 pager = object->pager;
4382
4383 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4384 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
4385
4386 vm_object_lock(object);
4387
4388 /*
4389 * If there is no memory object for the page, create
4390 * one and hand it to the compression pager.
4391 */
4392
4393 if (!object->pager_initialized) {
4394 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
4395 }
4396 if (!object->pager_initialized) {
4397 vm_object_compressor_pager_create(object);
4398 }
4399
4400 pager = object->pager;
4401
4402 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4403 /*
4404 * Still no pager for the object,
4405 * or the pager has been destroyed.
4406 * Reactivate the page.
4407 *
4408 * Should only happen if there is no
4409 * compression pager
4410 */
4411 PAGE_WAKEUP_DONE(m);
4412
4413 vm_page_lockspin_queues();
4414 vm_page_activate(m);
4415 VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
4416 vm_page_unlock_queues();
4417
4418 /*
4419 * And we are done with it.
4420 */
4421 vm_object_activity_end(object);
4422 vm_object_unlock(object);
4423
4424 return KERN_FAILURE;
4425 }
4426 vm_object_unlock(object);
4427
4428 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
4429 }
4430 assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
4431 assert(object->activity_in_progress > 0);
4432
4433 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4434 if (m->vmp_unmodified_ro == true) {
4435 os_atomic_inc(&compressor_ro_uncompressed_total_returned, relaxed);
4436 }
4437 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4438
4439 retval = vm_compressor_pager_put(
4440 pager,
4441 m->vmp_offset + object->paging_offset,
4442 VM_PAGE_GET_PHYS_PAGE(m),
4443 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4444 m->vmp_unmodified_ro,
4445 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4446 false,
4447 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4448 current_chead,
4449 scratch_buf,
4450 &compressed_count_delta);
4451
4452 vm_object_lock(object);
4453
4454 assert(object->activity_in_progress > 0);
4455 assert(VM_PAGE_OBJECT(m) == object);
4456 assert( !VM_PAGE_WIRED(m));
4457
4458 vm_compressor_pager_count(pager,
4459 compressed_count_delta,
4460 FALSE, /* shared_lock */
4461 object);
4462
4463 if (retval == KERN_SUCCESS) {
4464 /*
4465 * If the object is purgeable, its owner's
4466 * purgeable ledgers will be updated in
4467 * vm_page_remove() but the page still
4468 * contributes to the owner's memory footprint,
4469 * so account for it as such.
4470 */
4471 if ((object->purgable != VM_PURGABLE_DENY ||
4472 object->vo_ledger_tag) &&
4473 object->vo_owner != NULL) {
4474 /* one more compressed purgeable/tagged page */
4475 vm_object_owner_compressed_update(object,
4476 compressed_count_delta);
4477 }
4478 counter_inc(&vm_statistics_compressions);
4479
4480 if (m->vmp_tabled) {
4481 vm_page_remove(m, TRUE);
4482 }
4483 } else {
4484 PAGE_WAKEUP_DONE(m);
4485
4486 vm_page_lockspin_queues();
4487
4488 vm_page_activate(m);
4489 vm_pageout_vminfo.vm_compressor_failed++;
4490
4491 vm_page_unlock_queues();
4492 }
4493 vm_object_activity_end(object);
4494 vm_object_unlock(object);
4495
4496 return retval;
4497 }
4498
4499
4500 static void
4501 vm_pageout_adjust_eq_iothrottle(struct pgo_iothread_state *ethr, boolean_t req_lowpriority)
4502 {
4503 uint32_t policy;
4504
4505 if (hibernate_cleaning_in_progress == TRUE) {
4506 req_lowpriority = FALSE;
4507 }
4508
4509 if (ethr->q->pgo_inited == TRUE && ethr->q->pgo_lowpriority != req_lowpriority) {
4510 vm_page_unlock_queues();
4511
4512 if (req_lowpriority == TRUE) {
4513 policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
4514 DTRACE_VM(laundrythrottle);
4515 } else {
4516 policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
4517 DTRACE_VM(laundryunthrottle);
4518 }
4519 proc_set_thread_policy(ethr->pgo_iothread,
4520 TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
4521
4522 vm_page_lock_queues();
4523 ethr->q->pgo_lowpriority = req_lowpriority;
4524 }
4525 }
4526
4527 OS_NORETURN
4528 static void
4529 vm_pageout_iothread_external(struct pgo_iothread_state *ethr, __unused wait_result_t w)
4530 {
4531 thread_t self = current_thread();
4532
4533 self->options |= TH_OPT_VMPRIV;
4534
4535 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
4536
4537 proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
4538 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
4539
4540 vm_page_lock_queues();
4541
4542 vm_pageout_queue_external.pgo_lowpriority = TRUE;
4543 vm_pageout_queue_external.pgo_inited = TRUE;
4544
4545 vm_page_unlock_queues();
4546
4547 #if CONFIG_THREAD_GROUPS
4548 thread_group_vm_add();
4549 #endif /* CONFIG_THREAD_GROUPS */
4550
4551 vm_pageout_iothread_external_continue(ethr, 0);
4552 /*NOTREACHED*/
4553 }
4554
4555
4556 OS_NORETURN
4557 static void
4558 vm_pageout_iothread_internal(struct pgo_iothread_state *cthr, __unused wait_result_t w)
4559 {
4560 thread_t self = current_thread();
4561
4562 self->options |= TH_OPT_VMPRIV;
4563
4564 vm_page_lock_queues();
4565
4566 vm_pageout_queue_internal.pgo_lowpriority = TRUE;
4567 vm_pageout_queue_internal.pgo_inited = TRUE;
4568
4569 #if DEVELOPMENT || DEBUG
4570 vm_pageout_queue_benchmark.pgo_lowpriority = vm_pageout_queue_internal.pgo_lowpriority;
4571 vm_pageout_queue_benchmark.pgo_inited = vm_pageout_queue_internal.pgo_inited;
4572 vm_pageout_queue_benchmark.pgo_busy = FALSE;
4573 #endif /* DEVELOPMENT || DEBUG */
4574
4575 vm_page_unlock_queues();
4576
4577 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4578 thread_vm_bind_group_add();
4579 }
4580
4581 #if CONFIG_THREAD_GROUPS
4582 thread_group_vm_add();
4583 #endif /* CONFIG_THREAD_GROUPS */
4584
4585 #if __AMP__
4586 if (vm_compressor_ebound) {
4587 /*
4588 * Use the soft bound option for vm_compressor to allow it to run on
4589 * P-cores if E-cluster is unavailable.
4590 */
4591 thread_bind_cluster_type(self, 'E', true);
4592 }
4593 #endif /* __AMP__ */
4594
4595 thread_set_thread_name(current_thread(), "VM_compressor");
4596 #if DEVELOPMENT || DEBUG
4597 vmct_stats.vmct_minpages[cthr->id] = INT32_MAX;
4598 #endif
4599 vm_pageout_iothread_internal_continue(cthr, 0);
4600
4601 /*NOTREACHED*/
4602 }
4603
4604 kern_return_t
4605 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
4606 {
4607 if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
4608 return KERN_SUCCESS;
4609 } else {
4610 return KERN_FAILURE; /* Already set */
4611 }
4612 }
4613
4614 extern boolean_t memorystatus_manual_testing_on;
4615 extern unsigned int memorystatus_level;
4616
4617
4618 #if VM_PRESSURE_EVENTS
4619
4620 boolean_t vm_pressure_events_enabled = FALSE;
4621
4622 extern uint64_t next_warning_notification_sent_at_ts;
4623 extern uint64_t next_critical_notification_sent_at_ts;
4624
4625 #define PRESSURE_LEVEL_STUCK_THRESHOLD_MINS (30) /* 30 minutes. */
4626
4627 /*
4628 * The last time there was a change in pressure level OR we forced a check
4629 * because the system is stuck in a non-normal pressure level.
4630 */
4631 uint64_t vm_pressure_last_level_transition_abs = 0;
4632
4633 /*
4634 * This is how long the system waits 'stuck' in an unchanged non-normal pressure
4635 * level before resending notifications for that level.
4636 */
4637 int vm_pressure_level_transition_threshold = PRESSURE_LEVEL_STUCK_THRESHOLD_MINS;
4638
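/*
 * vm_pressure_response:
 *
 * Recompute memorystatus_level from the currently available (non-compressed)
 * memory, evaluate whether the pressure level should transition, and wake the
 * pressure thread / level-change waiters when it does.  A level that stays
 * stuck for vm_pressure_level_transition_threshold minutes forces the
 * notifications for that level to be re-sent.
 */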
4639 void
4640 vm_pressure_response(void)
4641 {
4642 vm_pressure_level_t old_level = kVMPressureNormal;
4643 int new_level = -1;
4644 unsigned int total_pages;
4645 uint64_t available_memory = 0;
4646 uint64_t curr_ts, abs_time_since_level_transition, time_in_ns;
4647 bool force_check = false;
4648 int time_in_mins;
4649
4650
4651 if (vm_pressure_events_enabled == FALSE) {
4652 return;
4653 }
4654
4655 #if !XNU_TARGET_OS_OSX
4656
4657 available_memory = (uint64_t) memorystatus_available_pages;
4658
4659 #else /* !XNU_TARGET_OS_OSX */
4660
4661 available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4662 memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4663
4664 #endif /* !XNU_TARGET_OS_OSX */
4665
4666 total_pages = (unsigned int) atop_64(max_mem);
4667 #if CONFIG_SECLUDED_MEMORY
4668 total_pages -= vm_page_secluded_count;
4669 #endif /* CONFIG_SECLUDED_MEMORY */
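/*
 * memorystatus_level is a simple percentage of total pages: e.g. 25,000
 * available pages out of 100,000 total pages gives a level of 25.
 */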
4670 memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
4671
4672 if (memorystatus_manual_testing_on) {
4673 return;
4674 }
4675
4676 curr_ts = mach_absolute_time();
4677 abs_time_since_level_transition = curr_ts - vm_pressure_last_level_transition_abs;
4678
4679 absolutetime_to_nanoseconds(abs_time_since_level_transition, &time_in_ns);
4680 time_in_mins = (int) ((time_in_ns / NSEC_PER_SEC) / 60);
4681 force_check = (time_in_mins >= vm_pressure_level_transition_threshold);
4682
4683 old_level = memorystatus_vm_pressure_level;
4684
4685 switch (memorystatus_vm_pressure_level) {
4686 case kVMPressureNormal:
4687 {
4688 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4689 new_level = kVMPressureCritical;
4690 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
4691 new_level = kVMPressureWarning;
4692 }
4693 break;
4694 }
4695
4696 case kVMPressureWarning:
4697 case kVMPressureUrgent:
4698 {
4699 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4700 new_level = kVMPressureNormal;
4701 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4702 new_level = kVMPressureCritical;
4703 } else if (force_check) {
4704 new_level = kVMPressureWarning;
4705 next_warning_notification_sent_at_ts = curr_ts;
4706 }
4707 break;
4708 }
4709
4710 case kVMPressureCritical:
4711 {
4712 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4713 new_level = kVMPressureNormal;
4714 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
4715 new_level = kVMPressureWarning;
4716 } else if (force_check) {
4717 new_level = kVMPressureCritical;
4718 next_critical_notification_sent_at_ts = curr_ts;
4719 }
4720 break;
4721 }
4722
4723 default:
4724 return;
4725 }
4726
4727 if (new_level != -1 || force_check) {
4728 if (new_level != -1) {
4729 memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
4730
4731 if (new_level != (int) old_level) {
4732 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4733 new_level, old_level, 0, 0);
4734 }
4735 } else {
4736 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4737 new_level, old_level, force_check, 0);
4738 }
4739
4740 if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
4741 /*
4742 * We don't want to schedule a wakeup while hibernation is in progress
4743 * because that could collide with checks for non-monotonicity in the scheduler.
4744 * We do however do all the updates to memorystatus_vm_pressure_level because
4745 * we _might_ want to use that for decisions regarding which pages or how
4746 * many pages we want to dump in hibernation.
4747 */
4748 return;
4749 }
4750
4751 if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level) || force_check) {
4752 if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
4753 thread_wakeup(&vm_pressure_thread);
4754 }
4755
4756 if (old_level != memorystatus_vm_pressure_level) {
4757 thread_wakeup(&vm_pageout_state.vm_pressure_changed);
4758 }
4759 vm_pressure_last_level_transition_abs = curr_ts; /* renew the window of observation for a stuck pressure level */
4760 }
4761 }
4762 }
4763 #endif /* VM_PRESSURE_EVENTS */
4764
4765
4766 /**
4767 * Called by a kernel thread to ask if a number of pages may be wired.
4768 */
4769 kern_return_t
4770 mach_vm_wire_level_monitor(int64_t requested_pages)
4771 {
4772 if (requested_pages <= 0) {
4773 return KERN_INVALID_ARGUMENT;
4774 }
4775
4776 const int64_t max_wire_pages = atop_64(vm_global_user_wire_limit);
4777 /**
4778 * Available pages can be negative in the case where more system memory is
4779 * wired than the threshold, so we must use a signed integer.
4780 */
4781 const int64_t available_pages = max_wire_pages - vm_page_wire_count;
4782
4783 if (requested_pages > available_pages) {
4784 return KERN_RESOURCE_SHORTAGE;
4785 }
4786 return KERN_SUCCESS;
4787 }
4788
4789 /*
4790 * Function called by a kernel thread to either get the current pressure level or
4791 * wait until memory pressure changes from a given level.
4792 */
4793 kern_return_t
4794 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
4795 {
4796 #if !VM_PRESSURE_EVENTS
4797
4798 return KERN_FAILURE;
4799
4800 #else /* VM_PRESSURE_EVENTS */
4801
4802 wait_result_t wr = 0;
4803 vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
4804
4805 if (pressure_level == NULL) {
4806 return KERN_INVALID_ARGUMENT;
4807 }
4808
4809 if (*pressure_level == kVMPressureJetsam) {
4810 if (!wait_for_pressure) {
4811 return KERN_INVALID_ARGUMENT;
4812 }
4813
4814 lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
4815 wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
4816 THREAD_INTERRUPTIBLE);
4817 if (wr == THREAD_WAITING) {
4818 ++memorystatus_jetsam_fg_band_waiters;
4819 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4820 wr = thread_block(THREAD_CONTINUE_NULL);
4821 } else {
4822 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4823 }
4824 if (wr != THREAD_AWAKENED) {
4825 return KERN_ABORTED;
4826 }
4827 *pressure_level = kVMPressureJetsam;
4828 return KERN_SUCCESS;
4829 }
4830
4831 if (wait_for_pressure == TRUE) {
4832 while (old_level == *pressure_level) {
4833 wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
4834 THREAD_INTERRUPTIBLE);
4835 if (wr == THREAD_WAITING) {
4836 wr = thread_block(THREAD_CONTINUE_NULL);
4837 }
4838 if (wr == THREAD_INTERRUPTED) {
4839 return KERN_ABORTED;
4840 }
4841
4842 if (wr == THREAD_AWAKENED) {
4843 old_level = memorystatus_vm_pressure_level;
4844 }
4845 }
4846 }
4847
4848 *pressure_level = old_level;
4849 return KERN_SUCCESS;
4850 #endif /* VM_PRESSURE_EVENTS */
4851 }
4852
4853 #if VM_PRESSURE_EVENTS
4854 void
4855 vm_pressure_thread(void)
4856 {
4857 static boolean_t thread_initialized = FALSE;
4858
4859 if (thread_initialized == TRUE) {
4860 vm_pageout_state.vm_pressure_thread_running = TRUE;
4861 consider_vm_pressure_events();
4862 vm_pageout_state.vm_pressure_thread_running = FALSE;
4863 }
4864
4865 #if CONFIG_THREAD_GROUPS
4866 thread_group_vm_add();
4867 #endif /* CONFIG_THREAD_GROUPS */
4868
4869 thread_set_thread_name(current_thread(), "VM_pressure");
4870 thread_initialized = TRUE;
4871 assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
4872 thread_block((thread_continue_t)vm_pressure_thread);
4873 }
4874 #endif /* VM_PRESSURE_EVENTS */
4875
4876
4877 /*
4878 * called once per-second via "compute_averages"
4879 */
4880 void
4881 compute_pageout_gc_throttle(__unused void *arg)
4882 {
4883 if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
4884 vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
4885
4886 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4887 }
4888 }
4889
4890 /*
4891 * vm_pageout_garbage_collect can also be called when the zone allocator needs
4892 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
4893 * jetsams. We need to check if the zone map size is above its jetsam limit to
4894 * decide if this was indeed the case.
4895 *
4896 * We need to do this on a different thread because of the following reasons:
4897 *
4898 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
4899 * itself causing the system to hang. We perform synchronous jetsams if we're
4900 * leaking in the VM map entries zone, so the leaking process could be doing a
4901 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
4902 * jetsam itself. We also need the vm_map lock on the process termination path,
4903 * which would now lead the dying process to deadlock against itself.
4904 *
4905 * 2. The jetsam path might need to allocate zone memory itself. We could try
4906 * using the non-blocking variant of zalloc for this path, but we can still
4907 * end up trying to do a kmem_alloc when the zone maps are almost full.
4908 */
4909 __dead2
4910 void
4911 vm_pageout_garbage_collect(void *step, wait_result_t wr __unused)
4912 {
4913 assert(step == VM_PAGEOUT_GC_INIT || step == VM_PAGEOUT_GC_COLLECT);
4914
4915 if (step == VM_PAGEOUT_GC_INIT) {
4916 /* first time being called is not about GC */
4917 #if CONFIG_THREAD_GROUPS
4918 thread_group_vm_add();
4919 #endif /* CONFIG_THREAD_GROUPS */
4920 } else if (zone_map_nearing_exhaustion()) {
4921 /*
4922 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
4923 *
4924 * Bail out after calling zone_gc (which triggers the
4925 * zone-map-exhaustion jetsams). If we fall through, the subsequent
4926 * operations that clear out a bunch of caches might allocate zone
4927 * memory themselves (for eg. vm_map operations would need VM map
4928 * entries). Since the zone map is almost full at this point, we
4929 * could end up with a panic. We just need to quickly jetsam a
4930 * process and exit here.
4931 *
4932 * It could so happen that we were woken up to relieve memory
4933 * pressure and the zone map also happened to be near its limit at
4934 * the time, in which case we'll skip out early. But that should be
4935 * ok; if memory pressure persists, the thread will simply be woken
4936 * up again.
4937 */
4938 zone_gc(ZONE_GC_JETSAM);
4939 } else {
4940 /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
4941 boolean_t buf_large_zfree = FALSE;
4942 boolean_t first_try = TRUE;
4943
4944 stack_collect();
4945
4946 consider_machine_collect();
4947 #if CONFIG_MBUF_MCACHE
4948 mbuf_drain(FALSE);
4949 #endif /* CONFIG_MBUF_MCACHE */
4950
4951 do {
4952 if (consider_buffer_cache_collect != NULL) {
4953 buf_large_zfree = (*consider_buffer_cache_collect)(0);
4954 }
4955 if (first_try == TRUE || buf_large_zfree == TRUE) {
4956 /*
4957 * zone_gc should be last, because the other operations
4958 * might return memory to zones.
4959 */
4960 zone_gc(ZONE_GC_TRIM);
4961 }
4962 first_try = FALSE;
4963 } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
4964
4965 consider_machine_adjust();
4966 }
4967
4968 assert_wait(VM_PAGEOUT_GC_EVENT, THREAD_UNINT);
4969
4970 thread_block_parameter(vm_pageout_garbage_collect, VM_PAGEOUT_GC_COLLECT);
4971 __builtin_unreachable();
4972 }
4973
4974
4975 #if VM_PAGE_BUCKETS_CHECK
4976 #if VM_PAGE_FAKE_BUCKETS
4977 extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
4978 #endif /* VM_PAGE_FAKE_BUCKETS */
4979 #endif /* VM_PAGE_BUCKETS_CHECK */
4980
4981
4982
4983 void
4984 vm_set_restrictions(unsigned int num_cpus)
4985 {
4986 int vm_restricted_to_single_processor = 0;
4987
4988 if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
4989 kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
4990 vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
4991 } else {
4992 assert(num_cpus > 0);
4993
4994 if (num_cpus <= 3) {
4995 /*
4996 * on systems with a limited number of CPUs, bind the
4997 * 4 major threads that can free memory and that tend to use
4998 * a fair bit of CPU under pressured conditions to a single processor.
4999 * This ensures that these threads don't hog all of the available CPUs
5000 * (important for camera launch), while allowing them to run independently
5001 * w/r to locks... the 4 threads are
5002 * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
5003 * vm_compressor_swap_trigger_thread (minor and major compactions),
5004 * memorystatus_thread (jetsams).
5005 *
5006 * the first time the thread is run, it is responsible for checking the
5007 * state of vm_restricted_to_single_processor, and if TRUE it calls
5008 * thread_bind_master... someday this should be replaced with a group
5009 * scheduling mechanism and KPI.
5010 */
5011 vm_pageout_state.vm_restricted_to_single_processor = TRUE;
5012 } else {
5013 vm_pageout_state.vm_restricted_to_single_processor = FALSE;
5014 }
5015 }
5016 }
5017
5018 /*
5019 * Set up vm_config based on the vm_compressor_mode.
5020 * Must run BEFORE the pageout thread starts up.
5021 */
5022 __startup_func
5023 void
5024 vm_config_init(void)
5025 {
5026 bzero(&vm_config, sizeof(vm_config));
5027
5028 switch (vm_compressor_mode) {
5029 case VM_PAGER_DEFAULT:
5030 printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
5031 OS_FALLTHROUGH;
5032
5033 case VM_PAGER_COMPRESSOR_WITH_SWAP:
5034 vm_config.compressor_is_present = TRUE;
5035 vm_config.swap_is_present = TRUE;
5036 vm_config.compressor_is_active = TRUE;
5037 vm_config.swap_is_active = TRUE;
5038 break;
5039
5040 case VM_PAGER_COMPRESSOR_NO_SWAP:
5041 vm_config.compressor_is_present = TRUE;
5042 vm_config.swap_is_present = TRUE;
5043 vm_config.compressor_is_active = TRUE;
5044 break;
5045
5046 case VM_PAGER_FREEZER_DEFAULT:
5047 printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
5048 OS_FALLTHROUGH;
5049
5050 case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
5051 vm_config.compressor_is_present = TRUE;
5052 vm_config.swap_is_present = TRUE;
5053 break;
5054
5055 case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
5056 vm_config.compressor_is_present = TRUE;
5057 vm_config.swap_is_present = TRUE;
5058 vm_config.compressor_is_active = TRUE;
5059 vm_config.freezer_swap_is_active = TRUE;
5060 break;
5061
5062 case VM_PAGER_NOT_CONFIGURED:
5063 break;
5064
5065 default:
5066 printf("unknown compressor mode - %x\n", vm_compressor_mode);
5067 break;
5068 }
5069 }
5070
5071 __startup_func
5072 static void
5073 vm_pageout_create_gc_thread(void)
5074 {
5075 thread_t thread;
5076
5077 if (kernel_thread_create(vm_pageout_garbage_collect,
5078 VM_PAGEOUT_GC_INIT, BASEPRI_DEFAULT, &thread) != KERN_SUCCESS) {
5079 panic("vm_pageout_garbage_collect: create failed");
5080 }
5081 thread_set_thread_name(thread, "VM_pageout_garbage_collect");
5082 if (thread->reserved_stack == 0) {
5083 assert(thread->kernel_stack);
5084 thread->reserved_stack = thread->kernel_stack;
5085 }
5086
5087 /* thread is started in vm_pageout() */
5088 vm_pageout_gc_thread = thread;
5089 }
5090 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, vm_pageout_create_gc_thread);
5091
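/*
 * Body of the page-out daemon thread: set scheduling priority and
 * bindings, initialize the pageout parameters and laundry queues, start
 * the external I/O, garbage-collection and (if configured) pressure
 * threads, then enter vm_pageout_continue(), which never returns.
 */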
5092 void
5093 vm_pageout(void)
5094 {
5095 thread_t self = current_thread();
5096 thread_t thread;
5097 kern_return_t result;
5098 spl_t s;
5099
5100 /*
5101 * Set thread privileges.
5102 */
5103 s = splsched();
5104
5105 #if CONFIG_VPS_DYNAMIC_PRIO
5106 if (vps_dynamic_priority_enabled) {
5107 sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
5108 thread_set_eager_preempt(self);
5109 } else {
5110 sched_set_kernel_thread_priority(self, BASEPRI_VM);
5111 }
5112 #else /* CONFIG_VPS_DYNAMIC_PRIO */
5113 sched_set_kernel_thread_priority(self, BASEPRI_VM);
5114 #endif /* CONFIG_VPS_DYNAMIC_PRIO */
5115
5116 thread_lock(self);
5117 self->options |= TH_OPT_VMPRIV;
5118 thread_unlock(self);
5119
5120 if (!self->reserved_stack) {
5121 self->reserved_stack = self->kernel_stack;
5122 }
5123
5124 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
5125 !vps_dynamic_priority_enabled) {
5126 thread_vm_bind_group_add();
5127 }
5128
5129
5130 #if CONFIG_THREAD_GROUPS
5131 thread_group_vm_add();
5132 #endif /* CONFIG_THREAD_GROUPS */
5133
5134 #if __AMP__
5135 PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
5136 if (vm_pgo_pbound) {
5137 /*
5138 * Use the soft bound option for vm pageout to allow it to run on
5139 * E-cores if P-cluster is unavailable.
5140 */
5141 thread_bind_cluster_type(self, 'P', true);
5142 }
5143 #endif /* __AMP__ */
5144
5145 PE_parse_boot_argn("vmpgo_protect_realtime",
5146 &vm_pageout_protect_realtime,
5147 sizeof(vm_pageout_protect_realtime));
5148 splx(s);
5149
5150 thread_set_thread_name(current_thread(), "VM_pageout_scan");
5151
5152 /*
5153 * Initialize some paging parameters.
5154 */
5155
5156 vm_pageout_state.vm_pressure_thread_running = FALSE;
5157 vm_pageout_state.vm_pressure_changed = FALSE;
5158 vm_pageout_state.memorystatus_purge_on_warning = 2;
5159 vm_pageout_state.memorystatus_purge_on_urgent = 5;
5160 vm_pageout_state.memorystatus_purge_on_critical = 8;
5161 vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
5162 vm_pageout_state.vm_page_speculative_percentage = 5;
5163 vm_pageout_state.vm_page_speculative_target = 0;
5164
5165 vm_pageout_state.vm_pageout_swap_wait = 0;
5166 vm_pageout_state.vm_pageout_idle_wait = 0;
5167 vm_pageout_state.vm_pageout_empty_wait = 0;
5168 vm_pageout_state.vm_pageout_burst_wait = 0;
5169 vm_pageout_state.vm_pageout_deadlock_wait = 0;
5170 vm_pageout_state.vm_pageout_deadlock_relief = 0;
5171 vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
5172
5173 vm_pageout_state.vm_pageout_inactive = 0;
5174 vm_pageout_state.vm_pageout_inactive_used = 0;
5175 vm_pageout_state.vm_pageout_inactive_clean = 0;
5176
5177 vm_pageout_state.vm_memory_pressure = 0;
5178 vm_pageout_state.vm_page_filecache_min = 0;
5179 #if CONFIG_JETSAM
5180 vm_pageout_state.vm_page_filecache_min_divisor = 70;
5181 vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
5182 #else
5183 vm_pageout_state.vm_page_filecache_min_divisor = 27;
5184 vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
5185 #endif
5186 vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
5187
5188 vm_pageout_state.vm_pageout_considered_page_last = 0;
5189
5190 if (vm_pageout_state.vm_pageout_swap_wait == 0) {
5191 vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
5192 }
5193
5194 if (vm_pageout_state.vm_pageout_idle_wait == 0) {
5195 vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
5196 }
5197
5198 if (vm_pageout_state.vm_pageout_burst_wait == 0) {
5199 vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
5200 }
5201
5202 if (vm_pageout_state.vm_pageout_empty_wait == 0) {
5203 vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
5204 }
5205
5206 if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
5207 vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
5208 }
5209
5210 if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
5211 vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
5212 }
5213
5214 if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
5215 vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
5216 }
5217 /*
5218 * even if we've already called vm_page_free_reserve,
5219 * call it again here to ensure that the targets are
5220 * accurately calculated (it uses vm_page_free_count_init).
5221 * Calling it with an arg of 0 will not change the reserve
5222 * but will re-calculate free_min and free_target.
5223 */
5224 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
5225 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
5226 } else {
5227 vm_page_free_reserve(0);
5228 }
5229
5230 bzero(&vm_pageout_queue_external, sizeof(struct vm_pageout_queue));
5231 bzero(&vm_pageout_queue_internal, sizeof(struct vm_pageout_queue));
5232
5233 vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
5234 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
5235
5236 vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
5237
5238 #if DEVELOPMENT || DEBUG
5239 bzero(&vm_pageout_queue_benchmark, sizeof(struct vm_pageout_queue));
5240 vm_page_queue_init(&vm_pageout_queue_benchmark.pgo_pending);
5241 #endif /* DEVELOPMENT || DEBUG */
5242
5243
5244 /* internal pageout thread started when default pager registered first time */
5245 /* external pageout and garbage collection threads started here */
5246 struct pgo_iothread_state *ethr = &pgo_iothread_external_state;
5247 ethr->id = 0;
5248 ethr->q = &vm_pageout_queue_external;
5249 ethr->current_early_swapout_chead = NULL;
5250 ethr->current_regular_swapout_chead = NULL;
5251 ethr->current_late_swapout_chead = NULL;
5252 ethr->scratch_buf = NULL;
5253 #if DEVELOPMENT || DEBUG
5254 ethr->benchmark_q = NULL;
5255 #endif /* DEVELOPMENT || DEBUG */
5256 sched_cond_init(&(ethr->pgo_wakeup));
5257
5258 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external,
5259 (void *)ethr, BASEPRI_VM,
5260 &(ethr->pgo_iothread));
5261 if (result != KERN_SUCCESS) {
5262 panic("vm_pageout: Unable to create external thread (%d)\n", result);
5263 }
5264 thread_set_thread_name(ethr->pgo_iothread, "VM_pageout_external_iothread");
5265
5266 thread_mtx_lock(vm_pageout_gc_thread);
5267 thread_start(vm_pageout_gc_thread);
5268 thread_mtx_unlock(vm_pageout_gc_thread);
5269
5270 #if VM_PRESSURE_EVENTS
5271 result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
5272 BASEPRI_DEFAULT,
5273 &thread);
5274
5275 if (result != KERN_SUCCESS) {
5276 panic("vm_pressure_thread: create failed");
5277 }
5278
5279 thread_deallocate(thread);
5280 #endif
5281
5282 vm_object_reaper_init();
5283
5284
5285 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
5286 vm_compressor_init();
5287 }
5288
5289 #if VM_PRESSURE_EVENTS
5290 vm_pressure_events_enabled = TRUE;
5291 #endif /* VM_PRESSURE_EVENTS */
5292
5293 #if CONFIG_PHANTOM_CACHE
5294 vm_phantom_cache_init();
5295 #endif
5296 #if VM_PAGE_BUCKETS_CHECK
5297 #if VM_PAGE_FAKE_BUCKETS
5298 printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
5299 (uint64_t) vm_page_fake_buckets_start,
5300 (uint64_t) vm_page_fake_buckets_end);
5301 pmap_protect(kernel_pmap,
5302 vm_page_fake_buckets_start,
5303 vm_page_fake_buckets_end,
5304 VM_PROT_READ);
5305 // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
5306 #endif /* VM_PAGE_FAKE_BUCKETS */
5307 #endif /* VM_PAGE_BUCKETS_CHECK */
5308
5309 #if VM_OBJECT_TRACKING
5310 vm_object_tracking_init();
5311 #endif /* VM_OBJECT_TRACKING */
5312
5313 #if __arm64__
5314 // vm_tests();
5315 #endif /* __arm64__ */
5316
5317 vm_pageout_continue();
5318
5319 /*
5320 * Unreached code!
5321 *
5322 * The vm_pageout_continue() call above never returns, so the code below is never
5323 * executed. We take advantage of this to declare several DTrace VM related probe
5324 * points that our kernel doesn't have an analog for. These are probe points that
5325 * exist in Solaris and are in the DTrace documentation, so people may have written
5326 * scripts that use them. Declaring the probe points here means their scripts will
5327 * compile and execute, which we want for portability of the scripts, but since this
5328 * section of code is never reached, the probe points will simply never fire. Yes,
5329 * this is basically a hack. The problem is the DTrace probe points were chosen with
5330 * Solaris specific VM events in mind, not portability to different VM implementations.
5331 */
5332
5333 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
5334 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
5335 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
5336 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
5337 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
5338 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
5339 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
5340 /*NOTREACHED*/
5341 }
5342
5343
5344
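/*
 * Start the internal (compressor) pageout threads.  The thread count is
 * derived from the CPU count, subject to boot-arg overrides and
 * MAX_COMPRESSOR_THREAD_COUNT; the internal queue's maximum laundry is
 * scaled to match, and each thread gets its own scratch buffer carved out
 * of a single kmem allocation.
 */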
5345 kern_return_t
5346 vm_pageout_internal_start(void)
5347 {
5348 kern_return_t result = KERN_SUCCESS;
5349 host_basic_info_data_t hinfo;
5350 vm_offset_t buf, bufsize;
5351
5352 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5353
5354 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5355 #define BSD_HOST 1
5356 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
5357
5358 assert(hinfo.max_cpus > 0);
5359
5360 #if !XNU_TARGET_OS_OSX
5361 vm_pageout_state.vm_compressor_thread_count = 1;
5362 #else /* !XNU_TARGET_OS_OSX */
5363 if (hinfo.max_cpus > 4) {
5364 vm_pageout_state.vm_compressor_thread_count = 2;
5365 } else {
5366 vm_pageout_state.vm_compressor_thread_count = 1;
5367 }
5368 #endif /* !XNU_TARGET_OS_OSX */
5369 #if __AMP__
5370 if (vm_compressor_ebound) {
5371 vm_pageout_state.vm_compressor_thread_count = 2;
5372 }
5373 #endif
5374 PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
5375 sizeof(vm_pageout_state.vm_compressor_thread_count));
5376
5377 if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
5378 vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
5379 }
5380 if (vm_pageout_state.vm_compressor_thread_count <= 0) {
5381 vm_pageout_state.vm_compressor_thread_count = 1;
5382 } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
5383 vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
5384 }
5385
5386 vm_pageout_queue_internal.pgo_maxlaundry =
5387 (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
5388
5389 PE_parse_boot_argn("vmpgoi_maxlaundry",
5390 &vm_pageout_queue_internal.pgo_maxlaundry,
5391 sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
5392
5393 #if DEVELOPMENT || DEBUG
5394 // Note: this will be modified at enqueue-time such that the benchmark queue is never throttled
5395 vm_pageout_queue_benchmark.pgo_maxlaundry = vm_pageout_queue_internal.pgo_maxlaundry;
5396 #endif /* DEVELOPMENT || DEBUG */
5397
5398 bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
5399
5400 kmem_alloc(kernel_map, &buf,
5401 bufsize * vm_pageout_state.vm_compressor_thread_count,
5402 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
5403 VM_KERN_MEMORY_COMPRESSOR);
5404
5405 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
5406 struct pgo_iothread_state *iq = &pgo_iothread_internal_state[i];
5407 iq->id = i;
5408 iq->q = &vm_pageout_queue_internal;
5409 iq->current_early_swapout_chead = NULL;
5410 iq->current_regular_swapout_chead = NULL;
5411 iq->current_late_swapout_chead = NULL;
5412 iq->scratch_buf = (char *)(buf + i * bufsize);
5413 #if DEVELOPMENT || DEBUG
5414 iq->benchmark_q = &vm_pageout_queue_benchmark;
5415 #endif /* DEVELOPMENT || DEBUG */
5416 sched_cond_init(&(iq->pgo_wakeup));
5417 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
5418 (void *)iq, BASEPRI_VM,
5419 &(iq->pgo_iothread));
5420
5421 if (result != KERN_SUCCESS) {
5422 panic("vm_pageout: Unable to create compressor thread no. %d (%d)\n", i, result);
5423 }
5424 }
5425 return result;
5426 }
5427
5428 #if CONFIG_IOSCHED
5429 /*
5430 * To support I/O Expedite for compressed files we mark the upls with special flags.
5431 * The way decmpfs works is that we create a big upl which marks all the pages needed to
5432 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
5433 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
5434 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
5435 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
5436 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
5437 * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
5438 * unless the real I/O upl is being destroyed).
5439 */
5440
5441
5442 static void
5443 upl_set_decmp_info(upl_t upl, upl_t src_upl)
5444 {
5445 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5446
5447 upl_lock(src_upl);
5448 if (src_upl->decmp_io_upl) {
5449 /*
5450 * If there is already an alive real I/O UPL, ignore this new UPL.
5451 * This case should rarely happen and even if it does, it just means
5452 * that we might issue a spurious expedite which the driver is expected
5453 * to handle.
5454 */
5455 upl_unlock(src_upl);
5456 return;
5457 }
5458 src_upl->decmp_io_upl = (void *)upl;
5459 src_upl->ref_count++;
5460
5461 upl->flags |= UPL_DECMP_REAL_IO;
5462 upl->decmp_io_upl = (void *)src_upl;
5463 upl_unlock(src_upl);
5464 }
5465 #endif /* CONFIG_IOSCHED */
5466
5467 #if UPL_DEBUG
5468 int upl_debug_enabled = 1;
5469 #else
5470 int upl_debug_enabled = 0;
5471 #endif
5472
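/*
 * Allocate and initialize a UPL of the requested size.  For internal UPLs
 * the page-info array is allocated along with the upl structure; "lite"
 * UPLs additionally get a bitmap tracking which pages are covered.
 */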
5473 static upl_t
5474 upl_create(int type, int flags, upl_size_t size)
5475 {
5476 uint32_t pages = (uint32_t)atop(round_page_32(size));
5477 upl_t upl;
5478
5479 assert(page_aligned(size));
5480
5481 /*
5482 * FIXME: this code assumes the allocation always succeeds,
5483 * however `pages` can be up to MAX_UPL_SIZE.
5484 *
5485 * The allocation size is above 32k (resp. 128k)
5486 * on 16k pages (resp. 4k), which kalloc might fail
5487 * to allocate.
5488 */
5489 upl = kalloc_type(struct upl, struct upl_page_info,
5490 (type & UPL_CREATE_INTERNAL) ? pages : 0, Z_WAITOK | Z_ZERO);
5491 if (type & UPL_CREATE_INTERNAL) {
5492 flags |= UPL_INTERNAL;
5493 }
5494
5495 if (type & UPL_CREATE_LITE) {
5496 flags |= UPL_LITE;
5497 if (pages) {
5498 upl->lite_list = bitmap_alloc(pages);
5499 }
5500 }
5501
5502 upl->flags = flags;
5503 upl->ref_count = 1;
5504 upl_lock_init(upl);
5505 #if CONFIG_IOSCHED
5506 if (type & UPL_CREATE_IO_TRACKING) {
5507 upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
5508 }
5509
5510 if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
5511 /* Only support expedite on internal UPLs */
5512 thread_t curthread = current_thread();
5513 upl->upl_reprio_info = kalloc_data(sizeof(uint64_t) * pages,
5514 Z_WAITOK | Z_ZERO);
5515 upl->flags |= UPL_EXPEDITE_SUPPORTED;
5516 if (curthread->decmp_upl != NULL) {
5517 upl_set_decmp_info(upl, curthread->decmp_upl);
5518 }
5519 }
5520 #endif
5521 #if CONFIG_IOSCHED || UPL_DEBUG
5522 if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
5523 upl->upl_creator = current_thread();
5524 upl->flags |= UPL_TRACKED_BY_OBJECT;
5525 }
5526 #endif
5527
5528 #if UPL_DEBUG
5529 upl->uple_create_btref = btref_get(__builtin_frame_address(0), 0);
5530 #endif /* UPL_DEBUG */
5531
5532 return upl;
5533 }
5534
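/*
 * Final teardown of a UPL once its last reference is gone: unlink it from
 * its object's UPL queue, release the shadow (pageout) object if one was
 * created, and free the reprio info, the lite bitmap and the upl
 * structure itself.
 */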
5535 static void
5536 upl_destroy(upl_t upl)
5537 {
5538 uint32_t pages;
5539
5540 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);
5541
5542 if (upl->ext_ref_count) {
5543 panic("upl(%p) ext_ref_count", upl);
5544 }
5545
5546 #if CONFIG_IOSCHED
5547 if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
5548 upl_t src_upl;
5549 src_upl = upl->decmp_io_upl;
5550 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5551 upl_lock(src_upl);
5552 src_upl->decmp_io_upl = NULL;
5553 upl_unlock(src_upl);
5554 upl_deallocate(src_upl);
5555 }
5556 #endif /* CONFIG_IOSCHED */
5557
5558 #if CONFIG_IOSCHED || UPL_DEBUG
5559 if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
5560 !(upl->flags & UPL_VECTOR)) {
5561 vm_object_t object;
5562
5563 if (upl->flags & UPL_SHADOWED) {
5564 object = upl->map_object->shadow;
5565 } else {
5566 object = upl->map_object;
5567 }
5568
5569 vm_object_lock(object);
5570 queue_remove(&object->uplq, upl, upl_t, uplq);
5571 vm_object_activity_end(object);
5572 vm_object_collapse(object, 0, TRUE);
5573 vm_object_unlock(object);
5574 }
5575 #endif
5576 /*
5577 * drop a reference on the map_object whether or
5578 * not a pageout object is inserted
5579 */
5580 if (upl->flags & UPL_SHADOWED) {
5581 vm_object_deallocate(upl->map_object);
5582 }
5583
5584 if (upl->flags & UPL_DEVICE_MEMORY) {
5585 pages = 1;
5586 } else {
5587 pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
5588 }
5589
5590 upl_lock_destroy(upl);
5591
5592 #if CONFIG_IOSCHED
5593 if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
5594 kfree_data(upl->upl_reprio_info, sizeof(uint64_t) * pages);
5595 }
5596 #endif
5597
5598 #if UPL_DEBUG
5599 for (int i = 0; i < upl->upl_commit_index; i++) {
5600 btref_put(upl->upl_commit_records[i].c_btref);
5601 }
5602 btref_put(upl->uple_create_btref);
5603 #endif /* UPL_DEBUG */
5604
5605 if ((upl->flags & UPL_LITE) && pages) {
5606 bitmap_free(upl->lite_list, pages);
5607 }
5608 kfree_type(struct upl, struct upl_page_info,
5609 (upl->flags & UPL_INTERNAL) ? pages : 0, upl);
5610 }
5611
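/*
 * Drop a reference on a UPL.  When the last reference goes away, run the
 * iodone callout (if one is registered) and destroy the UPL; for vector
 * UPLs, vector_upl_deallocate() is called first.
 */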
5612 void
5613 upl_deallocate(upl_t upl)
5614 {
5615 upl_lock(upl);
5616
5617 if (--upl->ref_count == 0) {
5618 if (vector_upl_is_valid(upl)) {
5619 vector_upl_deallocate(upl);
5620 }
5621 upl_unlock(upl);
5622
5623 if (upl->upl_iodone) {
5624 upl_callout_iodone(upl);
5625 }
5626
5627 upl_destroy(upl);
5628 } else {
5629 upl_unlock(upl);
5630 }
5631 }
5632
5633 #if CONFIG_IOSCHED
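/*
 * Mark a tracked UPL as a decmpfs request UPL and remember it on the
 * creating thread, so that the smaller "real I/O" UPLs issued for it can
 * be linked back via upl_set_decmp_info().
 */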
5634 void
5635 upl_mark_decmp(upl_t upl)
5636 {
5637 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
5638 upl->flags |= UPL_DECMP_REQ;
5639 upl->upl_creator->decmp_upl = (void *)upl;
5640 }
5641 }
5642
5643 void
5644 upl_unmark_decmp(upl_t upl)
5645 {
5646 if (upl && (upl->flags & UPL_DECMP_REQ)) {
5647 upl->upl_creator->decmp_upl = NULL;
5648 }
5649 }
5650
5651 #endif /* CONFIG_IOSCHED */
5652
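/*
 * A pageout queue is considered to be backing up once its laundry count
 * reaches 80% of its maximum.  must_throttle_writes() reports whether new
 * writes should be throttled: the external queue is backing up and
 * pageable external pages exceed 60% of the available non-compressed
 * memory.
 */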
5653 #define VM_PAGE_Q_BACKING_UP(q) \
5654 ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
5655
5656 boolean_t must_throttle_writes(void);
5657
5658 boolean_t
5659 must_throttle_writes()
5660 {
5661 if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
5662 vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
5663 return TRUE;
5664 }
5665
5666 return FALSE;
5667 }
5668
5669 int vm_page_delayed_work_ctx_needed = 0;
5670 KALLOC_TYPE_DEFINE(dw_ctx_zone, struct vm_page_delayed_work_ctx, KT_PRIV_ACCT);
5671
5672 __startup_func
5673 static void
5674 vm_page_delayed_work_init_ctx(void)
5675 {
5676 uint16_t min_delayed_work_ctx_allocated = 16;
5677
5678 /*
5679 * try really hard to always keep NCPU elements around in the zone
5680 * in order for the UPL code to almost always get an element.
5681 */
5682 if (min_delayed_work_ctx_allocated < zpercpu_count()) {
5683 min_delayed_work_ctx_allocated = (uint16_t)zpercpu_count();
5684 }
5685
5686 zone_raise_reserve(dw_ctx_zone, min_delayed_work_ctx_allocated);
5687 }
5688 STARTUP(ZALLOC, STARTUP_RANK_LAST, vm_page_delayed_work_init_ctx);
5689
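/*
 * Hand out a delayed-work context from dw_ctx_zone without blocking.
 * Returns the embedded dwp array, or NULL (bumping
 * vm_page_delayed_work_ctx_needed) if the zone is empty, in which case
 * the caller falls back to a smaller on-stack array.
 */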
5690 struct vm_page_delayed_work*
5691 vm_page_delayed_work_get_ctx(void)
5692 {
5693 struct vm_page_delayed_work_ctx * dw_ctx = NULL;
5694
5695 dw_ctx = zalloc_flags(dw_ctx_zone, Z_ZERO | Z_NOWAIT);
5696
5697 if (__probable(dw_ctx)) {
5698 dw_ctx->delayed_owner = current_thread();
5699 } else {
5700 vm_page_delayed_work_ctx_needed++;
5701 }
5702 return dw_ctx ? dw_ctx->dwp : NULL;
5703 }
5704
5705 void
5706 vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp)
5707 {
5708 struct vm_page_delayed_work_ctx *ldw_ctx;
5709
5710 ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp;
5711 ldw_ctx->delayed_owner = NULL;
5712
5713 zfree(dw_ctx_zone, ldw_ctx);
5714 }
5715
5716 /*
5717 * Routine: vm_object_upl_request
5718 * Purpose:
5719 * Cause the population of a portion of a vm_object.
5720 * Depending on the nature of the request, the pages
5721 * returned may contain valid data or be uninitialized.
5722 * A page list structure, listing the physical pages
5723 * will be returned upon request.
5724 * This function is called by the file system or any other
5725 * supplier of backing store to a pager.
5726 * IMPORTANT NOTE: The caller must still respect the relationship
5727 * between the vm_object and its backing memory object. The
5728 * caller MUST NOT substitute changes in the backing file
5729 * without first doing a memory_object_lock_request on the
5730 * target range unless it is known that the pages are not
5731 * shared with another entity at the pager level.
5732 * Copy_in_to:
5733 * if a page list structure is present
5734 * return the mapped physical pages, where a
5735 * page is not present, return a non-initialized
5736 * one. If the no_sync bit is turned on, don't
5737 * call the pager unlock to synchronize with other
5738 * possible copies of the page. Leave pages busy
5739 * in the original object, if a page list structure
5740 * was specified. When a commit of the page list
5741 * pages is done, the dirty bit will be set for each one.
5742 * Copy_out_from:
5743 * If a page list structure is present, return
5744 * all mapped pages. Where a page does not exist
5745 * map a zero filled one. Leave pages busy in
5746 * the original object. If a page list structure
5747 * is not specified, this call is a no-op.
5748 *
5749 * Note: access of default pager objects has a rather interesting
5750 * twist. The caller of this routine, presumably the file system
5751 * page cache handling code, will never actually make a request
5752 * against a default pager backed object. Only the default
5753 * pager will make requests on backing store related vm_objects.
5754 * In this way the default pager can maintain the relationship
5755 * between backing store files (abstract memory objects) and
5756 * the vm_objects (cache objects) they support.
5757 *
5758 */
5759
5760 __private_extern__ kern_return_t
5761 vm_object_upl_request(
5762 vm_object_t object,
5763 vm_object_offset_t offset,
5764 upl_size_t size,
5765 upl_t *upl_ptr,
5766 upl_page_info_array_t user_page_list,
5767 unsigned int *page_list_count,
5768 upl_control_flags_t cntrl_flags,
5769 vm_tag_t tag)
5770 {
5771 vm_page_t dst_page = VM_PAGE_NULL;
5772 vm_object_offset_t dst_offset;
5773 upl_size_t xfer_size;
5774 unsigned int size_in_pages;
5775 boolean_t dirty;
5776 boolean_t hw_dirty;
5777 upl_t upl = NULL;
5778 unsigned int entry;
5779 vm_page_t alias_page = NULL;
5780 int refmod_state = 0;
5781 vm_object_t last_copy_object;
5782 uint32_t last_copy_version;
5783 struct vm_page_delayed_work dw_array;
5784 struct vm_page_delayed_work *dwp, *dwp_start;
5785 bool dwp_finish_ctx = TRUE;
5786 int dw_count;
5787 int dw_limit;
5788 int io_tracking_flag = 0;
5789 int grab_options;
5790 int page_grab_count = 0;
5791 ppnum_t phys_page;
5792 pmap_flush_context pmap_flush_context_storage;
5793 boolean_t pmap_flushes_delayed = FALSE;
5794 #if DEVELOPMENT || DEBUG
5795 task_t task = current_task();
5796 #endif /* DEVELOPMENT || DEBUG */
5797
5798 dwp_start = dwp = NULL;
5799
5800 if (cntrl_flags & ~UPL_VALID_FLAGS) {
5801 /*
5802 * For forward compatibility's sake,
5803 * reject any unknown flag.
5804 */
5805 return KERN_INVALID_VALUE;
5806 }
5807 if ((!object->internal) && (object->paging_offset != 0)) {
5808 panic("vm_object_upl_request: external object with non-zero paging offset");
5809 }
5810 if (object->phys_contiguous) {
5811 panic("vm_object_upl_request: contiguous object specified");
5812 }
5813
5814 assertf(page_aligned(offset) && page_aligned(size),
5815 "offset 0x%llx size 0x%x",
5816 offset, size);
5817
5818 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
5819
5820 dw_count = 0;
5821 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5822 dwp_start = vm_page_delayed_work_get_ctx();
5823 if (dwp_start == NULL) {
5824 dwp_start = &dw_array;
5825 dw_limit = 1;
5826 dwp_finish_ctx = FALSE;
5827 }
5828
5829 dwp = dwp_start;
5830
5831 if (size > MAX_UPL_SIZE_BYTES) {
5832 size = MAX_UPL_SIZE_BYTES;
5833 }
5834
5835 if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
5836 *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
5837 }
5838
5839 #if CONFIG_IOSCHED || UPL_DEBUG
5840 if (object->io_tracking || upl_debug_enabled) {
5841 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
5842 }
5843 #endif
5844 #if CONFIG_IOSCHED
5845 if (object->io_tracking) {
5846 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
5847 }
5848 #endif
5849
5850 if (cntrl_flags & UPL_SET_INTERNAL) {
5851 if (cntrl_flags & UPL_SET_LITE) {
5852 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5853 } else {
5854 upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
5855 }
5856 user_page_list = size ? upl->page_list : NULL;
5857 } else {
5858 if (cntrl_flags & UPL_SET_LITE) {
5859 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5860 } else {
5861 upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
5862 }
5863 }
5864 *upl_ptr = upl;
5865
5866 if (user_page_list) {
5867 user_page_list[0].device = FALSE;
5868 }
5869
5870 if (cntrl_flags & UPL_SET_LITE) {
5871 upl->map_object = object;
5872 } else {
5873 upl->map_object = vm_object_allocate(size);
5874 /*
5875 * No need to lock the new object: nobody else knows
5876 * about it yet, so it's all ours so far.
5877 */
5878 upl->map_object->shadow = object;
5879 upl->map_object->pageout = TRUE;
5880 upl->map_object->can_persist = FALSE;
5881 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5882 upl->map_object->vo_shadow_offset = offset;
5883 upl->map_object->wimg_bits = object->wimg_bits;
5884 assertf(page_aligned(upl->map_object->vo_shadow_offset),
5885 "object %p shadow_offset 0x%llx",
5886 upl->map_object, upl->map_object->vo_shadow_offset);
5887
5888 alias_page = vm_page_grab_fictitious(TRUE);
5889
5890 upl->flags |= UPL_SHADOWED;
5891 }
5892 if (cntrl_flags & UPL_FOR_PAGEOUT) {
5893 upl->flags |= UPL_PAGEOUT;
5894 }
5895
5896 vm_object_lock(object);
5897 vm_object_activity_begin(object);
5898
5899 grab_options = 0;
5900 #if CONFIG_SECLUDED_MEMORY
5901 if (object->can_grab_secluded) {
5902 grab_options |= VM_PAGE_GRAB_SECLUDED;
5903 }
5904 #endif /* CONFIG_SECLUDED_MEMORY */
5905
5906 /*
5907 * we can lock in the paging_offset once paging_in_progress is set
5908 */
5909 upl->u_size = size;
5910 upl->u_offset = offset + object->paging_offset;
5911
5912 #if CONFIG_IOSCHED || UPL_DEBUG
5913 if (object->io_tracking || upl_debug_enabled) {
5914 vm_object_activity_begin(object);
5915 queue_enter(&object->uplq, upl, upl_t, uplq);
5916 }
5917 #endif
5918 if ((cntrl_flags & UPL_WILL_MODIFY) && object->vo_copy != VM_OBJECT_NULL) {
5919 /*
5920 * Honor copy-on-write obligations
5921 *
5922 * The caller is gathering these pages and
5923 * might modify their contents. We need to
5924 * make sure that the copy object has its own
5925 * private copies of these pages before we let
5926 * the caller modify them.
5927 */
5928 vm_object_update(object,
5929 offset,
5930 size,
5931 NULL,
5932 NULL,
5933 FALSE, /* should_return */
5934 MEMORY_OBJECT_COPY_SYNC,
5935 VM_PROT_NO_CHANGE);
5936
5937 VM_PAGEOUT_DEBUG(upl_cow, 1);
5938 VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
5939 }
5940 /*
5941 * remember which copy object we synchronized with
5942 */
5943 last_copy_object = object->vo_copy;
5944 last_copy_version = object->vo_copy_version;
5945 entry = 0;
5946
5947 xfer_size = size;
5948 dst_offset = offset;
5949 size_in_pages = size / PAGE_SIZE;
5950
5951 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
5952 object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
5953 object->scan_collisions = 0;
5954 }
5955
5956 if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
5957 boolean_t isSSD = FALSE;
5958
5959 #if !XNU_TARGET_OS_OSX
5960 isSSD = TRUE;
5961 #else /* !XNU_TARGET_OS_OSX */
5962 vnode_pager_get_isSSD(object->pager, &isSSD);
5963 #endif /* !XNU_TARGET_OS_OSX */
5964 vm_object_unlock(object);
5965
5966 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5967
5968 if (isSSD == TRUE) {
5969 delay(1000 * size_in_pages);
5970 } else {
5971 delay(5000 * size_in_pages);
5972 }
5973 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5974
5975 vm_object_lock(object);
5976 }
5977
5978 while (xfer_size) {
5979 dwp->dw_mask = 0;
5980
5981 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
5982 vm_object_unlock(object);
5983 alias_page = vm_page_grab_fictitious(TRUE);
5984 vm_object_lock(object);
5985 }
5986 if (cntrl_flags & UPL_COPYOUT_FROM) {
5987 upl->flags |= UPL_PAGE_SYNC_DONE;
5988
5989 if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
5990 dst_page->vmp_fictitious ||
5991 dst_page->vmp_absent ||
5992 VMP_ERROR_GET(dst_page) ||
5993 dst_page->vmp_cleaning ||
5994 (VM_PAGE_WIRED(dst_page))) {
5995 if (user_page_list) {
5996 user_page_list[entry].phys_addr = 0;
5997 }
5998
5999 goto try_next_page;
6000 }
6001 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
6002
6003 /*
6004 * grab this up front...
6005 * a high percentage of the time we're going to
6006 * need the hardware modification state a bit later
6007 * anyway... so we can eliminate an extra call into
6008 * the pmap layer by grabbing it here and recording it
6009 */
6010 if (dst_page->vmp_pmapped) {
6011 refmod_state = pmap_get_refmod(phys_page);
6012 } else {
6013 refmod_state = 0;
6014 }
6015
6016 if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
6017 /*
6018 * page is on inactive list and referenced...
6019 * reactivate it now... this gets it out of the
6020 * way of vm_pageout_scan which would have to
6021 * reactivate it upon tripping over it
6022 */
6023 dwp->dw_mask |= DW_vm_page_activate;
6024 }
6025 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
6026 /*
6027 * we're only asking for DIRTY pages to be returned
6028 */
6029 if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
6030 /*
6031 * if this is the page stolen by vm_pageout_scan to be
6032 * cleaned (as opposed to a buddy being clustered in),
6033 * or this request is not being driven by a PAGEOUT cluster,
6034 * then we only need to check for the page being dirty or
6035 * precious to decide whether to return it
6036 */
6037 if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
6038 goto check_busy;
6039 }
6040 goto dont_return;
6041 }
6042 /*
6043 * this is a request for a PAGEOUT cluster and this page
6044 * is merely along for the ride as a 'buddy'... not only
6045 * does it have to be dirty to be returned, but it also
6046 * can't have been referenced recently...
6047 */
6048 if ((hibernate_cleaning_in_progress == TRUE ||
6049 (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
6050 (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
6051 ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
6052 goto check_busy;
6053 }
6054 dont_return:
6055 /*
6056 * if we reach here, we're not to return
6057 * the page... go on to the next one
6058 */
6059 if (dst_page->vmp_laundry == TRUE) {
6060 /*
6061 * if we get here, the page is not 'cleaning' (filtered out above).
6062 * since it has been referenced, remove it from the laundry
6063 * so we don't pay the cost of an I/O to clean a page
6064 * we're just going to take back
6065 */
6066 vm_page_lockspin_queues();
6067
6068 vm_pageout_steal_laundry(dst_page, TRUE);
6069 vm_page_activate(dst_page);
6070
6071 vm_page_unlock_queues();
6072 }
6073 if (user_page_list) {
6074 user_page_list[entry].phys_addr = 0;
6075 }
6076
6077 goto try_next_page;
6078 }
6079 check_busy:
6080 if (dst_page->vmp_busy) {
6081 if (cntrl_flags & UPL_NOBLOCK) {
6082 if (user_page_list) {
6083 user_page_list[entry].phys_addr = 0;
6084 }
6085 dwp->dw_mask = 0;
6086
6087 goto try_next_page;
6088 }
6089 /*
6090 * someone else is playing with the
6091 * page. We will have to wait.
6092 */
6093 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6094
6095 continue;
6096 }
6097 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6098 vm_page_lockspin_queues();
6099
6100 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6101 /*
6102 * we've buddied up a page for a clustered pageout
6103 * that has already been moved to the pageout
6104 * queue by pageout_scan... we need to remove
6105 * it from the queue and drop the laundry count
6106 * on that queue
6107 */
6108 vm_pageout_throttle_up(dst_page);
6109 }
6110 vm_page_unlock_queues();
6111 }
6112 hw_dirty = refmod_state & VM_MEM_MODIFIED;
6113 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
6114
6115 if (phys_page > upl->highest_page) {
6116 upl->highest_page = phys_page;
6117 }
6118
6119 assert(!pmap_is_noencrypt(phys_page));
6120
6121 if (cntrl_flags & UPL_SET_LITE) {
6122 unsigned int pg_num;
6123
6124 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
6125 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
6126 bitmap_set(upl->lite_list, pg_num);
6127
6128 if (hw_dirty) {
6129 if (pmap_flushes_delayed == FALSE) {
6130 pmap_flush_context_init(&pmap_flush_context_storage);
6131 pmap_flushes_delayed = TRUE;
6132 }
6133 pmap_clear_refmod_options(phys_page,
6134 VM_MEM_MODIFIED,
6135 PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
6136 &pmap_flush_context_storage);
6137 }
6138
6139 /*
6140 * Mark original page as cleaning
6141 * in place.
6142 */
6143 dst_page->vmp_cleaning = TRUE;
6144 dst_page->vmp_precious = FALSE;
6145 } else {
6146 /*
6147 * use pageclean setup, it is more
6148 * convenient even for the pageout
6149 * cases here
6150 */
6151 vm_object_lock(upl->map_object);
6152 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
6153 vm_object_unlock(upl->map_object);
6154
6155 alias_page->vmp_absent = FALSE;
6156 alias_page = NULL;
6157 }
6158 if (dirty) {
6159 SET_PAGE_DIRTY(dst_page, FALSE);
6160 } else {
6161 dst_page->vmp_dirty = FALSE;
6162 }
6163
6164 if (!dirty) {
6165 dst_page->vmp_precious = TRUE;
6166 }
6167
6168 if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
6169 if (!VM_PAGE_WIRED(dst_page)) {
6170 dst_page->vmp_free_when_done = TRUE;
6171 }
6172 }
6173 } else {
6174 if ((cntrl_flags & UPL_WILL_MODIFY) &&
6175 (object->vo_copy != last_copy_object ||
6176 object->vo_copy_version != last_copy_version)) {
6177 /*
6178 * Honor copy-on-write obligations
6179 *
6180 * The copy object has changed since we
6181 * last synchronized for copy-on-write.
6182 * Another copy object might have been
6183 * inserted while we released the object's
6184 * lock. Since someone could have seen the
6185 * original contents of the remaining pages
6186 * through that new object, we have to
6187 * synchronize with it again for the remaining
6188 * pages only. The previous pages are "busy"
6189 * so they can not be seen through the new
6190 * mapping. The new mapping will see our
6191 * upcoming changes for those previous pages,
6192 * but that's OK since they couldn't see what
6193 * was there before. It's just a race anyway
6194 * and there's no guarantee of consistency or
6195 * atomicity. We just don't want new mappings
6196 * to see both the *before* and *after* pages.
6197 */
6198 if (object->vo_copy != VM_OBJECT_NULL) {
6199 vm_object_update(
6200 object,
6201 dst_offset,/* current offset */
6202 xfer_size, /* remaining size */
6203 NULL,
6204 NULL,
6205 FALSE, /* should_return */
6206 MEMORY_OBJECT_COPY_SYNC,
6207 VM_PROT_NO_CHANGE);
6208
6209 VM_PAGEOUT_DEBUG(upl_cow_again, 1);
6210 VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
6211 }
6212 /*
6213 * remember the copy object we synced with
6214 */
6215 last_copy_object = object->vo_copy;
6216 last_copy_version = object->vo_copy_version;
6217 }
6218 dst_page = vm_page_lookup(object, dst_offset);
6219
6220 if (dst_page != VM_PAGE_NULL) {
6221 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6222 /*
6223 * skip over pages already present in the cache
6224 */
6225 if (user_page_list) {
6226 user_page_list[entry].phys_addr = 0;
6227 }
6228
6229 goto try_next_page;
6230 }
6231 if (dst_page->vmp_fictitious) {
6232 panic("need corner case for fictitious page");
6233 }
6234
6235 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6236 /*
6237 * someone else is playing with the
6238 * page. We will have to wait.
6239 */
6240 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6241
6242 continue;
6243 }
6244 if (dst_page->vmp_laundry) {
6245 vm_pageout_steal_laundry(dst_page, FALSE);
6246 }
6247 } else {
6248 if (object->private) {
6249 /*
6250 * This is a nasty wrinkle for users
6251 * of upl who encounter device or
6252 * private memory; however, it is
6253 * unavoidable: only a fault can
6254 * resolve the actual backing
6255 * physical page by asking the
6256 * backing device.
6257 */
6258 if (user_page_list) {
6259 user_page_list[entry].phys_addr = 0;
6260 }
6261
6262 goto try_next_page;
6263 }
6264 if (object->scan_collisions) {
6265 /*
6266 * the pageout_scan thread is trying to steal
6267 * pages from this object, but has run into our
6268 * lock... grab 2 pages from the head of the object...
6269 * the first is freed on behalf of pageout_scan, the
6270 * 2nd is for our own use... we use vm_object_page_grab
6271 * in both cases to avoid taking pages from the free
6272 * list since we are under memory pressure and our
6273 * lock on this object is getting in the way of
6274 * relieving it
6275 */
6276 dst_page = vm_object_page_grab(object);
6277
6278 if (dst_page != VM_PAGE_NULL) {
6279 vm_page_release(dst_page,
6280 FALSE);
6281 }
6282
6283 dst_page = vm_object_page_grab(object);
6284 }
6285 if (dst_page == VM_PAGE_NULL) {
6286 /*
6287 * need to allocate a page
6288 */
6289 dst_page = vm_page_grab_options(grab_options);
6290 if (dst_page != VM_PAGE_NULL) {
6291 page_grab_count++;
6292 }
6293 }
6294 if (dst_page == VM_PAGE_NULL) {
6295 if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
6296 /*
6297 * we don't want to stall waiting for pages to come onto the free list
6298 * while we're already holding absent pages in this UPL
6299 * the caller will deal with the empty slots
6300 */
6301 if (user_page_list) {
6302 user_page_list[entry].phys_addr = 0;
6303 }
6304
6305 goto try_next_page;
6306 }
6307 /*
6308 * no pages available... wait
6309 * then try again for the same
6310 * offset...
6311 */
6312 vm_object_unlock(object);
6313
6314 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
6315
6316 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
6317
6318 VM_PAGE_WAIT();
6319 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
6320
6321 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
6322
6323 vm_object_lock(object);
6324
6325 continue;
6326 }
6327 vm_page_insert(dst_page, object, dst_offset);
6328
6329 dst_page->vmp_absent = TRUE;
6330 dst_page->vmp_busy = FALSE;
6331
6332 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
6333 /*
6334 * if UPL_RET_ONLY_ABSENT was specified,
6335 * then we're definitely setting up a
6336 * upl for a clustered read/pagein
6337 * operation... mark the pages as clustered
6338 * so upl_commit_range can put them on the
6339 * speculative list
6340 */
6341 dst_page->vmp_clustered = TRUE;
6342
6343 if (!(cntrl_flags & UPL_FILE_IO)) {
6344 counter_inc(&vm_statistics_pageins);
6345 }
6346 }
6347 }
6348 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
6349
6350 dst_page->vmp_overwriting = TRUE;
6351
6352 if (dst_page->vmp_pmapped) {
6353 if (!(cntrl_flags & UPL_FILE_IO)) {
6354 /*
6355 * eliminate all mappings from the
6356 * original object and its progeny
6357 */
6358 refmod_state = pmap_disconnect(phys_page);
6359 } else {
6360 refmod_state = pmap_get_refmod(phys_page);
6361 }
6362 } else {
6363 refmod_state = 0;
6364 }
6365
6366 hw_dirty = refmod_state & VM_MEM_MODIFIED;
6367 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
6368
6369 if (cntrl_flags & UPL_SET_LITE) {
6370 unsigned int pg_num;
6371
6372 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
6373 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
6374 bitmap_set(upl->lite_list, pg_num);
6375
6376 if (hw_dirty) {
6377 pmap_clear_modify(phys_page);
6378 }
6379
6380 /*
6381 * Mark original page as cleaning
6382 * in place.
6383 */
6384 dst_page->vmp_cleaning = TRUE;
6385 dst_page->vmp_precious = FALSE;
6386 } else {
6387 /*
6388 * use pageclean setup, it is more
6389 * convenient even for the pageout
6390 * cases here
6391 */
6392 vm_object_lock(upl->map_object);
6393 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
6394 vm_object_unlock(upl->map_object);
6395
6396 alias_page->vmp_absent = FALSE;
6397 alias_page = NULL;
6398 }
6399
6400 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
6401 upl->flags &= ~UPL_CLEAR_DIRTY;
6402 upl->flags |= UPL_SET_DIRTY;
6403 dirty = TRUE;
6404 /*
6405 * Page belonging to a code-signed object is about to
6406 * be written. Mark it tainted and disconnect it from
6407 * all pmaps so processes have to fault it back in and
6408 * deal with the tainted bit.
6409 */
6410 if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
6411 dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
6412 vm_page_upl_tainted++;
6413 if (dst_page->vmp_pmapped) {
6414 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6415 if (refmod_state & VM_MEM_REFERENCED) {
6416 dst_page->vmp_reference = TRUE;
6417 }
6418 }
6419 }
6420 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
6421 /*
6422 * clean in place for read implies
6423 * that a write will be done on all
6424 * the pages that are dirty before
6425 * a upl commit is done. The caller
6426 * is obligated to preserve the
6427 * contents of all pages marked dirty
6428 */
6429 upl->flags |= UPL_CLEAR_DIRTY;
6430 }
6431 dst_page->vmp_dirty = dirty;
6432
6433 if (!dirty) {
6434 dst_page->vmp_precious = TRUE;
6435 }
6436
6437 if (!VM_PAGE_WIRED(dst_page)) {
6438 /*
6439 * deny access to the target page while
6440 * it is being worked on
6441 */
6442 dst_page->vmp_busy = TRUE;
6443 } else {
6444 dwp->dw_mask |= DW_vm_page_wire;
6445 }
6446
6447 /*
6448 * We might be about to satisfy a fault which has been
6449 * requested. So no need for the "restart" bit.
6450 */
6451 dst_page->vmp_restart = FALSE;
6452 if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
6453 /*
6454 * expect the page to be used
6455 */
6456 dwp->dw_mask |= DW_set_reference;
6457 }
6458 if (cntrl_flags & UPL_PRECIOUS) {
6459 if (object->internal) {
6460 SET_PAGE_DIRTY(dst_page, FALSE);
6461 dst_page->vmp_precious = FALSE;
6462 } else {
6463 dst_page->vmp_precious = TRUE;
6464 }
6465 } else {
6466 dst_page->vmp_precious = FALSE;
6467 }
6468 }
6469 if (dst_page->vmp_busy) {
6470 upl->flags |= UPL_HAS_BUSY;
6471 }
6472
6473 if (phys_page > upl->highest_page) {
6474 upl->highest_page = phys_page;
6475 }
6476 assert(!pmap_is_noencrypt(phys_page));
6477 if (user_page_list) {
6478 user_page_list[entry].phys_addr = phys_page;
6479 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
6480 user_page_list[entry].absent = dst_page->vmp_absent;
6481 user_page_list[entry].dirty = dst_page->vmp_dirty;
6482 user_page_list[entry].precious = dst_page->vmp_precious;
6483 user_page_list[entry].device = FALSE;
6484 user_page_list[entry].needed = FALSE;
6485 if (dst_page->vmp_clustered == TRUE) {
6486 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
6487 } else {
6488 user_page_list[entry].speculative = FALSE;
6489 }
6490 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
6491 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
6492 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
6493 user_page_list[entry].mark = FALSE;
6494 }
6495 /*
6496 * if UPL_RET_ONLY_ABSENT is set, then
6497 * we are working with a fresh page and we've
6498 * just set the clustered flag on it to
6499 * indicate that it was dragged in as part of a
6500 * speculative cluster... so leave it alone
6501 */
6502 if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6503 /*
6504 * someone is explicitly grabbing this page...
6505 * update clustered and speculative state
6506 *
6507 */
6508 if (dst_page->vmp_clustered) {
6509 VM_PAGE_CONSUME_CLUSTERED(dst_page);
6510 }
6511 }
6512 try_next_page:
6513 if (dwp->dw_mask) {
6514 if (dwp->dw_mask & DW_vm_page_activate) {
6515 counter_inc(&vm_statistics_reactivations);
6516 }
6517
6518 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
6519
6520 if (dw_count >= dw_limit) {
6521 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6522
6523 dwp = dwp_start;
6524 dw_count = 0;
6525 }
6526 }
6527 entry++;
6528 dst_offset += PAGE_SIZE_64;
6529 xfer_size -= PAGE_SIZE;
6530 }
6531 if (dw_count) {
6532 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6533 dwp = dwp_start;
6534 dw_count = 0;
6535 }
6536
6537 if (alias_page != NULL) {
6538 VM_PAGE_FREE(alias_page);
6539 }
6540 if (pmap_flushes_delayed == TRUE) {
6541 pmap_flush(&pmap_flush_context_storage);
6542 }
6543
6544 if (page_list_count != NULL) {
6545 if (upl->flags & UPL_INTERNAL) {
6546 *page_list_count = 0;
6547 } else if (*page_list_count > entry) {
6548 *page_list_count = entry;
6549 }
6550 }
6551 #if UPL_DEBUG
6552 upl->upl_state = 1;
6553 #endif
6554 vm_object_unlock(object);
6555
6556 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
6557 #if DEVELOPMENT || DEBUG
6558 if (task != NULL) {
6559 ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
6560 }
6561 #endif /* DEVELOPMENT || DEBUG */
6562
6563 if (dwp_start && dwp_finish_ctx) {
6564 vm_page_delayed_work_finish_ctx(dwp_start);
6565 dwp_start = dwp = NULL;
6566 }
6567
6568 return KERN_SUCCESS;
6569 }
6570
6571 /*
6572 * Routine: vm_object_super_upl_request
6573 * Purpose:
6574 * Cause the population of a portion of a vm_object
6575 * in much the same way as memory_object_upl_request.
6576 * Depending on the nature of the request, the pages
6577 * returned may contain valid data or be uninitialized.
6578 * However, the region may be expanded up to the super
6579 * cluster size provided.
6580 */
6581
6582 __private_extern__ kern_return_t
6583 vm_object_super_upl_request(
6584 vm_object_t object,
6585 vm_object_offset_t offset,
6586 upl_size_t size,
6587 upl_size_t super_cluster,
6588 upl_t *upl,
6589 upl_page_info_t *user_page_list,
6590 unsigned int *page_list_count,
6591 upl_control_flags_t cntrl_flags,
6592 vm_tag_t tag)
6593 {
6594 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
6595 return KERN_FAILURE;
6596 }
6597
6598 assert(object->paging_in_progress);
6599 offset = offset - object->paging_offset;
6600
6601 if (super_cluster > size) {
6602 vm_object_offset_t base_offset;
6603 upl_size_t super_size;
6604 vm_object_size_t super_size_64;
6605
6606 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
6607 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
6608 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
6609 super_size = (upl_size_t) super_size_64;
6610 assert(super_size == super_size_64);
6611
6612 if (offset > (base_offset + super_size)) {
6613 panic("vm_object_super_upl_request: Missed target pageout"
6614 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
6615 offset, base_offset, super_size, super_cluster,
6616 size, object->paging_offset);
6617 }
6618 /*
6619 * apparently there is a case where the vm requests a
6620 * page to be written out whose offset is beyond the
6621 * object size
6622 */
6623 if ((offset + size) > (base_offset + super_size)) {
6624 super_size_64 = (offset + size) - base_offset;
6625 super_size = (upl_size_t) super_size_64;
6626 assert(super_size == super_size_64);
6627 }
6628
6629 offset = base_offset;
6630 size = super_size;
6631 }
6632 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
6633 }
6634
6635 int cs_executable_create_upl = 0;
6636 extern int proc_selfpid(void);
6637 extern char *proc_name_address(void *p);
6638
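/*
 * Create a UPL covering [offset, offset + *upl_size) in the given map:
 * look up the map entry, apply any copy-on-write handling required, then
 * hand the request off to vm_object_upl_request() against the entry's
 * backing object.  *upl_size may be trimmed to the entry's bounds and to
 * MAX_UPL_SIZE_BYTES.
 */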
6639 kern_return_t
6640 vm_map_create_upl(
6641 vm_map_t map,
6642 vm_map_address_t offset,
6643 upl_size_t *upl_size,
6644 upl_t *upl,
6645 upl_page_info_array_t page_list,
6646 unsigned int *count,
6647 upl_control_flags_t *flags,
6648 vm_tag_t tag)
6649 {
6650 vm_map_entry_t entry;
6651 upl_control_flags_t caller_flags;
6652 int force_data_sync;
6653 int sync_cow_data;
6654 vm_object_t local_object;
6655 vm_map_offset_t local_offset;
6656 vm_map_offset_t local_start;
6657 kern_return_t ret;
6658 vm_map_address_t original_offset;
6659 vm_map_size_t original_size, adjusted_size;
6660 vm_map_offset_t local_entry_start;
6661 vm_object_offset_t local_entry_offset;
6662 vm_object_offset_t offset_in_mapped_page;
6663 boolean_t release_map = FALSE;
6664
6665 start_with_map:
6666
6667 original_offset = offset;
6668 original_size = *upl_size;
6669 adjusted_size = original_size;
6670
6671 caller_flags = *flags;
6672
6673 if (caller_flags & ~UPL_VALID_FLAGS) {
6674 /*
6675 * For forward compatibility's sake,
6676 * reject any unknown flag.
6677 */
6678 ret = KERN_INVALID_VALUE;
6679 goto done;
6680 }
6681 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
6682 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
6683
6684 if (upl == NULL) {
6685 ret = KERN_INVALID_ARGUMENT;
6686 goto done;
6687 }
6688
6689 REDISCOVER_ENTRY:
6690 vm_map_lock_read(map);
6691
6692 if (!vm_map_lookup_entry(map, offset, &entry)) {
6693 vm_map_unlock_read(map);
6694 ret = KERN_FAILURE;
6695 goto done;
6696 }
6697
6698 local_entry_start = entry->vme_start;
6699 local_entry_offset = VME_OFFSET(entry);
6700
6701 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
6702 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, *upl_size, *flags);
6703 }
6704
6705 if (entry->vme_end - original_offset < adjusted_size) {
6706 adjusted_size = entry->vme_end - original_offset;
6707 assert(adjusted_size > 0);
6708 *upl_size = (upl_size_t) adjusted_size;
6709 assert(*upl_size == adjusted_size);
6710 }
6711
6712 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
6713 *flags = 0;
6714
6715 if (!entry->is_sub_map &&
6716 VME_OBJECT(entry) != VM_OBJECT_NULL) {
6717 if (VME_OBJECT(entry)->private) {
6718 *flags = UPL_DEV_MEMORY;
6719 }
6720
6721 if (VME_OBJECT(entry)->phys_contiguous) {
6722 *flags |= UPL_PHYS_CONTIG;
6723 }
6724 }
6725 vm_map_unlock_read(map);
6726 ret = KERN_SUCCESS;
6727 goto done;
6728 }
6729
6730 offset_in_mapped_page = 0;
6731 if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
6732 offset = vm_map_trunc_page(original_offset, VM_MAP_PAGE_MASK(map));
6733 *upl_size = (upl_size_t)
6734 (vm_map_round_page(original_offset + adjusted_size,
6735 VM_MAP_PAGE_MASK(map))
6736 - offset);
6737
6738 offset_in_mapped_page = original_offset - offset;
6739 assert(offset_in_mapped_page < VM_MAP_PAGE_SIZE(map));
6740
6741 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)original_offset, (uint64_t)original_size, *flags, (uint64_t)offset, (uint64_t)adjusted_size, *upl_size, offset_in_mapped_page);
6742 }
6743
6744 if (!entry->is_sub_map) {
6745 if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
6746 !VME_OBJECT(entry)->phys_contiguous) {
6747 if (*upl_size > MAX_UPL_SIZE_BYTES) {
6748 *upl_size = MAX_UPL_SIZE_BYTES;
6749 }
6750 }
6751
6752 /*
6753 * Create an object if necessary.
6754 */
6755 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
6756 if (vm_map_lock_read_to_write(map)) {
6757 goto REDISCOVER_ENTRY;
6758 }
6759
6760 VME_OBJECT_SET(entry,
6761 vm_object_allocate((vm_size_t)
6762 vm_object_round_page((entry->vme_end - entry->vme_start))),
6763 false, 0);
6764 VME_OFFSET_SET(entry, 0);
6765 assert(entry->use_pmap);
6766
6767 vm_map_lock_write_to_read(map);
6768 }
6769
6770 if (!(caller_flags & UPL_COPYOUT_FROM) &&
6771 !(entry->protection & VM_PROT_WRITE)) {
6772 vm_map_unlock_read(map);
6773 ret = KERN_PROTECTION_FAILURE;
6774 goto done;
6775 }
6776 }
6777
6778 #if !XNU_TARGET_OS_OSX
6779 if (map->pmap != kernel_pmap &&
6780 (caller_flags & UPL_COPYOUT_FROM) &&
6781 (entry->protection & VM_PROT_EXECUTE) &&
6782 !(entry->protection & VM_PROT_WRITE)) {
6783 vm_offset_t kaddr;
6784 vm_size_t ksize;
6785
6786 /*
6787 * We're about to create a read-only UPL backed by
6788 * memory from an executable mapping.
6789 * Wiring the pages would result in the pages being copied
6790 * (due to the "MAP_PRIVATE" mapping) and no longer
6791 * code-signed, so no longer eligible for execution.
6792 * Instead, let's copy the data into a kernel buffer and
6793 * create the UPL from this kernel buffer.
6794 * The kernel buffer is then freed, leaving the UPL holding
6795 * the last reference on the VM object, so the memory will
6796 * be released when the UPL is committed.
6797 */
6798
6799 vm_map_unlock_read(map);
6800 entry = VM_MAP_ENTRY_NULL;
6801 /* allocate kernel buffer */
6802 ksize = round_page(*upl_size);
6803 kaddr = 0;
6804 ret = kmem_alloc(kernel_map, &kaddr, ksize,
6805 KMA_PAGEABLE | KMA_DATA, tag);
6806 if (ret == KERN_SUCCESS) {
6807 /* copyin the user data */
6808 ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
6809 }
6810 if (ret == KERN_SUCCESS) {
6811 if (ksize > *upl_size) {
6812 /* zero out the extra space in kernel buffer */
6813 memset((void *)(kaddr + *upl_size),
6814 0,
6815 ksize - *upl_size);
6816 }
6817 /* create the UPL from the kernel buffer */
6818 vm_object_offset_t offset_in_object;
6819 vm_object_offset_t offset_in_object_page;
6820
6821 offset_in_object = offset - local_entry_start + local_entry_offset;
6822 offset_in_object_page = offset_in_object - vm_object_trunc_page(offset_in_object);
6823 assert(offset_in_object_page < PAGE_SIZE);
6824 assert(offset_in_object_page + offset_in_mapped_page < PAGE_SIZE);
6825 *upl_size -= offset_in_object_page + offset_in_mapped_page;
6826 ret = vm_map_create_upl(kernel_map,
6827 (vm_map_address_t)(kaddr + offset_in_object_page + offset_in_mapped_page),
6828 upl_size, upl, page_list, count, flags, tag);
6829 }
6830 if (kaddr != 0) {
6831 /* free the kernel buffer */
6832 kmem_free(kernel_map, kaddr, ksize);
6833 kaddr = 0;
6834 ksize = 0;
6835 }
6836 #if DEVELOPMENT || DEBUG
6837 DTRACE_VM4(create_upl_from_executable,
6838 vm_map_t, map,
6839 vm_map_address_t, offset,
6840 upl_size_t, *upl_size,
6841 kern_return_t, ret);
6842 #endif /* DEVELOPMENT || DEBUG */
6843 goto done;
6844 }
6845 #endif /* !XNU_TARGET_OS_OSX */
6846
6847 if (!entry->is_sub_map) {
6848 local_object = VME_OBJECT(entry);
6849 assert(local_object != VM_OBJECT_NULL);
6850 }
6851
6852 if (!entry->is_sub_map &&
6853 !entry->needs_copy &&
6854 *upl_size != 0 &&
6855 local_object->vo_size > *upl_size && /* partial UPL */
6856 entry->wired_count == 0 && /* No COW for entries that are wired */
6857 (map->pmap != kernel_pmap) && /* alias checks */
6858 (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
6859 ||
6860 ( /* case 2 */
6861 local_object->internal &&
6862 (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
6863 local_object->ref_count > 1))) {
6864 vm_prot_t prot;
6865
6866 /*
6867 * Case 1:
6868 * Set up the targeted range for copy-on-write to avoid
6869 * applying true_share/copy_delay to the entire object.
6870 *
6871 * Case 2:
6872 * This map entry covers only part of an internal
6873 * object. There could be other map entries covering
6874 * other areas of this object and some of these map
6875 * entries could be marked as "needs_copy", which
6876 * assumes that the object is COPY_SYMMETRIC.
6877 * To avoid marking this object as COPY_DELAY and
6878 * "true_share", let's shadow it and mark the new
6879 * (smaller) object as "true_share" and COPY_DELAY.
6880 */
6881
6882 if (vm_map_lock_read_to_write(map)) {
6883 goto REDISCOVER_ENTRY;
6884 }
6885 vm_map_lock_assert_exclusive(map);
6886 assert(VME_OBJECT(entry) == local_object);
6887
6888 vm_map_clip_start(map,
6889 entry,
6890 vm_map_trunc_page(offset,
6891 VM_MAP_PAGE_MASK(map)));
6892 vm_map_clip_end(map,
6893 entry,
6894 vm_map_round_page(offset + *upl_size,
6895 VM_MAP_PAGE_MASK(map)));
6896 if ((entry->vme_end - offset) < *upl_size) {
6897 *upl_size = (upl_size_t) (entry->vme_end - offset);
6898 assert(*upl_size == entry->vme_end - offset);
6899 }
6900
6901 prot = entry->protection & ~VM_PROT_WRITE;
6902 if (override_nx(map, VME_ALIAS(entry)) && prot) {
6903 prot |= VM_PROT_EXECUTE;
6904 }
6905 vm_object_pmap_protect(local_object,
6906 VME_OFFSET(entry),
6907 entry->vme_end - entry->vme_start,
6908 ((entry->is_shared ||
6909 map->mapped_in_other_pmaps)
6910 ? PMAP_NULL
6911 : map->pmap),
6912 VM_MAP_PAGE_SIZE(map),
6913 entry->vme_start,
6914 prot);
6915
6916 assert(entry->wired_count == 0);
6917
6918 /*
6919 * Lock the VM object and re-check its status: if it's mapped
6920 * in another address space, we could still be racing with
6921 * another thread holding that other VM map exclusively.
6922 */
6923 vm_object_lock(local_object);
6924 if (local_object->true_share) {
6925 /* object is already in proper state: no COW needed */
6926 assert(local_object->copy_strategy !=
6927 MEMORY_OBJECT_COPY_SYMMETRIC);
6928 } else {
6929 /* not true_share: ask for copy-on-write below */
6930 assert(local_object->copy_strategy ==
6931 MEMORY_OBJECT_COPY_SYMMETRIC);
6932 entry->needs_copy = TRUE;
6933 }
6934 vm_object_unlock(local_object);
6935
6936 vm_map_lock_write_to_read(map);
6937 }
6938
6939 if (entry->needs_copy) {
6940 /*
6941 * Honor copy-on-write for COPY_SYMMETRIC
6942 * strategy.
6943 */
6944 vm_map_t local_map;
6945 vm_object_t object;
6946 vm_object_offset_t new_offset;
6947 vm_prot_t prot;
6948 boolean_t wired;
6949 vm_map_version_t version;
6950 vm_map_t real_map;
6951 vm_prot_t fault_type;
6952
6953 local_map = map;
6954
6955 if (caller_flags & UPL_COPYOUT_FROM) {
6956 fault_type = VM_PROT_READ | VM_PROT_COPY;
6957 vm_counters.create_upl_extra_cow++;
6958 vm_counters.create_upl_extra_cow_pages +=
6959 (entry->vme_end - entry->vme_start) / PAGE_SIZE;
6960 } else {
6961 fault_type = VM_PROT_WRITE;
6962 }
6963 if (vm_map_lookup_and_lock_object(&local_map,
6964 offset, fault_type,
6965 OBJECT_LOCK_EXCLUSIVE,
6966 &version, &object,
6967 &new_offset, &prot, &wired,
6968 NULL,
6969 &real_map, NULL) != KERN_SUCCESS) {
6970 if (fault_type == VM_PROT_WRITE) {
6971 vm_counters.create_upl_lookup_failure_write++;
6972 } else {
6973 vm_counters.create_upl_lookup_failure_copy++;
6974 }
6975 vm_map_unlock_read(local_map);
6976 ret = KERN_FAILURE;
6977 goto done;
6978 }
6979 if (real_map != local_map) {
6980 vm_map_unlock(real_map);
6981 }
6982 vm_map_unlock_read(local_map);
6983
6984 vm_object_unlock(object);
6985
6986 goto REDISCOVER_ENTRY;
6987 }
6988
6989 if (entry->is_sub_map) {
6990 vm_map_t submap;
6991
6992 submap = VME_SUBMAP(entry);
6993 local_start = entry->vme_start;
6994 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6995
6996 vm_map_reference(submap);
6997 vm_map_unlock_read(map);
6998
6999 DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map, (uint64_t)offset, (uint64_t)original_offset, *upl_size, (uint64_t)adjusted_size, (uint64_t)original_size, offset_in_mapped_page, submap);
7000 offset += offset_in_mapped_page;
7001 *upl_size -= offset_in_mapped_page;
7002
7003 if (release_map) {
7004 vm_map_deallocate(map);
7005 }
7006 map = submap;
7007 release_map = TRUE;
7008 offset = local_offset + (offset - local_start);
7009 goto start_with_map;
7010 }
7011
7012 if (sync_cow_data &&
7013 (VME_OBJECT(entry)->shadow ||
7014 VME_OBJECT(entry)->vo_copy)) {
7015 local_object = VME_OBJECT(entry);
7016 local_start = entry->vme_start;
7017 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
7018
7019 vm_object_reference(local_object);
7020 vm_map_unlock_read(map);
7021
7022 if (local_object->shadow && local_object->vo_copy) {
7023 vm_object_lock_request(local_object->shadow,
7024 ((vm_object_offset_t)
7025 ((offset - local_start) +
7026 local_offset) +
7027 local_object->vo_shadow_offset),
7028 *upl_size, FALSE,
7029 MEMORY_OBJECT_DATA_SYNC,
7030 VM_PROT_NO_CHANGE);
7031 }
7032 sync_cow_data = FALSE;
7033 vm_object_deallocate(local_object);
7034
7035 goto REDISCOVER_ENTRY;
7036 }
7037 if (force_data_sync) {
7038 local_object = VME_OBJECT(entry);
7039 local_start = entry->vme_start;
7040 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
7041
7042 vm_object_reference(local_object);
7043 vm_map_unlock_read(map);
7044
7045 vm_object_lock_request(local_object,
7046 ((vm_object_offset_t)
7047 ((offset - local_start) +
7048 local_offset)),
7049 (vm_object_size_t)*upl_size,
7050 FALSE,
7051 MEMORY_OBJECT_DATA_SYNC,
7052 VM_PROT_NO_CHANGE);
7053
7054 force_data_sync = FALSE;
7055 vm_object_deallocate(local_object);
7056
7057 goto REDISCOVER_ENTRY;
7058 }
7059 if (VME_OBJECT(entry)->private) {
7060 *flags = UPL_DEV_MEMORY;
7061 } else {
7062 *flags = 0;
7063 }
7064
7065 if (VME_OBJECT(entry)->phys_contiguous) {
7066 *flags |= UPL_PHYS_CONTIG;
7067 }
7068
7069 local_object = VME_OBJECT(entry);
7070 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
7071 local_start = entry->vme_start;
7072
7073 /*
7074 * Wiring will copy the pages to the shadow object.
7075 * The shadow object will not be code-signed so
7076 * attempting to execute code from these copied pages
7077 * would trigger a code-signing violation.
7078 */
7079 if (entry->protection & VM_PROT_EXECUTE) {
7080 #if MACH_ASSERT
7081 printf("pid %d[%s] create_upl out of executable range from "
7082 "0x%llx to 0x%llx: side effects may include "
7083 "code-signing violations later on\n",
7084 proc_selfpid(),
7085 (get_bsdtask_info(current_task())
7086 ? proc_name_address(get_bsdtask_info(current_task()))
7087 : "?"),
7088 (uint64_t) entry->vme_start,
7089 (uint64_t) entry->vme_end);
7090 #endif /* MACH_ASSERT */
7091 DTRACE_VM2(cs_executable_create_upl,
7092 uint64_t, (uint64_t)entry->vme_start,
7093 uint64_t, (uint64_t)entry->vme_end);
7094 cs_executable_create_upl++;
7095 }
7096
7097 vm_object_lock(local_object);
7098
7099 /*
7100 * Ensure that this object is "true_share" and "copy_delay" now,
7101 * while we're still holding the VM map lock. After we unlock the map,
7102 * anything could happen to that mapping, including some copy-on-write
7103 * activity. We need to make sure that the IOPL will point at the
7104 * same memory as the mapping.
7105 */
7106 if (local_object->true_share) {
7107 assert(local_object->copy_strategy !=
7108 MEMORY_OBJECT_COPY_SYMMETRIC);
7109 } else if (!is_kernel_object(local_object) &&
7110 local_object != compressor_object &&
7111 !local_object->phys_contiguous) {
7112 #if VM_OBJECT_TRACKING_OP_TRUESHARE
7113 if (!local_object->true_share &&
7114 vm_object_tracking_btlog) {
7115 btlog_record(vm_object_tracking_btlog, local_object,
7116 VM_OBJECT_TRACKING_OP_TRUESHARE,
7117 btref_get(__builtin_frame_address(0), 0));
7118 }
7119 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
7120 local_object->true_share = TRUE;
7121 if (local_object->copy_strategy ==
7122 MEMORY_OBJECT_COPY_SYMMETRIC) {
7123 local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
7124 }
7125 }
7126
7127 vm_object_reference_locked(local_object);
7128 vm_object_unlock(local_object);
7129
7130 vm_map_unlock_read(map);
7131
7132 offset += offset_in_mapped_page;
7133 assert(*upl_size > offset_in_mapped_page);
7134 *upl_size -= offset_in_mapped_page;
7135
7136 ret = vm_object_iopl_request(local_object,
7137 ((vm_object_offset_t)
7138 ((offset - local_start) + local_offset)),
7139 *upl_size,
7140 upl,
7141 page_list,
7142 count,
7143 caller_flags,
7144 tag);
7145 vm_object_deallocate(local_object);
7146
7147 done:
7148 if (release_map) {
7149 vm_map_deallocate(map);
7150 }
7151
7152 return ret;
7153 }
7154
7155 /*
7156 * Internal routine to enter a UPL into a VM map.
7157 *
7158 * JMM - This should just be doable through the standard
7159 * vm_map_enter() API.
7160 */
7161 kern_return_t
7162 vm_map_enter_upl_range(
7163 vm_map_t map,
7164 upl_t upl,
7165 vm_object_offset_t offset_to_map,
7166 upl_size_t size_to_map,
7167 vm_prot_t prot_to_map,
7168 vm_map_offset_t *dst_addr)
7169 {
7170 vm_map_size_t size;
7171 vm_object_offset_t offset;
7172 vm_map_offset_t addr;
7173 vm_page_t m;
7174 kern_return_t kr;
7175 int isVectorUPL = 0, curr_upl = 0;
7176 upl_t vector_upl = NULL;
7177 mach_vm_offset_t vector_upl_dst_addr = 0;
7178 vm_map_t vector_upl_submap = NULL;
7179 upl_offset_t subupl_offset = 0;
7180 upl_size_t subupl_size = 0;
7181
7182 if (upl == UPL_NULL) {
7183 return KERN_INVALID_ARGUMENT;
7184 }
7185
7186 DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx (uploff: 0x%llx) size 0x%x (uplsz: 0x%x) \n", map, upl, upl->flags, upl->map_object, offset_to_map, upl->u_offset, size_to_map, upl->u_size);
7187 assert(map == kernel_map);
7188
7189 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7190 int mapped = 0, valid_upls = 0;
7191 vector_upl = upl;
7192
7193 upl_lock(vector_upl);
7194 for (curr_upl = 0; curr_upl < vector_upl_max_upls(vector_upl); curr_upl++) {
7195 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
7196 if (upl == NULL) {
7197 continue;
7198 }
7199 valid_upls++;
7200 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
7201 mapped++;
7202 }
7203 }
7204
7205 if (mapped) {
7206 if (mapped != valid_upls) {
7207 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped", mapped, valid_upls);
7208 } else {
7209 upl_unlock(vector_upl);
7210 return KERN_FAILURE;
7211 }
7212 }
7213
7214 if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
7215 panic("TODO4K: vector UPL not implemented");
7216 }
7217
7218 vector_upl_submap = kmem_suballoc(map, &vector_upl_dst_addr,
7219 vector_upl->u_size, VM_MAP_CREATE_DEFAULT,
7220 VM_FLAGS_ANYWHERE, KMS_NOFAIL | KMS_DATA,
7221 VM_KERN_MEMORY_NONE).kmr_submap;
7222 map = vector_upl_submap;
7223 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
7224 curr_upl = 0;
7225 } else {
7226 upl_lock(upl);
7227 }
7228
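/*
 * For a vector UPL, each pass through this label maps the next sub-UPL
 * at its offset within the submap set up above; a plain UPL is mapped
 * in a single pass.
 */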
7229 process_upl_to_enter:
7230 if (isVectorUPL) {
7231 if (curr_upl == vector_upl_max_upls(vector_upl)) {
7232 *dst_addr = vector_upl_dst_addr;
7233 upl_unlock(vector_upl);
7234 return KERN_SUCCESS;
7235 }
7236 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7237 if (upl == NULL) {
7238 goto process_upl_to_enter;
7239 }
7240
7241 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
7242 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
7243 } else {
7244 /*
7245 * check to see if already mapped
7246 */
7247 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
7248 upl_unlock(upl);
7249 return KERN_FAILURE;
7250 }
7251 }
7252
7253 if ((!(upl->flags & UPL_SHADOWED)) &&
7254 ((upl->flags & UPL_HAS_BUSY) ||
7255 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
7256 vm_object_t object;
7257 vm_page_t alias_page;
7258 vm_object_offset_t new_offset;
7259 unsigned int pg_num;
7260
7261 size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7262 object = upl->map_object;
7263 upl->map_object = vm_object_allocate(vm_object_round_page(size));
7264
7265 vm_object_lock(upl->map_object);
7266
7267 upl->map_object->shadow = object;
7268 upl->map_object->pageout = TRUE;
7269 upl->map_object->can_persist = FALSE;
7270 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
7271 upl->map_object->vo_shadow_offset = upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset;
7272 assertf(page_aligned(upl->map_object->vo_shadow_offset),
7273 "object %p shadow_offset 0x%llx",
7274 upl->map_object,
7275 (uint64_t)upl->map_object->vo_shadow_offset);
7276 upl->map_object->wimg_bits = object->wimg_bits;
7277 offset = upl->map_object->vo_shadow_offset;
7278 new_offset = 0;
7279
7280 upl->flags |= UPL_SHADOWED;
7281
7282 while (size) {
7283 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
7284 assert(pg_num == new_offset / PAGE_SIZE);
7285
7286 if (bitmap_test(upl->lite_list, pg_num)) {
7287 alias_page = vm_page_grab_fictitious(TRUE);
7288
7289 vm_object_lock(object);
7290
7291 m = vm_page_lookup(object, offset);
7292 if (m == VM_PAGE_NULL) {
7293 panic("vm_upl_map: page missing");
7294 }
7295
7296 /*
7297 * Convert the fictitious page to a private
7298 * shadow of the real page.
7299 */
7300 assert(alias_page->vmp_fictitious);
7301 alias_page->vmp_fictitious = FALSE;
7302 alias_page->vmp_private = TRUE;
7303 alias_page->vmp_free_when_done = TRUE;
7304 /*
7305 * since m is a page in the upl it must
7306 * already be wired or BUSY, so it's
7307 * safe to assign the underlying physical
7308 * page to the alias
7309 */
7310 VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
7311
7312 vm_object_unlock(object);
7313
7314 vm_page_lockspin_queues();
7315 vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
7316 vm_page_unlock_queues();
7317
7318 vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
7319
7320 assert(!alias_page->vmp_wanted);
7321 alias_page->vmp_busy = FALSE;
7322 alias_page->vmp_absent = FALSE;
7323 }
7324 size -= PAGE_SIZE;
7325 offset += PAGE_SIZE_64;
7326 new_offset += PAGE_SIZE_64;
7327 }
7328 vm_object_unlock(upl->map_object);
7329 }
7330 if (upl->flags & UPL_SHADOWED) {
7331 if (isVectorUPL) {
7332 offset = 0;
7333 } else {
7334 offset = offset_to_map;
7335 }
7336 } else {
7337 offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
7338 if (!isVectorUPL) {
7339 offset += offset_to_map;
7340 }
7341 }
7342
7343 if (isVectorUPL) {
7344 size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7345 } else {
7346 size = MIN(upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)), size_to_map);
7347 }
7348
7349 vm_object_reference(upl->map_object);
7350
7351 if (!isVectorUPL) {
7352 *dst_addr = 0;
7353 /*
7354 * NEED A UPL_MAP ALIAS
7355 */
7356 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7357 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(.vm_tag = VM_KERN_MEMORY_OSFMK),
7358 upl->map_object, offset, FALSE,
7359 prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7360
7361 if (kr != KERN_SUCCESS) {
7362 vm_object_deallocate(upl->map_object);
7363 upl_unlock(upl);
7364 return kr;
7365 }
7366 } else {
7367 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7368 VM_MAP_KERNEL_FLAGS_FIXED(.vm_tag = VM_KERN_MEMORY_OSFMK),
7369 upl->map_object, offset, FALSE,
7370 prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7371 if (kr) {
7372 panic("vm_map_enter failed for a Vector UPL");
7373 }
7374 }
7375 upl->u_mapped_size = (upl_size_t) size; /* When we allow multiple submappings of the UPL */
7376 /* this will have to be an increment rather than */
7377 /* an assignment. */
7378 vm_object_lock(upl->map_object);
7379
7380 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
7381 m = vm_page_lookup(upl->map_object, offset);
7382
7383 if (m) {
7384 m->vmp_pmapped = TRUE;
7385
7386 /*
7387 * CODE SIGNING ENFORCEMENT: page has been wpmapped,
7388 * but only in kernel space. If this was on a user map,
7389 * we'd have to set the wpmapped bit.
7390 */
7391 /* m->vmp_wpmapped = TRUE; */
7392 assert(map->pmap == kernel_pmap);
7393
7394 kr = pmap_enter_check(map->pmap, addr, m, prot_to_map, VM_PROT_NONE, 0, TRUE);
7395
7396 assert(kr == KERN_SUCCESS);
7397 #if KASAN
7398 kasan_notify_address(addr, PAGE_SIZE_64);
7399 #endif
7400 }
7401 offset += PAGE_SIZE_64;
7402 }
7403 vm_object_unlock(upl->map_object);
7404
7405 /*
7406 * hold a reference for the mapping
7407 */
7408 upl->ref_count++;
7409 upl->flags |= UPL_PAGE_LIST_MAPPED;
7410 upl->kaddr = (vm_offset_t) *dst_addr;
7411 assert(upl->kaddr == *dst_addr);
7412
7413 if (isVectorUPL) {
7414 goto process_upl_to_enter;
7415 }
7416
7417 if (!isVectorUPL) {
7418 vm_map_offset_t addr_adjustment;
7419
7420 addr_adjustment = (vm_map_offset_t)(upl->u_offset - upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)));
7421 if (addr_adjustment) {
7422 assert(VM_MAP_PAGE_MASK(map) != PAGE_MASK);
7423 DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr, (uint64_t)addr_adjustment, (uint64_t)(*dst_addr + addr_adjustment));
7424 *dst_addr += addr_adjustment;
7425 }
7426 }
7427
7428 upl_unlock(upl);
7429
7430 return KERN_SUCCESS;
7431 }
7432
7433 kern_return_t
7434 vm_map_enter_upl(
7435 vm_map_t map,
7436 upl_t upl,
7437 vm_map_offset_t *dst_addr)
7438 {
7439 upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7440 return vm_map_enter_upl_range(map, upl, 0, upl_size, VM_PROT_DEFAULT, dst_addr);
7441 }
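/*
 * Illustrative sketch (editorial, not a kernel code path): one way a
 * kernel-internal caller might pair UPL creation, mapping and unmapping.
 * "offset", "io_size" and the UPL_COPYOUT_FROM flag below are hypothetical
 * placeholders for the caller's state.
 *
 *	upl_t upl = UPL_NULL;
 *	upl_size_t size = io_size;
 *	unsigned int count = 0;
 *	upl_control_flags_t flags = UPL_COPYOUT_FROM;
 *	vm_map_offset_t kaddr;
 *
 *	if (vm_map_create_upl(map, offset, &size, &upl, NULL, &count,
 *	        &flags, VM_KERN_MEMORY_NONE) == KERN_SUCCESS &&
 *	    vm_map_enter_upl(kernel_map, upl, &kaddr) == KERN_SUCCESS) {
 *		// ... access the wired pages through "kaddr" ...
 *		vm_map_remove_upl(kernel_map, upl);
 *		// the UPL itself still has to be committed or aborted,
 *		// and then deallocated, by the caller.
 *	}
 */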
7442
7443 /*
7444 * Internal routine to remove a UPL mapping from a VM map.
7445 *
7446 * XXX - This should just be doable through a standard
7447 * vm_map_remove() operation. Otherwise, implicit clean-up
7448 * of the target map won't be able to correctly remove
7449 * these (and release the reference on the UPL). Having
7450 * to do this means we can't map these into user-space
7451 * maps yet.
7452 */
7453 kern_return_t
7454 vm_map_remove_upl_range(
7455 vm_map_t map,
7456 upl_t upl,
7457 __unused vm_object_offset_t offset_to_unmap,
7458 __unused upl_size_t size_to_unmap)
7459 {
7460 vm_address_t addr;
7461 upl_size_t size;
7462 int isVectorUPL = 0, curr_upl = 0;
7463 upl_t vector_upl = NULL;
7464
7465 if (upl == UPL_NULL) {
7466 return KERN_INVALID_ARGUMENT;
7467 }
7468
7469 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7470 int unmapped = 0, valid_upls = 0;
7471 vector_upl = upl;
7472 upl_lock(vector_upl);
7473 for (curr_upl = 0; curr_upl < vector_upl_max_upls(vector_upl); curr_upl++) {
7474 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
7475 if (upl == NULL) {
7476 continue;
7477 }
7478 valid_upls++;
7479 if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
7480 unmapped++;
7481 }
7482 }
7483
7484 if (unmapped) {
7485 if (unmapped != valid_upls) {
7486 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped", unmapped, valid_upls);
7487 } else {
7488 upl_unlock(vector_upl);
7489 return KERN_FAILURE;
7490 }
7491 }
7492 curr_upl = 0;
7493 } else {
7494 upl_lock(upl);
7495 }
7496
7497 process_upl_to_remove:
7498 if (isVectorUPL) {
7499 if (curr_upl == vector_upl_max_upls(vector_upl)) {
7500 vm_map_t v_upl_submap;
7501 vm_offset_t v_upl_submap_dst_addr;
7502 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
7503
7504 kmem_free_guard(map, v_upl_submap_dst_addr,
7505 vector_upl->u_size, KMF_NONE, KMEM_GUARD_SUBMAP);
7506 vm_map_deallocate(v_upl_submap);
7507 upl_unlock(vector_upl);
7508 return KERN_SUCCESS;
7509 }
7510
7511 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7512 if (upl == NULL) {
7513 goto process_upl_to_remove;
7514 }
7515 }
7516
7517 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
7518 addr = upl->kaddr;
7519 size = upl->u_mapped_size;
7520
7521 assert(upl->ref_count > 1);
7522 upl->ref_count--; /* removing mapping ref */
7523
7524 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
7525 upl->kaddr = (vm_offset_t) 0;
7526 upl->u_mapped_size = 0;
7527
7528 if (isVectorUPL) {
7529 /*
7530 * If it's a Vectored UPL, we'll be removing the entire
7531 * submap anyways, so no need to remove individual UPL
7532 * element mappings from within the submap
7533 */
7534 goto process_upl_to_remove;
7535 }
7536
7537 upl_unlock(upl);
7538
7539 vm_map_remove(map,
7540 vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
7541 vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)));
7542 return KERN_SUCCESS;
7543 }
7544 upl_unlock(upl);
7545
7546 return KERN_FAILURE;
7547 }
7548
7549 kern_return_t
7550 vm_map_remove_upl(
7551 vm_map_t map,
7552 upl_t upl)
7553 {
7554 upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7555 return vm_map_remove_upl_range(map, upl, 0, upl_size);
7556 }
7557
7558 kern_return_t
7559 upl_commit_range(
7560 upl_t upl,
7561 upl_offset_t offset,
7562 upl_size_t size,
7563 int flags,
7564 upl_page_info_t *page_list,
7565 mach_msg_type_number_t count,
7566 boolean_t *empty)
7567 {
7568 upl_size_t xfer_size, subupl_size;
7569 vm_object_t shadow_object;
7570 vm_object_t object;
7571 vm_object_t m_object;
7572 vm_object_offset_t target_offset;
7573 upl_offset_t subupl_offset = offset;
7574 int entry;
7575 int occupied;
7576 int clear_refmod = 0;
7577 int pgpgout_count = 0;
7578 struct vm_page_delayed_work dw_array;
7579 struct vm_page_delayed_work *dwp, *dwp_start;
7580 bool dwp_finish_ctx = TRUE;
7581 int dw_count;
7582 int dw_limit;
7583 int isVectorUPL = 0;
7584 upl_t vector_upl = NULL;
7585 boolean_t should_be_throttled = FALSE;
7586
7587 vm_page_t nxt_page = VM_PAGE_NULL;
7588 int fast_path_possible = 0;
7589 int fast_path_full_commit = 0;
7590 int throttle_page = 0;
7591 int unwired_count = 0;
7592 int local_queue_count = 0;
7593 vm_page_t first_local, last_local;
7594 vm_object_offset_t obj_start, obj_end, obj_offset;
7595 kern_return_t kr = KERN_SUCCESS;
7596
7597 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
7598
7599 dwp_start = dwp = NULL;
7600
7601 subupl_size = size;
7602 *empty = FALSE;
7603
7604 if (upl == UPL_NULL) {
7605 return KERN_INVALID_ARGUMENT;
7606 }
7607
7608 dw_count = 0;
7609 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
7610 dwp_start = vm_page_delayed_work_get_ctx();
7611 if (dwp_start == NULL) {
7612 dwp_start = &dw_array;
7613 dw_limit = 1;
7614 dwp_finish_ctx = FALSE;
7615 }
7616
7617 dwp = dwp_start;
7618
7619 if (count == 0) {
7620 page_list = NULL;
7621 }
7622
7623 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7624 vector_upl = upl;
7625 upl_lock(vector_upl);
7626 } else {
7627 upl_lock(upl);
7628 }
7629
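/*
 * For a vector UPL, each pass through this label peels off the sub-UPL
 * that covers the next part of the caller's (offset, size) range and
 * commits it; a plain UPL is committed in a single pass.
 */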
7630 process_upl_to_commit:
7631
7632 if (isVectorUPL) {
7633 size = subupl_size;
7634 offset = subupl_offset;
7635 if (size == 0) {
7636 upl_unlock(vector_upl);
7637 kr = KERN_SUCCESS;
7638 goto done;
7639 }
7640 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7641 if (upl == NULL) {
7642 upl_unlock(vector_upl);
7643 kr = KERN_FAILURE;
7644 goto done;
7645 }
7646 page_list = upl->page_list;
7647 subupl_size -= size;
7648 subupl_offset += size;
7649 }
7650
7651 #if UPL_DEBUG
7652 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7653 upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
7654 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7655 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7656
7657 upl->upl_commit_index++;
7658 }
7659 #endif
7660 if (upl->flags & UPL_DEVICE_MEMORY) {
7661 xfer_size = 0;
7662 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
7663 xfer_size = size;
7664 } else {
7665 if (!isVectorUPL) {
7666 upl_unlock(upl);
7667 } else {
7668 upl_unlock(vector_upl);
7669 }
7670 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
7671 kr = KERN_FAILURE;
7672 goto done;
7673 }
7674 if (upl->flags & UPL_SET_DIRTY) {
7675 flags |= UPL_COMMIT_SET_DIRTY;
7676 }
7677 if (upl->flags & UPL_CLEAR_DIRTY) {
7678 flags |= UPL_COMMIT_CLEAR_DIRTY;
7679 }
7680
7681 object = upl->map_object;
7682
7683 if (upl->flags & UPL_SHADOWED) {
7684 vm_object_lock(object);
7685 shadow_object = object->shadow;
7686 } else {
7687 shadow_object = object;
7688 }
7689 entry = offset / PAGE_SIZE;
7690 target_offset = (vm_object_offset_t)offset;
7691
7692 if (upl->flags & UPL_KERNEL_OBJECT) {
7693 vm_object_lock_shared(shadow_object);
7694 } else {
7695 vm_object_lock(shadow_object);
7696 }
7697
7698 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
7699
7700 if (upl->flags & UPL_ACCESS_BLOCKED) {
7701 assert(shadow_object->blocked_access);
7702 shadow_object->blocked_access = FALSE;
7703 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7704 }
7705
7706 if (shadow_object->code_signed) {
7707 /*
7708 * CODE SIGNING:
7709 * If the object is code-signed, do not let this UPL tell
7710 * us if the pages are valid or not. Let the pages be
7711 * validated by VM the normal way (when they get mapped or
7712 * copied).
7713 */
7714 flags &= ~UPL_COMMIT_CS_VALIDATED;
7715 }
7716 if (!page_list) {
7717 /*
7718 * No page list to get the code-signing info from !?
7719 */
7720 flags &= ~UPL_COMMIT_CS_VALIDATED;
7721 }
7722 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
7723 should_be_throttled = TRUE;
7724 }
7725
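/*
 * Fast path: for a wired (UPL_IO_WIRE) commit of a non-vectored UPL
 * against an object that is neither volatile nor empty purgeable, the
 * pages can be unwired here and gathered on a local list, which is later
 * spliced onto the appropriate global page queue in one shot instead of
 * taking the page-queues lock for every page.
 */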
7726 if ((upl->flags & UPL_IO_WIRE) &&
7727 !(flags & UPL_COMMIT_FREE_ABSENT) &&
7728 !isVectorUPL &&
7729 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
7730 shadow_object->purgable != VM_PURGABLE_EMPTY) {
7731 if (!vm_page_queue_empty(&shadow_object->memq)) {
7732 if (shadow_object->internal && size == shadow_object->vo_size) {
7733 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
7734 fast_path_full_commit = 1;
7735 }
7736 fast_path_possible = 1;
7737
7738 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
7739 (shadow_object->purgable == VM_PURGABLE_DENY ||
7740 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
7741 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
7742 throttle_page = 1;
7743 }
7744 }
7745 }
7746 first_local = VM_PAGE_NULL;
7747 last_local = VM_PAGE_NULL;
7748
7749 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
7750 obj_end = obj_start + xfer_size;
7751 obj_start = vm_object_trunc_page(obj_start);
7752 obj_end = vm_object_round_page(obj_end);
7753 for (obj_offset = obj_start;
7754 obj_offset < obj_end;
7755 obj_offset += PAGE_SIZE) {
7756 vm_page_t t, m;
7757
7758 dwp->dw_mask = 0;
7759 clear_refmod = 0;
7760
7761 m = VM_PAGE_NULL;
7762
7763 if (upl->flags & UPL_LITE) {
7764 unsigned int pg_num;
7765
7766 if (nxt_page != VM_PAGE_NULL) {
7767 m = nxt_page;
7768 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
7769 target_offset = m->vmp_offset;
7770 }
7771 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7772 assert(pg_num == target_offset / PAGE_SIZE);
7773
7774 if (bitmap_test(upl->lite_list, pg_num)) {
7775 bitmap_clear(upl->lite_list, pg_num);
7776
7777 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7778 m = vm_page_lookup(shadow_object, obj_offset);
7779 }
7780 } else {
7781 m = NULL;
7782 }
7783 }
7784 if (upl->flags & UPL_SHADOWED) {
7785 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7786 t->vmp_free_when_done = FALSE;
7787
7788 VM_PAGE_FREE(t);
7789
7790 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7791 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7792 }
7793 }
7794 }
7795 if (m == VM_PAGE_NULL) {
7796 goto commit_next_page;
7797 }
7798
7799 m_object = VM_PAGE_OBJECT(m);
7800
7801 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7802 assert(m->vmp_busy);
7803
7804 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7805 goto commit_next_page;
7806 }
7807
7808 if (flags & UPL_COMMIT_CS_VALIDATED) {
7809 /*
7810 * CODE SIGNING:
7811 * Set the code signing bits according to
7812 * what the UPL says they should be.
7813 */
7814 m->vmp_cs_validated |= page_list[entry].cs_validated;
7815 m->vmp_cs_tainted |= page_list[entry].cs_tainted;
7816 m->vmp_cs_nx |= page_list[entry].cs_nx;
7817 }
7818 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
7819 m->vmp_written_by_kernel = TRUE;
7820 }
7821
7822 if (upl->flags & UPL_IO_WIRE) {
7823 if (page_list) {
7824 page_list[entry].phys_addr = 0;
7825 }
7826
7827 if (flags & UPL_COMMIT_SET_DIRTY) {
7828 SET_PAGE_DIRTY(m, FALSE);
7829 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7830 m->vmp_dirty = FALSE;
7831
7832 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7833 m->vmp_cs_validated &&
7834 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7835 /*
7836 * CODE SIGNING:
7837 * This page is no longer dirty
7838 * but could have been modified,
7839 * so it will need to be
7840 * re-validated.
7841 */
7842 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7843
7844 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7845
7846 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7847 }
7848 clear_refmod |= VM_MEM_MODIFIED;
7849 }
7850 if (upl->flags & UPL_ACCESS_BLOCKED) {
7851 /*
7852 * We blocked access to the pages in this UPL.
7853 * Clear the "busy" bit and wake up any waiter
7854 * for this page.
7855 */
7856 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7857 }
7858 if (fast_path_possible) {
7859 assert(m_object->purgable != VM_PURGABLE_EMPTY);
7860 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
7861 if (m->vmp_absent) {
7862 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
7863 assert(m->vmp_wire_count == 0);
7864 assert(m->vmp_busy);
7865
7866 m->vmp_absent = FALSE;
7867 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7868 } else {
7869 if (m->vmp_wire_count == 0) {
7870 panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
7871 }
7872 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
7873
7874 /*
7875 * XXX FBDP need to update some other
7876 * counters here (purgeable_wired_count)
7877 * (ledgers), ...
7878 */
7879 assert(m->vmp_wire_count > 0);
7880 m->vmp_wire_count--;
7881
7882 if (m->vmp_wire_count == 0) {
7883 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
7884 unwired_count++;
7885 }
7886 }
7887 if (m->vmp_wire_count == 0) {
7888 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7889
7890 if (last_local == VM_PAGE_NULL) {
7891 assert(first_local == VM_PAGE_NULL);
7892
7893 last_local = m;
7894 first_local = m;
7895 } else {
7896 assert(first_local != VM_PAGE_NULL);
7897
7898 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7899 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
7900 first_local = m;
7901 }
7902 local_queue_count++;
7903
7904 if (throttle_page) {
7905 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
7906 } else {
7907 if (flags & UPL_COMMIT_INACTIVATE) {
7908 if (shadow_object->internal) {
7909 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
7910 } else {
7911 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
7912 }
7913 } else {
7914 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
7915 }
7916 }
7917 }
7918 } else {
7919 if (flags & UPL_COMMIT_INACTIVATE) {
7920 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7921 clear_refmod |= VM_MEM_REFERENCED;
7922 }
7923 if (m->vmp_absent) {
7924 if (flags & UPL_COMMIT_FREE_ABSENT) {
7925 dwp->dw_mask |= DW_vm_page_free;
7926 } else {
7927 m->vmp_absent = FALSE;
7928 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7929
7930 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
7931 dwp->dw_mask |= DW_vm_page_activate;
7932 }
7933 }
7934 } else {
7935 dwp->dw_mask |= DW_vm_page_unwire;
7936 }
7937 }
7938 goto commit_next_page;
7939 }
7940 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7941
7942 if (page_list) {
7943 page_list[entry].phys_addr = 0;
7944 }
7945
7946 /*
7947 * make sure to clear the hardware
7948 * modify or reference bits before
7949 * releasing the BUSY bit on this page
7950 * otherwise we risk losing a legitimate
7951 * change of state
7952 */
7953 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7954 m->vmp_dirty = FALSE;
7955
7956 clear_refmod |= VM_MEM_MODIFIED;
7957 }
7958 if (m->vmp_laundry) {
7959 dwp->dw_mask |= DW_vm_pageout_throttle_up;
7960 }
7961
7962 if (VM_PAGE_WIRED(m)) {
7963 m->vmp_free_when_done = FALSE;
7964 }
7965
7966 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7967 m->vmp_cs_validated &&
7968 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7969 /*
7970 * CODE SIGNING:
7971 * This page is no longer dirty
7972 * but could have been modified,
7973 * so it will need to be
7974 * re-validated.
7975 */
7976 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7977
7978 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7979
7980 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7981 }
7982 if (m->vmp_overwriting) {
7983 /*
7984 * the (COPY_OUT_FROM == FALSE) request_page_list case
7985 */
7986 if (m->vmp_busy) {
7987 #if CONFIG_PHANTOM_CACHE
7988 if (m->vmp_absent && !m_object->internal) {
7989 dwp->dw_mask |= DW_vm_phantom_cache_update;
7990 }
7991 #endif
7992 m->vmp_absent = FALSE;
7993
7994 dwp->dw_mask |= DW_clear_busy;
7995 } else {
7996 /*
7997 * alternate (COPY_OUT_FROM == FALSE) page_list case
7998 * Occurs when the original page was wired
7999 * at the time of the list request
8000 */
8001 assert(VM_PAGE_WIRED(m));
8002
8003 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
8004 }
8005 m->vmp_overwriting = FALSE;
8006 }
8007 m->vmp_cleaning = FALSE;
8008
8009 if (m->vmp_free_when_done) {
8010 /*
8011 * With the clean queue enabled, UPL_PAGEOUT should
8012 * no longer set the pageout bit. Its pages now go
8013 * to the clean queue.
8014 *
8015 * We don't use the cleaned Q anymore and so this
8016 * assert isn't correct. The code for the clean Q
8017 * still exists and might be used in the future. If we
8018 * go back to the cleaned Q, we will re-enable this
8019 * assert.
8020 *
8021 * assert(!(upl->flags & UPL_PAGEOUT));
8022 */
8023 assert(!m_object->internal);
8024
8025 m->vmp_free_when_done = FALSE;
8026
8027 if ((flags & UPL_COMMIT_SET_DIRTY) ||
8028 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
8029 /*
8030 * page was re-dirtied after we started
8031 * the pageout... reactivate it since
8032 * we don't know whether the on-disk
8033 * copy matches what is now in memory
8034 */
8035 SET_PAGE_DIRTY(m, FALSE);
8036
8037 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
8038
8039 if (upl->flags & UPL_PAGEOUT) {
8040 counter_inc(&vm_statistics_reactivations);
8041 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
8042 }
8043 } else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
8044 /*
8045 * Someone else might still be handling this
8046 * page (vm_fault() for example), so let's not
8047 * free it or "un-busy" it!
8048 * Put that page in the "speculative" queue
8049 * for now (since we would otherwise have freed
8050 * it) and let whoever is keeping the page
8051 * "busy" move it if needed when they're done
8052 * with it.
8053 */
8054 dwp->dw_mask |= DW_vm_page_speculate;
8055 } else {
8056 /*
8057 * page has been successfully cleaned
8058 * go ahead and free it for other use
8059 */
8060 if (m_object->internal) {
8061 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
8062 } else {
8063 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
8064 }
8065 m->vmp_dirty = FALSE;
8066 if (!(upl->flags & UPL_HAS_BUSY)) {
8067 assert(!m->vmp_busy);
8068 }
8069 m->vmp_busy = TRUE;
8070
8071 dwp->dw_mask |= DW_vm_page_free;
8072 }
8073 goto commit_next_page;
8074 }
8075 /*
8076 * It is part of the semantics of COPYOUT_FROM
8077 * UPLs that a commit implies a cache sync
8078 * between the vm page and the backing store;
8079 * this can be used to strip the precious bit
8080 * as well as to clean the page.
8081 */
8082 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
8083 m->vmp_precious = FALSE;
8084 }
8085
8086 if (flags & UPL_COMMIT_SET_DIRTY) {
8087 SET_PAGE_DIRTY(m, FALSE);
8088 } else {
8089 m->vmp_dirty = FALSE;
8090 }
8091
8092 /* with the clean queue on, move *all* cleaned pages to the clean queue */
8093 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
8094 pgpgout_count++;
8095
8096 counter_inc(&vm_statistics_pageouts);
8097 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
8098
8099 dwp->dw_mask |= DW_enqueue_cleaned;
8100 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
8101 /*
8102 * page coming back in from being 'frozen'...
8103 * it was dirty before it was frozen, so keep it so
8104 * the vm_page_activate will notice that it really belongs
8105 * on the throttle queue and put it there
8106 */
8107 SET_PAGE_DIRTY(m, FALSE);
8108 dwp->dw_mask |= DW_vm_page_activate;
8109 } else {
8110 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
8111 dwp->dw_mask |= DW_vm_page_deactivate_internal;
8112 clear_refmod |= VM_MEM_REFERENCED;
8113 } else if (!VM_PAGE_PAGEABLE(m)) {
8114 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
8115 dwp->dw_mask |= DW_vm_page_speculate;
8116 } else if (m->vmp_reference) {
8117 dwp->dw_mask |= DW_vm_page_activate;
8118 } else {
8119 dwp->dw_mask |= DW_vm_page_deactivate_internal;
8120 clear_refmod |= VM_MEM_REFERENCED;
8121 }
8122 }
8123 }
8124 if (upl->flags & UPL_ACCESS_BLOCKED) {
8125 /*
8126 * We blocked access to the pages in this UPL.
8127 * Clear the "busy" bit on this page before we
8128 * wake up any waiter.
8129 */
8130 dwp->dw_mask |= DW_clear_busy;
8131 }
8132 /*
8133 * Wakeup any thread waiting for the page to be un-cleaning.
8134 */
8135 dwp->dw_mask |= DW_PAGE_WAKEUP;
8136
8137 commit_next_page:
8138 if (clear_refmod) {
8139 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
8140 }
8141
8142 target_offset += PAGE_SIZE_64;
8143 xfer_size -= PAGE_SIZE;
8144 entry++;
8145
8146 if (dwp->dw_mask) {
8147 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
8148 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
8149
8150 if (dw_count >= dw_limit) {
8151 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8152
8153 dwp = dwp_start;
8154 dw_count = 0;
8155 }
8156 } else {
8157 if (dwp->dw_mask & DW_clear_busy) {
8158 m->vmp_busy = FALSE;
8159 }
8160
8161 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
8162 PAGE_WAKEUP(m);
8163 }
8164 }
8165 }
8166 }
8167 if (dw_count) {
8168 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8169 dwp = dwp_start;
8170 dw_count = 0;
8171 }
8172
8173 if (fast_path_possible) {
8174 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
8175 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
8176
8177 if (local_queue_count || unwired_count) {
8178 if (local_queue_count) {
8179 vm_page_t first_target;
8180 vm_page_queue_head_t *target_queue;
8181
8182 if (throttle_page) {
8183 target_queue = &vm_page_queue_throttled;
8184 } else {
8185 if (flags & UPL_COMMIT_INACTIVATE) {
8186 if (shadow_object->internal) {
8187 target_queue = &vm_page_queue_anonymous;
8188 } else {
8189 target_queue = &vm_page_queue_inactive;
8190 }
8191 } else {
8192 target_queue = &vm_page_queue_active;
8193 }
8194 }
8195 /*
8196 * Transfer the entire local queue to the appropriate LRU page queue.
8197 */
8198 vm_page_lockspin_queues();
8199
8200 first_target = (vm_page_t) vm_page_queue_first(target_queue);
8201
8202 if (vm_page_queue_empty(target_queue)) {
8203 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
8204 } else {
8205 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
8206 }
8207
8208 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
8209 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
8210 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
8211
8212 /*
8213 * Adjust the global page counts.
8214 */
8215 if (throttle_page) {
8216 vm_page_throttled_count += local_queue_count;
8217 } else {
8218 if (flags & UPL_COMMIT_INACTIVATE) {
8219 if (shadow_object->internal) {
8220 vm_page_anonymous_count += local_queue_count;
8221 }
8222 vm_page_inactive_count += local_queue_count;
8223
8224 token_new_pagecount += local_queue_count;
8225 } else {
8226 vm_page_active_count += local_queue_count;
8227 }
8228
8229 if (shadow_object->internal) {
8230 vm_page_pageable_internal_count += local_queue_count;
8231 } else {
8232 vm_page_pageable_external_count += local_queue_count;
8233 }
8234 }
8235 } else {
8236 vm_page_lockspin_queues();
8237 }
8238 if (unwired_count) {
8239 vm_page_wire_count -= unwired_count;
8240 VM_CHECK_MEMORYSTATUS;
8241 }
8242 vm_page_unlock_queues();
8243
8244 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
8245 }
8246 }
8247
8248 if (upl->flags & UPL_DEVICE_MEMORY) {
8249 occupied = 0;
8250 } else if (upl->flags & UPL_LITE) {
8251 uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
8252
8253 occupied = !fast_path_full_commit &&
8254 !bitmap_is_empty(upl->lite_list, pages);
8255 } else {
8256 occupied = !vm_page_queue_empty(&upl->map_object->memq);
8257 }
8258 if (occupied == 0) {
8259 /*
8260 * If this UPL element belongs to a Vector UPL and is
8261 * empty, then this is the right function to deallocate
8262 * it. So go ahead and set the *empty variable. The flag
8263 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view,
8264 * should be considered relevant for the Vector UPL and not
8265 * the internal UPLs.
8266 */
8267 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8268 *empty = TRUE;
8269 }
8270
8271 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8272 /*
8273 * this is not a paging object
8274 * so we need to drop the paging reference
8275 * that was taken when we created the UPL
8276 * against this object
8277 */
8278 vm_object_activity_end(shadow_object);
8279 vm_object_collapse(shadow_object, 0, TRUE);
8280 } else {
8281 /*
8282 * we donated the paging reference to
8283 * the map object... vm_pageout_object_terminate
8284 * will drop this reference
8285 */
8286 }
8287 }
8288 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
8289 vm_object_unlock(shadow_object);
8290 if (object != shadow_object) {
8291 vm_object_unlock(object);
8292 }
8293
8294 if (!isVectorUPL) {
8295 upl_unlock(upl);
8296 } else {
8297 /*
8298 * If we completed our operations on a UPL that is
8299 * part of a Vectored UPL and if empty is TRUE, then
8300 * we should go ahead and deallocate this UPL element.
8301 * Then we check if this was the last of the UPL elements
8302 * within that Vectored UPL. If so, set empty to TRUE
8303 * so that in ubc_upl_commit_range or ubc_upl_commit, we
8304 * can go ahead and deallocate the Vector UPL too.
8305 */
8306 if (*empty == TRUE) {
8307 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
8308 upl_deallocate(upl);
8309 }
8310 goto process_upl_to_commit;
8311 }
8312 if (pgpgout_count) {
8313 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
8314 }
8315
8316 kr = KERN_SUCCESS;
8317 done:
8318 if (dwp_start && dwp_finish_ctx) {
8319 vm_page_delayed_work_finish_ctx(dwp_start);
8320 dwp_start = dwp = NULL;
8321 }
8322
8323 return kr;
8324 }
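/*
 * Illustrative sketch (editorial): a typical caller commits the UPL range
 * it has finished with and deallocates the UPL once the commit reports it
 * empty.  "io_offset", "io_size", "pl" and "page_count" are hypothetical
 * placeholders for the caller's transfer state.
 *
 *	boolean_t empty = FALSE;
 *
 *	upl_commit_range(upl, io_offset, io_size, UPL_COMMIT_CLEAR_DIRTY,
 *	    pl, page_count, &empty);
 *	if (empty) {
 *		upl_deallocate(upl);	// drop the creation reference
 *	}
 */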
8325
8326 kern_return_t
8327 upl_abort_range(
8328 upl_t upl,
8329 upl_offset_t offset,
8330 upl_size_t size,
8331 int error,
8332 boolean_t *empty)
8333 {
8334 upl_size_t xfer_size, subupl_size;
8335 vm_object_t shadow_object;
8336 vm_object_t object;
8337 vm_object_offset_t target_offset;
8338 upl_offset_t subupl_offset = offset;
8339 int occupied;
8340 struct vm_page_delayed_work dw_array;
8341 struct vm_page_delayed_work *dwp, *dwp_start;
8342 bool dwp_finish_ctx = TRUE;
8343 int dw_count;
8344 int dw_limit;
8345 int isVectorUPL = 0;
8346 upl_t vector_upl = NULL;
8347 vm_object_offset_t obj_start, obj_end, obj_offset;
8348 kern_return_t kr = KERN_SUCCESS;
8349
8350 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
8351
8352 dwp_start = dwp = NULL;
8353
8354 subupl_size = size;
8355 *empty = FALSE;
8356
8357 if (upl == UPL_NULL) {
8358 return KERN_INVALID_ARGUMENT;
8359 }
8360
8361 if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
8362 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
8363 }
8364
8365 dw_count = 0;
8366 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
8367 dwp_start = vm_page_delayed_work_get_ctx();
8368 if (dwp_start == NULL) {
8369 dwp_start = &dw_array;
8370 dw_limit = 1;
8371 dwp_finish_ctx = FALSE;
8372 }
8373
8374 dwp = dwp_start;
8375
8376 if ((isVectorUPL = vector_upl_is_valid(upl))) {
8377 vector_upl = upl;
8378 upl_lock(vector_upl);
8379 } else {
8380 upl_lock(upl);
8381 }
8382
8383 process_upl_to_abort:
8384 if (isVectorUPL) {
8385 size = subupl_size;
8386 offset = subupl_offset;
8387 if (size == 0) {
8388 upl_unlock(vector_upl);
8389 kr = KERN_SUCCESS;
8390 goto done;
8391 }
8392 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
8393 if (upl == NULL) {
8394 upl_unlock(vector_upl);
8395 kr = KERN_FAILURE;
8396 goto done;
8397 }
8398 subupl_size -= size;
8399 subupl_offset += size;
8400 }
8401
8402 *empty = FALSE;
8403
8404 #if UPL_DEBUG
8405 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
8406 upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
8407 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
8408 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
8409 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
8410
8411 upl->upl_commit_index++;
8412 }
8413 #endif
8414 if (upl->flags & UPL_DEVICE_MEMORY) {
8415 xfer_size = 0;
8416 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
8417 xfer_size = size;
8418 } else {
8419 if (!isVectorUPL) {
8420 upl_unlock(upl);
8421 } else {
8422 upl_unlock(vector_upl);
8423 }
8424 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
8425 kr = KERN_FAILURE;
8426 goto done;
8427 }
8428 object = upl->map_object;
8429
8430 if (upl->flags & UPL_SHADOWED) {
8431 vm_object_lock(object);
8432 shadow_object = object->shadow;
8433 } else {
8434 shadow_object = object;
8435 }
8436
8437 target_offset = (vm_object_offset_t)offset;
8438
8439 if (upl->flags & UPL_KERNEL_OBJECT) {
8440 vm_object_lock_shared(shadow_object);
8441 } else {
8442 vm_object_lock(shadow_object);
8443 }
8444
8445 if (upl->flags & UPL_ACCESS_BLOCKED) {
8446 assert(shadow_object->blocked_access);
8447 shadow_object->blocked_access = FALSE;
8448 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
8449 }
8450
8451 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
8452 panic("upl_abort_range: kernel_object being DUMPED");
8453 }
8454
8455 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
8456 obj_end = obj_start + xfer_size;
8457 obj_start = vm_object_trunc_page(obj_start);
8458 obj_end = vm_object_round_page(obj_end);
8459 for (obj_offset = obj_start;
8460 obj_offset < obj_end;
8461 obj_offset += PAGE_SIZE) {
8462 vm_page_t t, m;
8463 unsigned int pg_num;
8464 boolean_t needed;
8465
8466 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
8467 assert(pg_num == target_offset / PAGE_SIZE);
8468
8469 needed = FALSE;
8470
8471 if (upl->flags & UPL_INTERNAL) {
8472 needed = upl->page_list[pg_num].needed;
8473 }
8474
8475 dwp->dw_mask = 0;
8476 m = VM_PAGE_NULL;
8477
8478 if (upl->flags & UPL_LITE) {
8479 if (bitmap_test(upl->lite_list, pg_num)) {
8480 bitmap_clear(upl->lite_list, pg_num);
8481
8482 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
8483 m = vm_page_lookup(shadow_object, obj_offset);
8484 }
8485 }
8486 }
8487 if (upl->flags & UPL_SHADOWED) {
8488 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
8489 t->vmp_free_when_done = FALSE;
8490
8491 VM_PAGE_FREE(t);
8492
8493 if (m == VM_PAGE_NULL) {
8494 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
8495 }
8496 }
8497 }
8498 if ((upl->flags & UPL_KERNEL_OBJECT)) {
8499 goto abort_next_page;
8500 }
8501
8502 if (m != VM_PAGE_NULL) {
8503 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8504
8505 if (m->vmp_absent) {
8506 boolean_t must_free = TRUE;
8507
8508 /*
8509 * COPYOUT = FALSE case
8510 * check for error conditions which must
8511 * be passed back to the page's customer
8512 */
8513 if (error & UPL_ABORT_RESTART) {
8514 m->vmp_restart = TRUE;
8515 m->vmp_absent = FALSE;
8516 m->vmp_unusual = TRUE;
8517 must_free = FALSE;
8518 } else if (error & UPL_ABORT_UNAVAILABLE) {
8519 m->vmp_restart = FALSE;
8520 m->vmp_unusual = TRUE;
8521 must_free = FALSE;
8522 } else if (error & UPL_ABORT_ERROR) {
8523 m->vmp_restart = FALSE;
8524 m->vmp_absent = FALSE;
8525 m->vmp_error = TRUE;
8526 m->vmp_unusual = TRUE;
8527 must_free = FALSE;
8528 }
8529 if (m->vmp_clustered && needed == FALSE) {
8530 /*
8531 * This page was a part of a speculative
8532 * read-ahead initiated by the kernel
8533 * itself. No one is expecting this
8534 * page and no one will clean up its
8535 * error state if it ever becomes valid
8536 * in the future.
8537 * We have to free it here.
8538 */
8539 must_free = TRUE;
8540 }
8541 m->vmp_cleaning = FALSE;
8542
8543 if (m->vmp_overwriting && !m->vmp_busy) {
8544 /*
8545 * this shouldn't happen since
8546 * this is an 'absent' page, but
8547 * it doesn't hurt to check for
8548 * the 'alternate' method of
8549 * stabilizing the page...
8550 * we will mark 'busy' to be cleared
8551 * in the following code which will
8552 * take care of the primary stabilization
8553 * method (i.e. setting 'busy' to TRUE)
8554 */
8555 dwp->dw_mask |= DW_vm_page_unwire;
8556 }
8557 m->vmp_overwriting = FALSE;
8558
8559 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
8560
8561 if (must_free == TRUE) {
8562 dwp->dw_mask |= DW_vm_page_free;
8563 } else {
8564 dwp->dw_mask |= DW_vm_page_activate;
8565 }
8566 } else {
8567 /*
8568 * Handle the trusted pager throttle.
8569 */
8570 if (m->vmp_laundry) {
8571 dwp->dw_mask |= DW_vm_pageout_throttle_up;
8572 }
8573
8574 if (upl->flags & UPL_ACCESS_BLOCKED) {
8575 /*
8576 * We blocked access to the pages in this UPL.
8577 * Clear the "busy" bit and wake up any waiter
8578 * for this page.
8579 */
8580 dwp->dw_mask |= DW_clear_busy;
8581 }
8582 if (m->vmp_overwriting) {
8583 if (m->vmp_busy) {
8584 dwp->dw_mask |= DW_clear_busy;
8585 } else {
8586 /*
8587 * deal with the 'alternate' method
8588 * of stabilizing the page...
8589 * we will either free the page
8590 * or mark 'busy' to be cleared
8591 * in the following code which will
8592 * take care of the primary stabilization
8593 * method (i.e. setting 'busy' to TRUE)
8594 */
8595 dwp->dw_mask |= DW_vm_page_unwire;
8596 }
8597 m->vmp_overwriting = FALSE;
8598 }
8599 m->vmp_free_when_done = FALSE;
8600 m->vmp_cleaning = FALSE;
8601
8602 if (error & UPL_ABORT_DUMP_PAGES) {
8603 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8604
8605 dwp->dw_mask |= DW_vm_page_free;
8606 } else {
8607 if (!(dwp->dw_mask & DW_vm_page_unwire)) {
8608 if (error & UPL_ABORT_REFERENCE) {
8609 /*
8610 * we've been told to explicitly
8611 * reference this page... for
8612 * file I/O, this is done by
8613 * implementing an LRU on the inactive q
8614 */
8615 dwp->dw_mask |= DW_vm_page_lru;
8616 } else if (!VM_PAGE_PAGEABLE(m)) {
8617 dwp->dw_mask |= DW_vm_page_deactivate_internal;
8618 }
8619 }
8620 dwp->dw_mask |= DW_PAGE_WAKEUP;
8621 }
8622 }
8623 }
8624 abort_next_page:
8625 target_offset += PAGE_SIZE_64;
8626 xfer_size -= PAGE_SIZE;
8627
8628 if (dwp->dw_mask) {
8629 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
8630 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
8631
8632 if (dw_count >= dw_limit) {
8633 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8634
8635 dwp = dwp_start;
8636 dw_count = 0;
8637 }
8638 } else {
8639 if (dwp->dw_mask & DW_clear_busy) {
8640 m->vmp_busy = FALSE;
8641 }
8642
8643 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
8644 PAGE_WAKEUP(m);
8645 }
8646 }
8647 }
8648 }
8649 if (dw_count) {
8650 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8651 dwp = dwp_start;
8652 dw_count = 0;
8653 }
8654
8655 if (upl->flags & UPL_DEVICE_MEMORY) {
8656 occupied = 0;
8657 } else if (upl->flags & UPL_LITE) {
8658 uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
8659
8660 occupied = !bitmap_is_empty(upl->lite_list, pages);
8661 } else {
8662 occupied = !vm_page_queue_empty(&upl->map_object->memq);
8663 }
8664 if (occupied == 0) {
8665 /*
8666 * If this UPL element belongs to a Vector UPL and is
8667 * empty, then this is the right function to deallocate
8668 * it. So go ahead and set the *empty variable. The flag
8669 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view,
8670 * should be considered relevant for the Vector UPL and
8671 * not the internal UPLs.
8672 */
8673 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8674 *empty = TRUE;
8675 }
8676
8677 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8678 /*
8679 * this is not a paging object
8680 * so we need to drop the paging reference
8681 * that was taken when we created the UPL
8682 * against this object
8683 */
8684 vm_object_activity_end(shadow_object);
8685 vm_object_collapse(shadow_object, 0, TRUE);
8686 } else {
8687 /*
8688 * we donated the paging reference to
8689 * the map object... vm_pageout_object_terminate
8690 * will drop this reference
8691 */
8692 }
8693 }
8694 vm_object_unlock(shadow_object);
8695 if (object != shadow_object) {
8696 vm_object_unlock(object);
8697 }
8698
8699 if (!isVectorUPL) {
8700 upl_unlock(upl);
8701 } else {
8702 /*
8703 * If we completed our operations on a UPL that is
8704 * part of a Vectored UPL and if empty is TRUE, then
8705 * we should go ahead and deallocate this UPL element.
8706 * Then we check if this was the last of the UPL elements
8707 * within that Vectored UPL. If so, set empty to TRUE
8708 * so that in ubc_upl_abort_range or ubc_upl_abort, we
8709 * can go ahead and deallocate the Vector UPL too.
8710 */
8711 if (*empty == TRUE) {
8712 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
8713 upl_deallocate(upl);
8714 }
8715 goto process_upl_to_abort;
8716 }
8717
8718 kr = KERN_SUCCESS;
8719
8720 done:
8721 if (dwp_start && dwp_finish_ctx) {
8722 vm_page_delayed_work_finish_ctx(dwp_start);
8723 dwp_start = dwp = NULL;
8724 }
8725
8726 return kr;
8727 }
8728
8729
8730 kern_return_t
8731 upl_abort(
8732 upl_t upl,
8733 int error)
8734 {
8735 boolean_t empty;
8736
8737 if (upl == UPL_NULL) {
8738 return KERN_INVALID_ARGUMENT;
8739 }
8740
8741 return upl_abort_range(upl, 0, upl->u_size, error, &empty);
8742 }
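/*
 * Illustrative sketch (editorial): on an I/O error a caller would
 * typically abort the affected range rather than commit it.  With
 * UPL_ABORT_ERROR, absent pages are marked unusual/in-error; adding
 * UPL_ABORT_DUMP_PAGES frees the resident pages outright.  "io_offset"
 * and "io_size" are hypothetical placeholders.
 *
 *	boolean_t empty = FALSE;
 *
 *	upl_abort_range(upl, io_offset, io_size,
 *	    UPL_ABORT_ERROR | UPL_ABORT_DUMP_PAGES, &empty);
 *	if (empty) {
 *		upl_deallocate(upl);
 *	}
 */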
8743
8744
8745 /* an option on commit should be wire */
8746 kern_return_t
8747 upl_commit(
8748 upl_t upl,
8749 upl_page_info_t *page_list,
8750 mach_msg_type_number_t count)
8751 {
8752 boolean_t empty;
8753
8754 if (upl == UPL_NULL) {
8755 return KERN_INVALID_ARGUMENT;
8756 }
8757
8758 return upl_commit_range(upl, 0, upl->u_size, 0,
8759 page_list, count, &empty);
8760 }
8761
8762
8763 void
8764 iopl_valid_data(
8765 upl_t upl,
8766 vm_tag_t tag)
8767 {
8768 vm_object_t object;
8769 vm_offset_t offset;
8770 vm_page_t m, nxt_page = VM_PAGE_NULL;
8771 upl_size_t size;
8772 int wired_count = 0;
8773
8774 if (upl == NULL) {
8775 panic("iopl_valid_data: NULL upl");
8776 }
8777 if (vector_upl_is_valid(upl)) {
8778 panic("iopl_valid_data: vector upl");
8779 }
8780 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
8781 panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
8782 }
8783
8784 object = upl->map_object;
8785
8786 if (is_kernel_object(object) || object == compressor_object) {
8787 panic("iopl_valid_data: object == kernel or compressor");
8788 }
8789
8790 if (object->purgable == VM_PURGABLE_VOLATILE ||
8791 object->purgable == VM_PURGABLE_EMPTY) {
8792 panic("iopl_valid_data: object %p purgable %d",
8793 object, object->purgable);
8794 }
8795
8796 size = upl_adjusted_size(upl, PAGE_MASK);
8797
8798 vm_object_lock(object);
8799 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
8800
8801 bool whole_object;
8802
8803 if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
8804 nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
8805 whole_object = true;
8806 } else {
8807 offset = (vm_offset_t)(upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset);
8808 whole_object = false;
8809 }
8810
8811 while (size) {
8812 if (whole_object) {
8813 if (nxt_page != VM_PAGE_NULL) {
8814 m = nxt_page;
8815 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
8816 }
8817 } else {
8818 m = vm_page_lookup(object, offset);
8819 offset += PAGE_SIZE;
8820
8821 if (m == VM_PAGE_NULL) {
8822 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
8823 }
8824 }
8825 if (m->vmp_busy) {
8826 if (!m->vmp_absent) {
8827 panic("iopl_valid_data: busy page w/o absent");
8828 }
8829
8830 if (m->vmp_pageq.next || m->vmp_pageq.prev) {
8831 panic("iopl_valid_data: busy+absent page on page queue");
8832 }
8833 if (m->vmp_reusable) {
8834 panic("iopl_valid_data: %p is reusable", m);
8835 }
8836
8837 m->vmp_absent = FALSE;
8838 m->vmp_dirty = TRUE;
8839 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
8840 assert(m->vmp_wire_count == 0);
8841 m->vmp_wire_count++;
8842 assert(m->vmp_wire_count);
8843 if (m->vmp_wire_count == 1) {
8844 m->vmp_q_state = VM_PAGE_IS_WIRED;
8845 wired_count++;
8846 } else {
8847 panic("iopl_valid_data: %p already wired", m);
8848 }
8849
8850 PAGE_WAKEUP_DONE(m);
8851 }
8852 size -= PAGE_SIZE;
8853 }
8854 if (wired_count) {
8855 VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
8856 assert(object->resident_page_count >= object->wired_page_count);
8857
8858 /* no need to adjust purgeable accounting for this object: */
8859 assert(object->purgable != VM_PURGABLE_VOLATILE);
8860 assert(object->purgable != VM_PURGABLE_EMPTY);
8861
8862 vm_page_lockspin_queues();
8863 vm_page_wire_count += wired_count;
8864 vm_page_unlock_queues();
8865 }
8866 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
8867 vm_object_unlock(object);
8868 }
8869
8870
8871 void
8872 vm_object_set_pmap_cache_attr(
8873 vm_object_t object,
8874 upl_page_info_array_t user_page_list,
8875 unsigned int num_pages,
8876 boolean_t batch_pmap_op)
8877 {
8878 unsigned int cache_attr = 0;
8879
8880 cache_attr = object->wimg_bits & VM_WIMG_MASK;
8881 assert(user_page_list);
8882 if (cache_attr != VM_WIMG_USE_DEFAULT) {
8883 PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
8884 }
8885 }
8886
8887
8888 static bool
8889 vm_object_iopl_wire_full(
8890 vm_object_t object,
8891 upl_t upl,
8892 upl_page_info_array_t user_page_list,
8893 upl_control_flags_t cntrl_flags,
8894 vm_tag_t tag)
8895 {
8896 vm_page_t dst_page;
8897 unsigned int entry;
8898 int page_count;
8899 int delayed_unlock = 0;
8900 boolean_t retval = TRUE;
8901 ppnum_t phys_page;
8902
8903 vm_object_lock_assert_exclusive(object);
8904 assert(object->purgable != VM_PURGABLE_VOLATILE);
8905 assert(object->purgable != VM_PURGABLE_EMPTY);
8906 assert(object->pager == NULL);
8907 assert(object->vo_copy == NULL);
8908 assert(object->shadow == NULL);
8909
8910 page_count = object->resident_page_count;
8911 dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
8912
8913 vm_page_lock_queues();
8914
8915 while (page_count--) {
8916 if (dst_page->vmp_busy ||
8917 dst_page->vmp_fictitious ||
8918 dst_page->vmp_absent ||
8919 VMP_ERROR_GET(dst_page) ||
8920 dst_page->vmp_cleaning ||
8921 dst_page->vmp_restart ||
8922 dst_page->vmp_laundry) {
8923 retval = FALSE;
8924 goto done;
8925 }
8926 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
8927 retval = FALSE;
8928 goto done;
8929 }
8930 dst_page->vmp_reference = TRUE;
8931
8932 vm_page_wire(dst_page, tag, FALSE);
8933
8934 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8935 SET_PAGE_DIRTY(dst_page, FALSE);
8936 }
8937 entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
8938 assert(entry >= 0 && entry < object->resident_page_count);
8939 bitmap_set(upl->lite_list, entry);
8940
8941 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8942
8943 if (phys_page > upl->highest_page) {
8944 upl->highest_page = phys_page;
8945 }
8946
8947 if (user_page_list) {
8948 user_page_list[entry].phys_addr = phys_page;
8949 user_page_list[entry].absent = dst_page->vmp_absent;
8950 user_page_list[entry].dirty = dst_page->vmp_dirty;
8951 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
8952 user_page_list[entry].precious = dst_page->vmp_precious;
8953 user_page_list[entry].device = FALSE;
8954 user_page_list[entry].speculative = FALSE;
8955 user_page_list[entry].cs_validated = FALSE;
8956 user_page_list[entry].cs_tainted = FALSE;
8957 user_page_list[entry].cs_nx = FALSE;
8958 user_page_list[entry].needed = FALSE;
8959 user_page_list[entry].mark = FALSE;
8960 }
8961 if (delayed_unlock++ > 256) {
8962 delayed_unlock = 0;
8963 lck_mtx_yield(&vm_page_queue_lock);
8964
8965 VM_CHECK_MEMORYSTATUS;
8966 }
8967 dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
8968 }
8969 done:
8970 vm_page_unlock_queues();
8971
8972 VM_CHECK_MEMORYSTATUS;
8973
8974 return retval;
8975 }
8976
8977
8978 static kern_return_t
8979 vm_object_iopl_wire_empty(
8980 vm_object_t object,
8981 upl_t upl,
8982 upl_page_info_array_t user_page_list,
8983 upl_control_flags_t cntrl_flags,
8984 vm_tag_t tag,
8985 vm_object_offset_t *dst_offset,
8986 int page_count,
8987 int *page_grab_count)
8988 {
8989 vm_page_t dst_page;
8990 boolean_t no_zero_fill = FALSE;
8991 int interruptible;
8992 int pages_wired = 0;
8993 int pages_inserted = 0;
8994 int entry = 0;
8995 uint64_t delayed_ledger_update = 0;
8996 kern_return_t ret = KERN_SUCCESS;
8997 int grab_options;
8998 ppnum_t phys_page;
8999
9000 vm_object_lock_assert_exclusive(object);
9001 assert(object->purgable != VM_PURGABLE_VOLATILE);
9002 assert(object->purgable != VM_PURGABLE_EMPTY);
9003 assert(object->pager == NULL);
9004 assert(object->vo_copy == NULL);
9005 assert(object->shadow == NULL);
9006
9007 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
9008 interruptible = THREAD_ABORTSAFE;
9009 } else {
9010 interruptible = THREAD_UNINT;
9011 }
9012
9013 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
9014 no_zero_fill = TRUE;
9015 }
9016
9017 grab_options = 0;
9018 #if CONFIG_SECLUDED_MEMORY
9019 if (object->can_grab_secluded) {
9020 grab_options |= VM_PAGE_GRAB_SECLUDED;
9021 }
9022 #endif /* CONFIG_SECLUDED_MEMORY */
9023
9024 while (page_count--) {
9025 while ((dst_page = vm_page_grab_options(grab_options))
9026 == VM_PAGE_NULL) {
9027 OSAddAtomic(page_count, &vm_upl_wait_for_pages);
9028
9029 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
9030
9031 if (vm_page_wait(interruptible) == FALSE) {
9032 /*
9033 * interrupted case
9034 */
9035 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
9036
9037 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
9038
9039 ret = MACH_SEND_INTERRUPTED;
9040 goto done;
9041 }
9042 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
9043
9044 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
9045 }
9046 if (no_zero_fill == FALSE) {
9047 vm_page_zero_fill(dst_page);
9048 } else {
9049 dst_page->vmp_absent = TRUE;
9050 }
9051
9052 dst_page->vmp_reference = TRUE;
9053
9054 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
9055 SET_PAGE_DIRTY(dst_page, FALSE);
9056 }
9057 if (dst_page->vmp_absent == FALSE) {
9058 assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
9059 assert(dst_page->vmp_wire_count == 0);
9060 dst_page->vmp_wire_count++;
9061 dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
9062 assert(dst_page->vmp_wire_count);
9063 pages_wired++;
9064 PAGE_WAKEUP_DONE(dst_page);
9065 }
9066 pages_inserted++;
9067
9068 vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
9069
9070 bitmap_set(upl->lite_list, entry);
9071
9072 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9073
9074 if (phys_page > upl->highest_page) {
9075 upl->highest_page = phys_page;
9076 }
9077
9078 if (user_page_list) {
9079 user_page_list[entry].phys_addr = phys_page;
9080 user_page_list[entry].absent = dst_page->vmp_absent;
9081 user_page_list[entry].dirty = dst_page->vmp_dirty;
9082 user_page_list[entry].free_when_done = FALSE;
9083 user_page_list[entry].precious = FALSE;
9084 user_page_list[entry].device = FALSE;
9085 user_page_list[entry].speculative = FALSE;
9086 user_page_list[entry].cs_validated = FALSE;
9087 user_page_list[entry].cs_tainted = FALSE;
9088 user_page_list[entry].cs_nx = FALSE;
9089 user_page_list[entry].needed = FALSE;
9090 user_page_list[entry].mark = FALSE;
9091 }
9092 entry++;
9093 *dst_offset += PAGE_SIZE_64;
9094 }
9095 done:
9096 if (pages_wired) {
9097 vm_page_lockspin_queues();
9098 vm_page_wire_count += pages_wired;
9099 vm_page_unlock_queues();
9100 }
9101 if (pages_inserted) {
9102 if (object->internal) {
9103 OSAddAtomic(pages_inserted, &vm_page_internal_count);
9104 } else {
9105 OSAddAtomic(pages_inserted, &vm_page_external_count);
9106 }
9107 }
9108 if (delayed_ledger_update) {
9109 task_t owner;
9110 int ledger_idx_volatile;
9111 int ledger_idx_nonvolatile;
9112 int ledger_idx_volatile_compressed;
9113 int ledger_idx_nonvolatile_compressed;
9114 boolean_t do_footprint;
9115
9116 owner = VM_OBJECT_OWNER(object);
9117 assert(owner);
9118
9119 vm_object_ledger_tag_ledgers(object,
9120 &ledger_idx_volatile,
9121 &ledger_idx_nonvolatile,
9122 &ledger_idx_volatile_compressed,
9123 &ledger_idx_nonvolatile_compressed,
9124 &do_footprint);
9125
9126 /* more non-volatile bytes */
9127 ledger_credit(owner->ledger,
9128 ledger_idx_nonvolatile,
9129 delayed_ledger_update);
9130 if (do_footprint) {
9131 /* more footprint */
9132 ledger_credit(owner->ledger,
9133 task_ledgers.phys_footprint,
9134 delayed_ledger_update);
9135 }
9136 }
9137
9138 assert(page_grab_count);
9139 *page_grab_count = pages_inserted;
9140
9141 return ret;
9142 }
9143
9144
9145
9146 kern_return_t
9147 vm_object_iopl_request(
9148 vm_object_t object,
9149 vm_object_offset_t offset,
9150 upl_size_t size,
9151 upl_t *upl_ptr,
9152 upl_page_info_array_t user_page_list,
9153 unsigned int *page_list_count,
9154 upl_control_flags_t cntrl_flags,
9155 vm_tag_t tag)
9156 {
9157 vm_page_t dst_page;
9158 vm_object_offset_t dst_offset;
9159 upl_size_t xfer_size;
9160 upl_t upl = NULL;
9161 unsigned int entry;
9162 int no_zero_fill = FALSE;
9163 unsigned int size_in_pages;
9164 int page_grab_count = 0;
9165 u_int32_t psize;
9166 kern_return_t ret;
9167 vm_prot_t prot;
9168 struct vm_object_fault_info fault_info = {};
9169 struct vm_page_delayed_work dw_array;
9170 struct vm_page_delayed_work *dwp, *dwp_start;
9171 bool dwp_finish_ctx = TRUE;
9172 int dw_count;
9173 int dw_limit;
9174 int dw_index;
9175 boolean_t caller_lookup;
9176 int io_tracking_flag = 0;
9177 int interruptible;
9178 ppnum_t phys_page;
9179
9180 boolean_t set_cache_attr_needed = FALSE;
9181 boolean_t free_wired_pages = FALSE;
9182 boolean_t fast_path_empty_req = FALSE;
9183 boolean_t fast_path_full_req = FALSE;
9184
9185 #if DEVELOPMENT || DEBUG
9186 task_t task = current_task();
9187 #endif /* DEVELOPMENT || DEBUG */
9188
9189 dwp_start = dwp = NULL;
9190
9191 vm_object_offset_t original_offset = offset;
9192 upl_size_t original_size = size;
9193
9194 // DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags);
9195
9196 size = (upl_size_t)(vm_object_round_page(offset + size) - vm_object_trunc_page(offset));
9197 offset = vm_object_trunc_page(offset);
9198 if (size != original_size || offset != original_offset) {
9199 DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags, object, original_offset, original_size, offset, size);
9200 }
9201
9202 if (cntrl_flags & ~UPL_VALID_FLAGS) {
9203 /*
9204 * For forward compatibility's sake,
9205 * reject any unknown flag.
9206 */
9207 return KERN_INVALID_VALUE;
9208 }
9209 if (vm_lopage_needed == FALSE) {
9210 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
9211 }
9212
9213 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
9214 if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
9215 return KERN_INVALID_VALUE;
9216 }
9217
9218 if (object->phys_contiguous) {
9219 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
9220 return KERN_INVALID_ADDRESS;
9221 }
9222
9223 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
9224 return KERN_INVALID_ADDRESS;
9225 }
9226 }
9227 }
9228 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
9229 no_zero_fill = TRUE;
9230 }
9231
9232 if (cntrl_flags & UPL_COPYOUT_FROM) {
9233 prot = VM_PROT_READ;
9234 } else {
9235 prot = VM_PROT_READ | VM_PROT_WRITE;
9236 }
9237
9238 if ((!object->internal) && (object->paging_offset != 0)) {
9239 panic("vm_object_iopl_request: external object with non-zero paging offset");
9240 }
9241
9242
9243 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
9244
9245 #if CONFIG_IOSCHED || UPL_DEBUG
9246 if ((object->io_tracking && !is_kernel_object(object)) || upl_debug_enabled) {
9247 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
9248 }
9249 #endif
9250
9251 #if CONFIG_IOSCHED
9252 if (object->io_tracking) {
9253 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
9254 if (!is_kernel_object(object)) {
9255 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
9256 }
9257 }
9258 #endif
9259
9260 if (object->phys_contiguous) {
9261 psize = PAGE_SIZE;
9262 } else {
9263 psize = size;
9264
9265 dw_count = 0;
9266 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
9267 dwp_start = vm_page_delayed_work_get_ctx();
9268 if (dwp_start == NULL) {
9269 dwp_start = &dw_array;
9270 dw_limit = 1;
9271 dwp_finish_ctx = FALSE;
9272 }
9273
9274 dwp = dwp_start;
9275 }
9276
9277 if (cntrl_flags & UPL_SET_INTERNAL) {
9278 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9279 user_page_list = size ? upl->page_list : NULL;
9280 } else {
9281 upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9282 }
9283 if (user_page_list) {
9284 user_page_list[0].device = FALSE;
9285 }
9286 *upl_ptr = upl;
9287
9288 if (cntrl_flags & UPL_NOZEROFILLIO) {
9289 DTRACE_VM4(upl_nozerofillio,
9290 vm_object_t, object,
9291 vm_object_offset_t, offset,
9292 upl_size_t, size,
9293 upl_t, upl);
9294 }
9295
9296 upl->map_object = object;
9297 upl->u_offset = original_offset;
9298 upl->u_size = original_size;
9299
9300 size_in_pages = size / PAGE_SIZE;
9301
9302 if (is_kernel_object(object) &&
9303 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
9304 upl->flags |= UPL_KERNEL_OBJECT;
9305 #if UPL_DEBUG
9306 vm_object_lock(object);
9307 #else
9308 vm_object_lock_shared(object);
9309 #endif
9310 } else {
9311 vm_object_lock(object);
9312 vm_object_activity_begin(object);
9313 }
9314 /*
9315 * paging in progress also protects the paging_offset
9316 */
9317 upl->u_offset = original_offset + object->paging_offset;
9318
9319 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9320 /*
9321 * The user requested that access to the pages in this UPL
9322 * be blocked until the UPL is committed or aborted.
9323 */
9324 upl->flags |= UPL_ACCESS_BLOCKED;
9325 }
9326
9327 #if CONFIG_IOSCHED || UPL_DEBUG
9328 if ((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9329 vm_object_activity_begin(object);
9330 queue_enter(&object->uplq, upl, upl_t, uplq);
9331 }
9332 #endif
9333
9334 if (object->phys_contiguous) {
9335 if (upl->flags & UPL_ACCESS_BLOCKED) {
9336 assert(!object->blocked_access);
9337 object->blocked_access = TRUE;
9338 }
9339
9340 vm_object_unlock(object);
9341
9342 /*
9343 * don't need any shadow mappings for this one
9344 * since it is already I/O memory
9345 */
9346 upl->flags |= UPL_DEVICE_MEMORY;
9347
9348 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
9349
9350 if (user_page_list) {
9351 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
9352 user_page_list[0].device = TRUE;
9353 }
9354 if (page_list_count != NULL) {
9355 if (upl->flags & UPL_INTERNAL) {
9356 *page_list_count = 0;
9357 } else {
9358 *page_list_count = 1;
9359 }
9360 }
9361
9362 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9363 #if DEVELOPMENT || DEBUG
9364 if (task != NULL) {
9365 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9366 }
9367 #endif /* DEVELOPMENT || DEBUG */
9368 return KERN_SUCCESS;
9369 }
9370 if (!is_kernel_object(object) && object != compressor_object) {
9371 /*
9372 * Protect user space from future COW operations
9373 */
9374 #if VM_OBJECT_TRACKING_OP_TRUESHARE
9375 if (!object->true_share &&
9376 vm_object_tracking_btlog) {
9377 btlog_record(vm_object_tracking_btlog, object,
9378 VM_OBJECT_TRACKING_OP_TRUESHARE,
9379 btref_get(__builtin_frame_address(0), 0));
9380 }
9381 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
9382
9383 vm_object_lock_assert_exclusive(object);
9384 object->true_share = TRUE;
9385
9386 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
9387 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
9388 }
9389 }
9390
9391 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
9392 object->vo_copy != VM_OBJECT_NULL) {
9393 /*
9394 * Honor copy-on-write obligations
9395 *
9396 * The caller is gathering these pages and
9397 * might modify their contents. We need to
9398 * make sure that the copy object has its own
9399 * private copies of these pages before we let
9400 * the caller modify them.
9401 *
9402 * NOTE: someone else could map the original object
9403 * after we've done this copy-on-write here, and they
9404 * could then see an inconsistent picture of the memory
9405 * while it's being modified via the UPL. To prevent this,
9406 * we would have to block access to these pages until the
9407 * UPL is released. We could use the UPL_BLOCK_ACCESS
9408 * code path for that...
9409 */
9410 vm_object_update(object,
9411 offset,
9412 size,
9413 NULL,
9414 NULL,
9415 FALSE, /* should_return */
9416 MEMORY_OBJECT_COPY_SYNC,
9417 VM_PROT_NO_CHANGE);
9418 VM_PAGEOUT_DEBUG(iopl_cow, 1);
9419 VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
9420 }
9421 if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
9422 object->purgable != VM_PURGABLE_VOLATILE &&
9423 object->purgable != VM_PURGABLE_EMPTY &&
9424 object->vo_copy == NULL &&
9425 size == object->vo_size &&
9426 offset == 0 &&
9427 object->shadow == NULL &&
9428 object->pager == NULL) {
9429 if (object->resident_page_count == size_in_pages) {
9430 assert(object != compressor_object);
9431 assert(!is_kernel_object(object));
9432 fast_path_full_req = TRUE;
9433 } else if (object->resident_page_count == 0) {
9434 assert(object != compressor_object);
9435 assert(!is_kernel_object(object));
9436 fast_path_empty_req = TRUE;
9437 set_cache_attr_needed = TRUE;
9438 }
9439 }
9440
9441 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
9442 interruptible = THREAD_ABORTSAFE;
9443 } else {
9444 interruptible = THREAD_UNINT;
9445 }
9446
9447 entry = 0;
9448
9449 xfer_size = size;
9450 dst_offset = offset;
9451
9452 if (fast_path_full_req) {
9453 if (vm_object_iopl_wire_full(object, upl, user_page_list, cntrl_flags, tag) == TRUE) {
9454 goto finish;
9455 }
9456 /*
9457 * we couldn't complete the processing of this request on the fast path
9458 * so fall through to the slow path and finish up
9459 */
9460 } else if (fast_path_empty_req) {
9461 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9462 ret = KERN_MEMORY_ERROR;
9463 goto return_err;
9464 }
9465 ret = vm_object_iopl_wire_empty(object, upl, user_page_list,
9466 cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
9467
9468 if (ret) {
9469 free_wired_pages = TRUE;
9470 goto return_err;
9471 }
9472 goto finish;
9473 }
9474
9475 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
9476 fault_info.lo_offset = offset;
9477 fault_info.hi_offset = offset + xfer_size;
9478 fault_info.mark_zf_absent = TRUE;
9479 fault_info.interruptible = interruptible;
9480 fault_info.batch_pmap_op = TRUE;
9481
9482 while (xfer_size) {
9483 vm_fault_return_t result;
9484
9485 dwp->dw_mask = 0;
9486
9487 if (fast_path_full_req) {
9488 /*
9489 * if we get here, it means that we ran into a page
9490 * state we couldn't handle in the fast path and
9491 * bailed out to the slow path... since the order
9492 * we look at pages is different between the 2 paths,
9493 * the following check is needed to determine whether
9494 * this page was already processed in the fast path
9495 */
9496 if (bitmap_test(upl->lite_list, entry)) {
9497 goto skip_page;
9498 }
9499 }
9500 dst_page = vm_page_lookup(object, dst_offset);
9501
9502 if (dst_page == VM_PAGE_NULL ||
9503 dst_page->vmp_busy ||
9504 VMP_ERROR_GET(dst_page) ||
9505 dst_page->vmp_restart ||
9506 dst_page->vmp_absent ||
9507 dst_page->vmp_fictitious) {
9508 if (is_kernel_object(object)) {
9509 panic("vm_object_iopl_request: missing/bad page in kernel object");
9510 }
9511 if (object == compressor_object) {
9512 panic("vm_object_iopl_request: missing/bad page in compressor object");
9513 }
9514
9515 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9516 ret = KERN_MEMORY_ERROR;
9517 goto return_err;
9518 }
9519 set_cache_attr_needed = TRUE;
9520
9521 /*
9522 * We just looked up the page and the result remains valid
9523 * until the object lock is released, so send it to
9524 * vm_fault_page() (as "dst_page"), to avoid having to
9525 * look it up again there.
9526 */
9527 caller_lookup = TRUE;
9528
9529 do {
9530 vm_page_t top_page;
9531 kern_return_t error_code;
9532
9533 fault_info.cluster_size = xfer_size;
9534
9535 vm_object_paging_begin(object);
9536
9537 result = vm_fault_page(object, dst_offset,
9538 prot | VM_PROT_WRITE, FALSE,
9539 caller_lookup,
9540 &prot, &dst_page, &top_page,
9541 (int *)0,
9542 &error_code, no_zero_fill,
9543 &fault_info);
9544
9545 /* our lookup is no longer valid at this point */
9546 caller_lookup = FALSE;
9547
9548 switch (result) {
9549 case VM_FAULT_SUCCESS:
9550 page_grab_count++;
9551
9552 if (!dst_page->vmp_absent) {
9553 PAGE_WAKEUP_DONE(dst_page);
9554 } else {
9555 /*
9556 * we only get back an absent page if we
9557 * requested that it not be zero-filled
9558 * because we are about to fill it via I/O
9559 *
9560 * absent pages should be left BUSY
9561 * to prevent them from being faulted
9562 * into an address space before we've
9563 * had a chance to complete the I/O on
9564 * them since they may contain info that
9565 * shouldn't be seen by the faulting task
9566 */
9567 }
9568 /*
9569 * Release paging references and
9570 * top-level placeholder page, if any.
9571 */
9572 if (top_page != VM_PAGE_NULL) {
9573 vm_object_t local_object;
9574
9575 local_object = VM_PAGE_OBJECT(top_page);
9576
9577 /*
9578 * comparing 2 packed pointers
9579 */
9580 if (top_page->vmp_object != dst_page->vmp_object) {
9581 vm_object_lock(local_object);
9582 VM_PAGE_FREE(top_page);
9583 vm_object_paging_end(local_object);
9584 vm_object_unlock(local_object);
9585 } else {
9586 VM_PAGE_FREE(top_page);
9587 vm_object_paging_end(local_object);
9588 }
9589 }
9590 vm_object_paging_end(object);
9591 break;
9592
9593 case VM_FAULT_RETRY:
9594 vm_object_lock(object);
9595 break;
9596
9597 case VM_FAULT_MEMORY_SHORTAGE:
9598 OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
9599
9600 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
9601
9602 if (vm_page_wait(interruptible)) {
9603 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9604
9605 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
9606 vm_object_lock(object);
9607
9608 break;
9609 }
9610 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9611
9612 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
9613 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJIOPLREQ_MEMORY_SHORTAGE), 0 /* arg */);
9614 OS_FALLTHROUGH;
9615
9616 case VM_FAULT_INTERRUPTED:
9617 error_code = MACH_SEND_INTERRUPTED;
9618 OS_FALLTHROUGH;
9619 case VM_FAULT_MEMORY_ERROR:
9620 memory_error:
9621 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
9622
9623 vm_object_lock(object);
9624 goto return_err;
9625
9626 case VM_FAULT_SUCCESS_NO_VM_PAGE:
9627 /* success but no page: fail */
9628 vm_object_paging_end(object);
9629 vm_object_unlock(object);
9630 goto memory_error;
9631
9632 default:
9633 panic("vm_object_iopl_request: unexpected error"
9634 " 0x%x from vm_fault_page()\n", result);
9635 }
9636 } while (result != VM_FAULT_SUCCESS);
9637 }
9638 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9639
9640 if (upl->flags & UPL_KERNEL_OBJECT) {
9641 goto record_phys_addr;
9642 }
9643
9644 if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9645 dst_page->vmp_busy = TRUE;
9646 goto record_phys_addr;
9647 }
9648
9649 if (dst_page->vmp_cleaning) {
9650 /*
9651 * Someone else is cleaning this page in place.
9652 * In theory, we should be able to proceed and use this
9653 * page but they'll probably end up clearing the "busy"
9654 * bit on it in upl_commit_range() but they didn't set
9655 * it, so they would clear our "busy" bit and open
9656 * us to race conditions.
9657 * We'd better wait for the cleaning to complete and
9658 * then try again.
9659 */
9660 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
9661 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
9662 continue;
9663 }
9664 if (dst_page->vmp_laundry) {
9665 vm_pageout_steal_laundry(dst_page, FALSE);
9666 }
9667
9668 if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
9669 phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
9670 vm_page_t low_page;
9671 int refmod;
9672
9673 /*
9674 * support devices that can't DMA above 32 bits
9675 * by substituting pages from a pool of low address
9676 * memory for any pages we find above the 4G mark
9677 * can't substitute if the page is already wired because
9678 * we don't know whether that physical address has been
9679 * handed out to some other 64 bit capable DMA device to use
9680 */
9681 if (VM_PAGE_WIRED(dst_page)) {
9682 ret = KERN_PROTECTION_FAILURE;
9683 goto return_err;
9684 }
9685 low_page = vm_page_grablo();
9686
9687 if (low_page == VM_PAGE_NULL) {
9688 ret = KERN_RESOURCE_SHORTAGE;
9689 goto return_err;
9690 }
9691 /*
9692 * from here until the vm_page_replace completes
9693 * we mustn't drop the object lock... we don't
9694 * want anyone refaulting this page in and using
9695 * it after we disconnect it... we want the fault
9696 * to find the new page being substituted.
9697 */
9698 if (dst_page->vmp_pmapped) {
9699 refmod = pmap_disconnect(phys_page);
9700 } else {
9701 refmod = 0;
9702 }
9703
9704 if (!dst_page->vmp_absent) {
9705 vm_page_copy(dst_page, low_page);
9706 }
9707
9708 low_page->vmp_reference = dst_page->vmp_reference;
9709 low_page->vmp_dirty = dst_page->vmp_dirty;
9710 low_page->vmp_absent = dst_page->vmp_absent;
9711
9712 if (refmod & VM_MEM_REFERENCED) {
9713 low_page->vmp_reference = TRUE;
9714 }
9715 if (refmod & VM_MEM_MODIFIED) {
9716 SET_PAGE_DIRTY(low_page, FALSE);
9717 }
9718
9719 vm_page_replace(low_page, object, dst_offset);
9720
9721 dst_page = low_page;
9722 /*
9723 * vm_page_grablo returned the page marked
9724 * BUSY... we don't need a PAGE_WAKEUP_DONE
9725 * here, because we've never dropped the object lock
9726 */
9727 if (!dst_page->vmp_absent) {
9728 dst_page->vmp_busy = FALSE;
9729 }
9730
9731 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9732 }
9733 if (!dst_page->vmp_busy) {
9734 dwp->dw_mask |= DW_vm_page_wire;
9735 }
9736
9737 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9738 /*
9739 * Mark the page "busy" to block any future page fault
9740 * on this page in addition to wiring it.
9741 * We'll also remove the mapping
9742 * of all these pages before leaving this routine.
9743 */
9744 assert(!dst_page->vmp_fictitious);
9745 dst_page->vmp_busy = TRUE;
9746 }
9747 /*
9748 * expect the page to be used
9749 * page queues lock must be held to set 'reference'
9750 */
9751 dwp->dw_mask |= DW_set_reference;
9752
9753 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
9754 SET_PAGE_DIRTY(dst_page, TRUE);
9755 /*
9756 * Page belonging to a code-signed object is about to
9757 * be written. Mark it tainted and disconnect it from
9758 * all pmaps so processes have to fault it back in and
9759 * deal with the tainted bit.
9760 */
9761 if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
9762 dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
9763 vm_page_iopl_tainted++;
9764 if (dst_page->vmp_pmapped) {
9765 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
9766 if (refmod & VM_MEM_REFERENCED) {
9767 dst_page->vmp_reference = TRUE;
9768 }
9769 }
9770 }
9771 }
9772 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
9773 pmap_sync_page_attributes_phys(phys_page);
9774 dst_page->vmp_written_by_kernel = FALSE;
9775 }
9776
9777 record_phys_addr:
9778 if (dst_page->vmp_busy) {
9779 upl->flags |= UPL_HAS_BUSY;
9780 }
9781
9782 bitmap_set(upl->lite_list, entry);
9783
9784 if (phys_page > upl->highest_page) {
9785 upl->highest_page = phys_page;
9786 }
9787
9788 if (user_page_list) {
9789 user_page_list[entry].phys_addr = phys_page;
9790 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
9791 user_page_list[entry].absent = dst_page->vmp_absent;
9792 user_page_list[entry].dirty = dst_page->vmp_dirty;
9793 user_page_list[entry].precious = dst_page->vmp_precious;
9794 user_page_list[entry].device = FALSE;
9795 user_page_list[entry].needed = FALSE;
9796 if (dst_page->vmp_clustered == TRUE) {
9797 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
9798 } else {
9799 user_page_list[entry].speculative = FALSE;
9800 }
9801 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
9802 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
9803 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
9804 user_page_list[entry].mark = FALSE;
9805 }
9806 if (!is_kernel_object(object) && object != compressor_object) {
9807 /*
9808 * someone is explicitly grabbing this page...
9809 * update clustered and speculative state
9810 *
9811 */
9812 if (dst_page->vmp_clustered) {
9813 VM_PAGE_CONSUME_CLUSTERED(dst_page);
9814 }
9815 }
9816 skip_page:
9817 entry++;
9818 dst_offset += PAGE_SIZE_64;
9819 xfer_size -= PAGE_SIZE;
9820
9821 if (dwp->dw_mask) {
9822 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
9823
9824 if (dw_count >= dw_limit) {
9825 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9826
9827 dwp = dwp_start;
9828 dw_count = 0;
9829 }
9830 }
9831 }
9832 assert(entry == size_in_pages);
9833
9834 if (dw_count) {
9835 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9836 dwp = dwp_start;
9837 dw_count = 0;
9838 }
9839 finish:
9840 if (user_page_list && set_cache_attr_needed == TRUE) {
9841 vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
9842 }
9843
9844 if (page_list_count != NULL) {
9845 if (upl->flags & UPL_INTERNAL) {
9846 *page_list_count = 0;
9847 } else if (*page_list_count > size_in_pages) {
9848 *page_list_count = size_in_pages;
9849 }
9850 }
9851 vm_object_unlock(object);
9852
9853 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9854 /*
9855 * We've marked all the pages "busy" so that future
9856 * page faults will block.
9857 * Now remove the mapping for these pages, so that they
9858 * can't be accessed without causing a page fault.
9859 */
9860 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
9861 PMAP_NULL,
9862 PAGE_SIZE,
9863 0, VM_PROT_NONE);
9864 assert(!object->blocked_access);
9865 object->blocked_access = TRUE;
9866 }
9867
9868 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9869 #if DEVELOPMENT || DEBUG
9870 if (task != NULL) {
9871 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9872 }
9873 #endif /* DEVELOPMENT || DEBUG */
9874
9875 if (dwp_start && dwp_finish_ctx) {
9876 vm_page_delayed_work_finish_ctx(dwp_start);
9877 dwp_start = dwp = NULL;
9878 }
9879
9880 return KERN_SUCCESS;
9881
9882 return_err:
9883 dw_index = 0;
9884
9885 for (; offset < dst_offset; offset += PAGE_SIZE) {
9886 boolean_t need_unwire;
9887
9888 dst_page = vm_page_lookup(object, offset);
9889
9890 if (dst_page == VM_PAGE_NULL) {
9891 panic("vm_object_iopl_request: Wired page missing.");
9892 }
9893
9894 /*
9895 * if we've already processed this page in an earlier
9896 * dw_do_work, we need to undo the wiring... we will
9897 * leave the dirty and reference bits on if they
9898 * were set, since we don't have a good way of knowing
9899 * what the previous state was and we won't get here
9900 * under any normal circumstances... we will always
9901 * clear BUSY and wakeup any waiters via vm_page_free
9902 * or PAGE_WAKEUP_DONE
9903 */
9904 need_unwire = TRUE;
9905
9906 if (dw_count) {
9907 if ((dwp_start)[dw_index].dw_m == dst_page) {
9908 /*
9909 * still in the deferred work list
9910 * which means we haven't yet called
9911 * vm_page_wire on this page
9912 */
9913 need_unwire = FALSE;
9914
9915 dw_index++;
9916 dw_count--;
9917 }
9918 }
9919 vm_page_lock_queues();
9920
9921 if (dst_page->vmp_absent || free_wired_pages == TRUE) {
9922 vm_page_free(dst_page);
9923
9924 need_unwire = FALSE;
9925 } else {
9926 if (need_unwire == TRUE) {
9927 vm_page_unwire(dst_page, TRUE);
9928 }
9929
9930 PAGE_WAKEUP_DONE(dst_page);
9931 }
9932 vm_page_unlock_queues();
9933
9934 if (need_unwire == TRUE) {
9935 counter_inc(&vm_statistics_reactivations);
9936 }
9937 }
9938 #if UPL_DEBUG
9939 upl->upl_state = 2;
9940 #endif
9941 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
9942 vm_object_activity_end(object);
9943 vm_object_collapse(object, 0, TRUE);
9944 }
9945 vm_object_unlock(object);
9946 upl_destroy(upl);
9947
9948 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
9949 #if DEVELOPMENT || DEBUG
9950 if (task != NULL) {
9951 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9952 }
9953 #endif /* DEVELOPMENT || DEBUG */
9954
9955 if (dwp_start && dwp_finish_ctx) {
9956 vm_page_delayed_work_finish_ctx(dwp_start);
9957 dwp_start = dwp = NULL;
9958 }
9959 return ret;
9960 }
9961
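/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * wiring a range of an object for device I/O with vm_object_iopl_request
 * and walking the resulting page list. The flag combination and the
 * commit-on-completion pattern are assumptions; "object", "offset",
 * "size" and the DMA step are placeholders.
 *
 *	upl_t            upl = NULL;
 *	unsigned int     count = 0;
 *	upl_page_info_t *pl;
 *	kern_return_t    kr;
 *
 *	kr = vm_object_iopl_request(object, offset, size, &upl, NULL, &count,
 *	    UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_COPYOUT_FROM,
 *	    VM_KERN_MEMORY_IOKIT);
 *	if (kr == KERN_SUCCESS) {
 *		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 *		... program the DMA engine using pl[i].phys_addr ...
 *		(void) upl_commit(upl, pl, (mach_msg_type_number_t)(size / PAGE_SIZE));
 *		upl_deallocate(upl);
 *	}
 */
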
9962 kern_return_t
9963 upl_transpose(
9964 upl_t upl1,
9965 upl_t upl2)
9966 {
9967 kern_return_t retval;
9968 boolean_t upls_locked;
9969 vm_object_t object1, object2;
9970
9971 /* LD: Should mapped UPLs be eligible for a transpose? */
9972 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
9973 return KERN_INVALID_ARGUMENT;
9974 }
9975
9976 upls_locked = FALSE;
9977
9978 /*
9979 * Since we need to lock both UPLs at the same time,
9980 * avoid deadlocks by always taking locks in the same order.
9981 */
9982 if (upl1 < upl2) {
9983 upl_lock(upl1);
9984 upl_lock(upl2);
9985 } else {
9986 upl_lock(upl2);
9987 upl_lock(upl1);
9988 }
9989 upls_locked = TRUE; /* the UPLs will need to be unlocked */
9990
9991 object1 = upl1->map_object;
9992 object2 = upl2->map_object;
9993
9994 if (upl1->u_offset != 0 || upl2->u_offset != 0 ||
9995 upl1->u_size != upl2->u_size) {
9996 /*
9997 * We deal only with full objects, not subsets.
9998 * That's because we exchange the entire backing store info
9999 * for the objects: pager, resident pages, etc... We can't do
10000 * only part of it.
10001 */
10002 retval = KERN_INVALID_VALUE;
10003 goto done;
10004 }
10005
10006 /*
10007 * Transpose the VM objects' backing store.
10008 */
10009 retval = vm_object_transpose(object1, object2,
10010 upl_adjusted_size(upl1, PAGE_MASK));
10011
10012 if (retval == KERN_SUCCESS) {
10013 /*
10014 * Make each UPL point to the correct VM object, i.e. the
10015 * object holding the pages that the UPL refers to...
10016 */
10017 #if CONFIG_IOSCHED || UPL_DEBUG
10018 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
10019 vm_object_lock(object1);
10020 vm_object_lock(object2);
10021 }
10022 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
10023 queue_remove(&object1->uplq, upl1, upl_t, uplq);
10024 }
10025 if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
10026 queue_remove(&object2->uplq, upl2, upl_t, uplq);
10027 }
10028 #endif
10029 upl1->map_object = object2;
10030 upl2->map_object = object1;
10031
10032 #if CONFIG_IOSCHED || UPL_DEBUG
10033 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
10034 queue_enter(&object2->uplq, upl1, upl_t, uplq);
10035 }
10036 if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
10037 queue_enter(&object1->uplq, upl2, upl_t, uplq);
10038 }
10039 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
10040 vm_object_unlock(object2);
10041 vm_object_unlock(object1);
10042 }
10043 #endif
10044 }
10045
10046 done:
10047 /*
10048 * Cleanup.
10049 */
10050 if (upls_locked) {
10051 upl_unlock(upl1);
10052 upl_unlock(upl2);
10053 upls_locked = FALSE;
10054 }
10055
10056 return retval;
10057 }
10058
10059 void
10060 upl_range_needed(
10061 upl_t upl,
10062 int index,
10063 int count)
10064 {
10065 int size_in_pages;
10066
10067 if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
10068 return;
10069 }
10070
10071 size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
10072
10073 while (count-- && index < size_in_pages) {
10074 upl->page_list[index++].needed = TRUE;
10075 }
10076 }
10077
10078
10079 /*
10080 * Reserve of virtual addresses in the kernel address space.
10081 * We need to map the physical pages in the kernel, so that we
10082 * can call the code-signing or slide routines with a kernel
10083 * virtual address. We keep this pool of pre-allocated kernel
10084 * virtual addresses so that we don't have to scan the kernel's
10085 * virtual address space each time we need to work with
10086 * a physical page.
10087 */
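/*
 * A minimal sketch of the pool layout implied by the declarations and
 * code below (purely illustrative):
 *
 *	slot i (0 <= i < VM_PAGING_NUM_PAGES) covers the kernel virtual
 *	address range
 *
 *		[vm_paging_base_address + i * PAGE_SIZE,
 *		 vm_paging_base_address + (i + 1) * PAGE_SIZE)
 *
 *	vm_paging_page_inuse[i] records whether slot i currently has a
 *	physical page entered, and vm_paging_lock serializes slot
 *	allocation and release.
 */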
10088 SIMPLE_LOCK_DECLARE(vm_paging_lock, 0);
10089 #define VM_PAGING_NUM_PAGES 64
10090 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_paging_base_address = 0;
10091 bool vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
10092 int vm_paging_max_index = 0;
10093 int vm_paging_page_waiter = 0;
10094 int vm_paging_page_waiter_total = 0;
10095
10096 unsigned long vm_paging_no_kernel_page = 0;
10097 unsigned long vm_paging_objects_mapped = 0;
10098 unsigned long vm_paging_pages_mapped = 0;
10099 unsigned long vm_paging_objects_mapped_slow = 0;
10100 unsigned long vm_paging_pages_mapped_slow = 0;
10101
10102 __startup_func
10103 static void
10104 vm_paging_map_init(void)
10105 {
10106 kmem_alloc(kernel_map, &vm_paging_base_address,
10107 ptoa(VM_PAGING_NUM_PAGES),
10108 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_PAGEABLE,
10109 VM_KERN_MEMORY_NONE);
10110 }
10111 STARTUP(ZALLOC, STARTUP_RANK_LAST, vm_paging_map_init);
10112
10113 /*
10114 * vm_paging_map_object:
10115 * Maps part of a VM object's pages in the kernel
10116 * virtual address space, using the pre-allocated
10117 * kernel virtual addresses, if possible.
10118 * Context:
10119 * The VM object is locked. This lock will get
10120 * dropped and re-acquired though, so the caller
10121 * must make sure the VM object is kept alive
10122 * (by holding a VM map that has a reference
10123 * on it, for example, or taking an extra reference).
10124 * The page should also be kept busy to prevent
10125 * it from being reclaimed.
10126 */
10127 kern_return_t
10128 vm_paging_map_object(
10129 vm_page_t page,
10130 vm_object_t object,
10131 vm_object_offset_t offset,
10132 vm_prot_t protection,
10133 boolean_t can_unlock_object,
10134 vm_map_size_t *size, /* IN/OUT */
10135 vm_map_offset_t *address, /* OUT */
10136 boolean_t *need_unmap) /* OUT */
10137 {
10138 kern_return_t kr;
10139 vm_map_offset_t page_map_offset;
10140 vm_map_size_t map_size;
10141 vm_object_offset_t object_offset;
10142 int i;
10143
10144 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
10145 /* use permanent 1-to-1 kernel mapping of physical memory ? */
10146 *address = (vm_map_offset_t)
10147 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
10148 *need_unmap = FALSE;
10149 return KERN_SUCCESS;
10150
10151 assert(page->vmp_busy);
10152 /*
10153 * Use one of the pre-allocated kernel virtual addresses
10154 * and just enter the VM page in the kernel address space
10155 * at that virtual address.
10156 */
10157 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10158
10159 /*
10160 * Try and find an available kernel virtual address
10161 * from our pre-allocated pool.
10162 */
10163 page_map_offset = 0;
10164 for (;;) {
10165 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
10166 if (vm_paging_page_inuse[i] == FALSE) {
10167 page_map_offset =
10168 vm_paging_base_address +
10169 (i * PAGE_SIZE);
10170 break;
10171 }
10172 }
10173 if (page_map_offset != 0) {
10174 /* found a space to map our page ! */
10175 break;
10176 }
10177
10178 if (can_unlock_object) {
10179 /*
10180 * If we can afford to unlock the VM object,
10181 * let's take the slow path now...
10182 */
10183 break;
10184 }
10185 /*
10186 * We can't afford to unlock the VM object, so
10187 * let's wait for a space to become available...
10188 */
10189 vm_paging_page_waiter_total++;
10190 vm_paging_page_waiter++;
10191 kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
10192 if (kr == THREAD_WAITING) {
10193 simple_unlock(&vm_paging_lock);
10194 kr = thread_block(THREAD_CONTINUE_NULL);
10195 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10196 }
10197 vm_paging_page_waiter--;
10198 /* ... and try again */
10199 }
10200
10201 if (page_map_offset != 0) {
10202 /*
10203 * We found a kernel virtual address;
10204 * map the physical page to that virtual address.
10205 */
10206 if (i > vm_paging_max_index) {
10207 vm_paging_max_index = i;
10208 }
10209 vm_paging_page_inuse[i] = TRUE;
10210 simple_unlock(&vm_paging_lock);
10211
10212 page->vmp_pmapped = TRUE;
10213
10214 /*
10215 * Keep the VM object locked over the PMAP_ENTER
10216 * and the actual use of the page by the kernel,
10217 * or this pmap mapping might get undone by a
10218 * vm_object_pmap_protect() call...
10219 */
10220 kr = pmap_enter_check(kernel_pmap,
10221 page_map_offset,
10222 page,
10223 protection,
10224 VM_PROT_NONE,
10225 0,
10226 TRUE);
10227 assert(kr == KERN_SUCCESS);
10228 vm_paging_objects_mapped++;
10229 vm_paging_pages_mapped++;
10230 *address = page_map_offset;
10231 *need_unmap = TRUE;
10232
10233 #if KASAN
10234 kasan_notify_address(page_map_offset, PAGE_SIZE);
10235 #endif
10236
10237 /* all done and mapped, ready to use ! */
10238 return KERN_SUCCESS;
10239 }
10240
10241 /*
10242 * We ran out of pre-allocated kernel virtual
10243 * addresses. Just map the page in the kernel
10244 * the slow and regular way.
10245 */
10246 vm_paging_no_kernel_page++;
10247 simple_unlock(&vm_paging_lock);
10248 }
10249
10250 if (!can_unlock_object) {
10251 *address = 0;
10252 *size = 0;
10253 *need_unmap = FALSE;
10254 return KERN_NOT_SUPPORTED;
10255 }
10256
10257 object_offset = vm_object_trunc_page(offset);
10258 map_size = vm_map_round_page(*size,
10259 VM_MAP_PAGE_MASK(kernel_map));
10260
10261 /*
10262 * Try and map the required range of the object
10263 * in the kernel_map. Given that allocation is
10264 * for pageable memory, it shouldn't contain
10265 * pointers and is mapped into the data range.
10266 */
10267
10268 vm_object_reference_locked(object); /* for the map entry */
10269 vm_object_unlock(object);
10270
10271 kr = vm_map_enter(kernel_map,
10272 address,
10273 map_size,
10274 0,
10275 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
10276 object,
10277 object_offset,
10278 FALSE,
10279 protection,
10280 VM_PROT_ALL,
10281 VM_INHERIT_NONE);
10282 if (kr != KERN_SUCCESS) {
10283 *address = 0;
10284 *size = 0;
10285 *need_unmap = FALSE;
10286 vm_object_deallocate(object); /* for the map entry */
10287 vm_object_lock(object);
10288 return kr;
10289 }
10290
10291 *size = map_size;
10292
10293 /*
10294 * Enter the mapped pages in the page table now.
10295 */
10296 vm_object_lock(object);
10297 /*
10298 * VM object must be kept locked from before PMAP_ENTER()
10299 * until after the kernel is done accessing the page(s).
10300 * Otherwise, the pmap mappings in the kernel could be
10301 * undone by a call to vm_object_pmap_protect().
10302 */
10303
10304 for (page_map_offset = 0;
10305 map_size != 0;
10306 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
10307 page = vm_page_lookup(object, offset + page_map_offset);
10308 if (page == VM_PAGE_NULL) {
10309 printf("vm_paging_map_object: no page !?");
10310 vm_object_unlock(object);
10311 vm_map_remove(kernel_map, *address, *size);
10312 *address = 0;
10313 *size = 0;
10314 *need_unmap = FALSE;
10315 vm_object_lock(object);
10316 return KERN_MEMORY_ERROR;
10317 }
10318 page->vmp_pmapped = TRUE;
10319
10320 kr = pmap_enter_check(kernel_pmap,
10321 *address + page_map_offset,
10322 page,
10323 protection,
10324 VM_PROT_NONE,
10325 0,
10326 TRUE);
10327 assert(kr == KERN_SUCCESS);
10328 #if KASAN
10329 kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
10330 #endif
10331 }
10332
10333 vm_paging_objects_mapped_slow++;
10334 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
10335
10336 *need_unmap = TRUE;
10337
10338 return KERN_SUCCESS;
10339 }
10340
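/*
 * Illustrative sketch (not part of the build): the typical single-page
 * call pattern for vm_paging_map_object, with the object locked and the
 * page kept busy by the caller. "page", "object" and "offset" are
 * placeholders.
 *
 *	vm_map_size_t   map_size = PAGE_SIZE;
 *	vm_map_offset_t kva = 0;
 *	boolean_t       need_unmap = FALSE;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	    FALSE, &map_size, &kva, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... access the page contents through (const void *)kva ...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(object, kva, kva + map_size);
 *		}
 *	}
 */
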
10341 /*
10342 * vm_paging_unmap_object:
10343 * Unmaps part of a VM object's pages from the kernel
10344 * virtual address space.
10345 * Context:
10346 * The VM object is locked. This lock will get
10347 * dropped and re-acquired though.
10348 */
10349 void
10350 vm_paging_unmap_object(
10351 vm_object_t object,
10352 vm_map_offset_t start,
10353 vm_map_offset_t end)
10354 {
10355 int i;
10356
10357 if ((vm_paging_base_address == 0) ||
10358 (start < vm_paging_base_address) ||
10359 (end > (vm_paging_base_address
10360 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
10361 /*
10362 * We didn't use our pre-allocated pool of
10363 * kernel virtual addresses. Deallocate the
10364 * virtual memory.
10365 */
10366 if (object != VM_OBJECT_NULL) {
10367 vm_object_unlock(object);
10368 }
10369 vm_map_remove(kernel_map, start, end);
10370 if (object != VM_OBJECT_NULL) {
10371 vm_object_lock(object);
10372 }
10373 } else {
10374 /*
10375 * We used a kernel virtual address from our
10376 * pre-allocated pool. Put it back in the pool
10377 * for next time.
10378 */
10379 assert(end - start == PAGE_SIZE);
10380 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
10381 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
10382
10383 /* undo the pmap mapping */
10384 pmap_remove(kernel_pmap, start, end);
10385
10386 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10387 vm_paging_page_inuse[i] = FALSE;
10388 if (vm_paging_page_waiter) {
10389 thread_wakeup(&vm_paging_page_waiter);
10390 }
10391 simple_unlock(&vm_paging_lock);
10392 }
10393 }
10394
10395
10396 /*
10397 * page->vmp_object must be locked
10398 */
10399 void
10400 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
10401 {
10402 if (!queues_locked) {
10403 vm_page_lockspin_queues();
10404 }
10405
10406 page->vmp_free_when_done = FALSE;
10407 /*
10408 * need to drop the laundry count...
10409 * we may also need to remove it
10410 * from the I/O paging queue...
10411 * vm_pageout_throttle_up handles both cases
10412 *
10413 * the laundry and pageout_queue flags are cleared...
10414 */
10415 vm_pageout_throttle_up(page);
10416
10417 if (!queues_locked) {
10418 vm_page_unlock_queues();
10419 }
10420 }
10421
10422 #define VECTOR_UPL_ELEMENTS_UPPER_LIMIT 64
10423
10424 upl_t
10425 vector_upl_create(vm_offset_t upl_offset, uint32_t max_upls)
10426 {
10427 int i = 0;
10428 upl_t upl;
10429
10430 assert(max_upls > 0);
10431 if (max_upls == 0) {
10432 return NULL;
10433 }
10434
10435 if (max_upls > VECTOR_UPL_ELEMENTS_UPPER_LIMIT) {
10436 max_upls = VECTOR_UPL_ELEMENTS_UPPER_LIMIT;
10437 }
10438 vector_upl_t vector_upl = kalloc_type(struct _vector_upl, typeof(vector_upl->upls[0]), max_upls, Z_WAITOK | Z_NOFAIL);
10439
10440 upl = upl_create(0, UPL_VECTOR, 0);
10441 upl->vector_upl = vector_upl;
10442 upl->u_offset = upl_offset;
10443 vector_upl->size = 0;
10444 vector_upl->offset = upl_offset;
10445 vector_upl->invalid_upls = 0;
10446 vector_upl->num_upls = 0;
10447 vector_upl->pagelist = NULL;
10448 vector_upl->max_upls = max_upls;
10449
10450 for (i = 0; i < max_upls; i++) {
10451 vector_upl->upls[i].iostate.size = 0;
10452 vector_upl->upls[i].iostate.offset = 0;
10453 }
10454 return upl;
10455 }
10456
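/*
 * Illustrative sketch (hypothetical caller, not part of the build): how
 * a clustered-I/O style caller might assemble a vector UPL from several
 * sub-UPLs before issuing a single I/O. "subupl[]", "sub_offset[]",
 * "sub_size[]" and "nsubupls" are placeholders.
 *
 *	upl_t vector_upl = vector_upl_create(upl_offset, nsubupls);
 *
 *	for (i = 0; i < nsubupls; i++) {
 *		vector_upl_set_subupl(vector_upl, subupl[i], sub_size[i]);
 *		vector_upl_set_iostate(vector_upl, subupl[i],
 *		    sub_offset[i], sub_size[i]);
 *	}
 *	vector_upl_set_pagelist(vector_upl);
 *	... issue the I/O against vector_upl ...
 */
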
10457 uint32_t
10458 vector_upl_max_upls(const upl_t upl)
10459 {
10460 if (!vector_upl_is_valid(upl)) {
10461 return 0;
10462 }
10463 return ((vector_upl_t)(upl->vector_upl))->max_upls;
10464 }
10465
10466 void
10467 vector_upl_deallocate(upl_t upl)
10468 {
10469 vector_upl_t vector_upl = upl->vector_upl;
10470
10471 assert(vector_upl_is_valid(upl));
10472
10473 if (vector_upl->invalid_upls != vector_upl->num_upls) {
10474 panic("Deallocating non-empty Vectored UPL");
10475 }
10476 uint32_t max_upls = vector_upl->max_upls;
10477 kfree_type(struct upl_page_info, atop(vector_upl->size), vector_upl->pagelist);
10478 kfree_type(struct _vector_upl, typeof(vector_upl->upls[0]), max_upls, vector_upl);
10479 upl->vector_upl = NULL;
10480 }
10481
10482 boolean_t
10483 vector_upl_is_valid(upl_t upl)
10484 {
10485 return upl && (upl->flags & UPL_VECTOR) && upl->vector_upl;
10486 }
10487
10488 boolean_t
10489 vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
10490 {
10491 if (vector_upl_is_valid(upl)) {
10492 vector_upl_t vector_upl = upl->vector_upl;
10493
10494 if (vector_upl) {
10495 if (subupl) {
10496 if (io_size) {
10497 if (io_size < PAGE_SIZE) {
10498 io_size = PAGE_SIZE;
10499 }
10500 subupl->vector_upl = (void*)vector_upl;
10501 vector_upl->upls[vector_upl->num_upls++].elem = subupl;
10502 vector_upl->size += io_size;
10503 upl->u_size += io_size;
10504 } else {
10505 uint32_t i = 0, invalid_upls = 0;
10506 for (i = 0; i < vector_upl->num_upls; i++) {
10507 if (vector_upl->upls[i].elem == subupl) {
10508 break;
10509 }
10510 }
10511 if (i == vector_upl->num_upls) {
10512 panic("Trying to remove sub-upl when none exists");
10513 }
10514
10515 vector_upl->upls[i].elem = NULL;
10516 invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls,
10517 relaxed);
10518 if (invalid_upls == vector_upl->num_upls) {
10519 return TRUE;
10520 } else {
10521 return FALSE;
10522 }
10523 }
10524 } else {
10525 panic("vector_upl_set_subupl was passed a NULL upl element");
10526 }
10527 } else {
10528 panic("vector_upl_set_subupl was passed a non-vectored upl");
10529 }
10530 } else {
10531 panic("vector_upl_set_subupl was passed a NULL upl");
10532 }
10533
10534 return FALSE;
10535 }
10536
10537 void
10538 vector_upl_set_pagelist(upl_t upl)
10539 {
10540 if (vector_upl_is_valid(upl)) {
10541 uint32_t i = 0;
10542 vector_upl_t vector_upl = upl->vector_upl;
10543
10544 if (vector_upl) {
10545 vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
10546
10547 vector_upl->pagelist = kalloc_type(struct upl_page_info,
10548 atop(vector_upl->size), Z_WAITOK);
10549
10550 for (i = 0; i < vector_upl->num_upls; i++) {
10551 cur_upl_pagelist_size = sizeof(struct upl_page_info) * upl_adjusted_size(vector_upl->upls[i].elem, PAGE_MASK) / PAGE_SIZE;
10552 bcopy(vector_upl->upls[i].elem->page_list, (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
10553 pagelist_size += cur_upl_pagelist_size;
10554 if (vector_upl->upls[i].elem->highest_page > upl->highest_page) {
10555 upl->highest_page = vector_upl->upls[i].elem->highest_page;
10556 }
10557 }
10558 assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
10559 } else {
10560 panic("vector_upl_set_pagelist was passed a non-vectored upl");
10561 }
10562 } else {
10563 panic("vector_upl_set_pagelist was passed a NULL upl");
10564 }
10565 }
10566
10567 upl_t
10568 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
10569 {
10570 if (vector_upl_is_valid(upl)) {
10571 vector_upl_t vector_upl = upl->vector_upl;
10572 if (vector_upl) {
10573 if (index < vector_upl->num_upls) {
10574 return vector_upl->upls[index].elem;
10575 }
10576 } else {
10577 panic("vector_upl_subupl_byindex was passed a non-vectored upl");
10578 }
10579 }
10580 return NULL;
10581 }
10582
10583 upl_t
10584 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
10585 {
10586 if (vector_upl_is_valid(upl)) {
10587 uint32_t i = 0;
10588 vector_upl_t vector_upl = upl->vector_upl;
10589
10590 if (vector_upl) {
10591 upl_t subupl = NULL;
10592 vector_upl_iostates_t subupl_state;
10593
10594 for (i = 0; i < vector_upl->num_upls; i++) {
10595 subupl = vector_upl->upls[i].elem;
10596 subupl_state = vector_upl->upls[i].iostate;
10597 if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
10598 /* We could have been passed an offset/size pair that belongs
10599 * to an UPL element that has already been committed/aborted.
10600 * If so, return NULL.
10601 */
10602 if (subupl == NULL) {
10603 return NULL;
10604 }
10605 if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
10606 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
10607 if (*upl_size > subupl_state.size) {
10608 *upl_size = subupl_state.size;
10609 }
10610 }
10611 if (*upl_offset >= subupl_state.offset) {
10612 *upl_offset -= subupl_state.offset;
10613 } else if (i) {
10614 panic("Vector UPL offset miscalculation");
10615 }
10616 return subupl;
10617 }
10618 }
10619 } else {
10620 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL");
10621 }
10622 }
10623 return NULL;
10624 }
10625
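/*
 * Return the submap and destination address previously recorded for
 * this vector UPL by vector_upl_set_submap().
 */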
10626 void
10627 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
10628 {
10629 *v_upl_submap = NULL;
10630
10631 if (vector_upl_is_valid(upl)) {
10632 vector_upl_t vector_upl = upl->vector_upl;
10633 if (vector_upl) {
10634 *v_upl_submap = vector_upl->submap;
10635 *submap_dst_addr = vector_upl->submap_dst_addr;
10636 } else {
10637 panic("vector_upl_get_submap was passed a non-vectored UPL");
10638 }
10639 } else {
10640 panic("vector_upl_get_submap was passed a null UPL");
10641 }
10642 }
10643
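/*
 * Record the submap and destination address used to map this vector UPL.
 */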
10644 void
10645 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
10646 {
10647 if (vector_upl_is_valid(upl)) {
10648 vector_upl_t vector_upl = upl->vector_upl;
10649 if (vector_upl) {
10650 vector_upl->submap = submap;
10651 vector_upl->submap_dst_addr = submap_dst_addr;
10652 } else {
10653 panic("vector_upl_get_submap was passed a non-vectored UPL");
10654 }
10655 } else {
10656 panic("vector_upl_get_submap was passed a NULL UPL");
10657 }
10658 }
10659
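/*
 * Record the I/O state (offset and size, rounded up to at least one
 * page) for the given sub-UPL of a vector UPL.
 */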
10660 void
10661 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
10662 {
10663 if (vector_upl_is_valid(upl)) {
10664 uint32_t i = 0;
10665 vector_upl_t vector_upl = upl->vector_upl;
10666
10667 if (vector_upl) {
10668 for (i = 0; i < vector_upl->num_upls; i++) {
10669 if (vector_upl->upls[i].elem == subupl) {
10670 break;
10671 }
10672 }
10673
10674 if (i == vector_upl->num_upls) {
10675 panic("setting sub-upl iostate when none exists");
10676 }
10677
10678 vector_upl->upls[i].iostate.offset = offset;
10679 if (size < PAGE_SIZE) {
10680 size = PAGE_SIZE;
10681 }
10682 vector_upl->upls[i].iostate.size = size;
10683 } else {
10684 panic("vector_upl_set_iostate was passed a non-vectored UPL");
10685 }
10686 } else {
10687 panic("vector_upl_set_iostate was passed a NULL UPL");
10688 }
10689 }
10690
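/*
 * Return the I/O state previously recorded for the given sub-UPL;
 * panics if the sub-UPL does not belong to this vector UPL.
 */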
10691 void
10692 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
10693 {
10694 if (vector_upl_is_valid(upl)) {
10695 uint32_t i = 0;
10696 vector_upl_t vector_upl = upl->vector_upl;
10697
10698 if (vector_upl) {
10699 for (i = 0; i < vector_upl->num_upls; i++) {
10700 if (vector_upl->upls[i].elem == subupl) {
10701 break;
10702 }
10703 }
10704
10705 if (i == vector_upl->num_upls) {
10706 panic("getting sub-upl iostate when none exists");
10707 }
10708
10709 *offset = vector_upl->upls[i].iostate.offset;
10710 *size = vector_upl->upls[i].iostate.size;
10711 } else {
10712 panic("vector_upl_get_iostate was passed a non-vectored UPL");
10713 }
10714 } else {
10715 panic("vector_upl_get_iostate was passed a NULL UPL");
10716 }
10717 }
10718
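/*
 * Like vector_upl_get_iostate(), but looks the sub-UPL up by index;
 * returns a zero offset and size if the index is out of range.
 */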
10719 void
10720 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
10721 {
10722 if (vector_upl_is_valid(upl)) {
10723 vector_upl_t vector_upl = upl->vector_upl;
10724 if (vector_upl) {
10725 if (index < vector_upl->num_upls) {
10726 *offset = vector_upl->upls[index].iostate.offset;
10727 *size = vector_upl->upls[index].iostate.size;
10728 } else {
10729 *offset = *size = 0;
10730 }
10731 } else {
10732 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL");
10733 }
10734 } else {
10735 panic("vector_upl_get_iostate_byindex was passed a NULL UPL");
10736 }
10737 }
10738
10739 void *
10740 upl_get_internal_vectorupl(upl_t upl)
10741 {
10742 return upl->vector_upl;
10743 }
10744
10745 upl_page_info_t *
10746 upl_get_internal_vectorupl_pagelist(upl_t upl)
10747 {
10748 return upl->vector_upl->pagelist;
10749 }
10750
10751 upl_page_info_t *
10752 upl_get_internal_page_list(upl_t upl)
10753 {
10754 return upl->vector_upl ? upl->vector_upl->pagelist : upl->page_list;
10755 }
10756
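/*
 * Set or clear the UPL_CLEAR_DIRTY flag on a UPL.
 */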
10757 void
10758 upl_clear_dirty(
10759 upl_t upl,
10760 boolean_t value)
10761 {
10762 if (value) {
10763 upl->flags |= UPL_CLEAR_DIRTY;
10764 } else {
10765 upl->flags &= ~UPL_CLEAR_DIRTY;
10766 }
10767 }
10768
10769 void
10770 upl_set_referenced(
10771 upl_t upl,
10772 boolean_t value)
10773 {
10774 upl_lock(upl);
10775 if (value) {
10776 upl->ext_ref_count++;
10777 } else {
10778 if (!upl->ext_ref_count) {
10779 panic("upl_set_referenced not %p", upl);
10780 }
10781 upl->ext_ref_count--;
10782 }
10783 upl_unlock(upl);
10784 }
10785
10786 #if CONFIG_IOSCHED
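/*
 * Record the disk block number and I/O size for each page of the UPL
 * covered by [upl_offset, upl_offset + io_size), so the pages can be
 * reprioritized (expedited) later; a no-op unless the UPL supports
 * expedite.
 */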
10787 void
10788 upl_set_blkno(
10789 upl_t upl,
10790 vm_offset_t upl_offset,
10791 int io_size,
10792 int64_t blkno)
10793 {
10794 int i, j;
10795 if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
10796 return;
10797 }
10798
10799 assert(upl->upl_reprio_info != 0);
10800 for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
10801 UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
10802 }
10803 }
10804 #endif
10805
10806 inline void
10807 memoryshot(unsigned int event, unsigned int control)
10808 {
10809 if (vm_debug_events) {
10810 KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
10811 vm_page_active_count, vm_page_inactive_count,
10812 vm_page_free_count, vm_page_speculative_count,
10813 vm_page_throttled_count);
10814 } else {
10815 (void) event;
10816 (void) control;
10817 }
10818 }
10819
10820 #ifdef MACH_BSD
10821
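/*
 * Thin accessors over the UPL_* page-info macros, exported for the
 * BSD side of the kernel.
 */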
10822 boolean_t
10823 upl_device_page(upl_page_info_t *upl)
10824 {
10825 return UPL_DEVICE_PAGE(upl);
10826 }
10827 boolean_t
10828 upl_page_present(upl_page_info_t *upl, int index)
10829 {
10830 return UPL_PAGE_PRESENT(upl, index);
10831 }
10832 boolean_t
10833 upl_speculative_page(upl_page_info_t *upl, int index)
10834 {
10835 return UPL_SPECULATIVE_PAGE(upl, index);
10836 }
10837 boolean_t
10838 upl_dirty_page(upl_page_info_t *upl, int index)
10839 {
10840 return UPL_DIRTY_PAGE(upl, index);
10841 }
10842 boolean_t
10843 upl_valid_page(upl_page_info_t *upl, int index)
10844 {
10845 return UPL_VALID_PAGE(upl, index);
10846 }
10847 ppnum_t
10848 upl_phys_page(upl_page_info_t *upl, int index)
10849 {
10850 return UPL_PHYS_PAGE(upl, index);
10851 }
10852
10853 void
10854 upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
10855 {
10856 upl[index].mark = v;
10857 }
10858
10859 boolean_t
10860 upl_page_get_mark(upl_page_info_t *upl, int index)
10861 {
10862 return upl[index].mark;
10863 }
10864
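/*
 * Debugging aid: walk the inactive, throttled, and anonymous queues,
 * then the active queue, counting dirty, pageout ("free when done"),
 * and precious pages, and print the totals.
 */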
10865 void
10866 vm_countdirtypages(void)
10867 {
10868 vm_page_t m;
10869 int dpages;
10870 int pgopages;
10871 int precpages;
10872
10873
10874 dpages = 0;
10875 pgopages = 0;
10876 precpages = 0;
10877
10878 vm_page_lock_queues();
10879 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
10880 do {
10881 if (m == (vm_page_t)0) {
10882 break;
10883 }
10884
10885 if (m->vmp_dirty) {
10886 dpages++;
10887 }
10888 if (m->vmp_free_when_done) {
10889 pgopages++;
10890 }
10891 if (m->vmp_precious) {
10892 precpages++;
10893 }
10894
10895 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
10896 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10897 if (m == (vm_page_t)0) {
10898 break;
10899 }
10900 } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
10901 vm_page_unlock_queues();
10902
10903 vm_page_lock_queues();
10904 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
10905 do {
10906 if (m == (vm_page_t)0) {
10907 break;
10908 }
10909
10910 dpages++;
10911 assert(m->vmp_dirty);
10912 assert(!m->vmp_free_when_done);
10913 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
10914 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10915 if (m == (vm_page_t)0) {
10916 break;
10917 }
10918 } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
10919 vm_page_unlock_queues();
10920
10921 vm_page_lock_queues();
10922 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
10923 do {
10924 if (m == (vm_page_t)0) {
10925 break;
10926 }
10927
10928 if (m->vmp_dirty) {
10929 dpages++;
10930 }
10931 if (m->vmp_free_when_done) {
10932 pgopages++;
10933 }
10934 if (m->vmp_precious) {
10935 precpages++;
10936 }
10937
10938 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
10939 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10940 if (m == (vm_page_t)0) {
10941 break;
10942 }
10943 } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
10944 vm_page_unlock_queues();
10945
10946 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
10947
10948 dpages = 0;
10949 pgopages = 0;
10950 precpages = 0;
10951
10952 vm_page_lock_queues();
10953 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
10954
10955 do {
10956 if (m == (vm_page_t)0) {
10957 break;
10958 }
10959 if (m->vmp_dirty) {
10960 dpages++;
10961 }
10962 if (m->vmp_free_when_done) {
10963 pgopages++;
10964 }
10965 if (m->vmp_precious) {
10966 precpages++;
10967 }
10968
10969 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
10970 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10971 if (m == (vm_page_t)0) {
10972 break;
10973 }
10974 } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
10975 vm_page_unlock_queues();
10976
10977 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
10978 }
10979 #endif /* MACH_BSD */
10980
10981
10982 #if CONFIG_IOSCHED
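/*
 * Return the I/O priority cached on this UPL if it is tracked by its
 * object, or -1 otherwise.
 */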
10983 int
10984 upl_get_cached_tier(upl_t upl)
10985 {
10986 assert(upl);
10987 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
10988 return upl->upl_priority;
10989 }
10990 return -1;
10991 }
10992 #endif /* CONFIG_IOSCHED */
10993
10994
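/*
 * Invoke the I/O completion callback registered via upl_set_iodone(),
 * if any, passing the stored context and error code.
 */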
10995 void
10996 upl_callout_iodone(upl_t upl)
10997 {
10998 struct upl_io_completion *upl_ctx = upl->upl_iodone;
10999
11000 if (upl_ctx) {
11001 void (*iodone_func)(void *, int) = upl_ctx->io_done;
11002
11003 assert(upl_ctx->io_done);
11004
11005 (*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
11006 }
11007 }
11008
11009 void
11010 upl_set_iodone(upl_t upl, void *upl_iodone)
11011 {
11012 upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
11013 }
11014
11015 void
11016 upl_set_iodone_error(upl_t upl, int error)
11017 {
11018 struct upl_io_completion *upl_ctx = upl->upl_iodone;
11019
11020 if (upl_ctx) {
11021 upl_ctx->io_error = error;
11022 }
11023 }
11024
11025
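/*
 * Return the highest physical page number covered by this UPL.
 */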
11026 ppnum_t
11027 upl_get_highest_page(
11028 upl_t upl)
11029 {
11030 return upl->highest_page;
11031 }
11032
11033 upl_size_t
11034 upl_get_size(
11035 upl_t upl)
11036 {
11037 return upl_adjusted_size(upl, PAGE_MASK);
11038 }
11039
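/*
 * Return the size of the UPL after expanding its (possibly unaligned)
 * offset range outward to the boundaries given by pgmask.
 */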
11040 upl_size_t
11041 upl_adjusted_size(
11042 upl_t upl,
11043 vm_map_offset_t pgmask)
11044 {
11045 vm_object_offset_t start_offset, end_offset;
11046
11047 start_offset = trunc_page_mask_64(upl->u_offset, pgmask);
11048 end_offset = round_page_mask_64(upl->u_offset + upl->u_size, pgmask);
11049
11050 return (upl_size_t)(end_offset - start_offset);
11051 }
11052
11053 vm_object_offset_t
11054 upl_adjusted_offset(
11055 upl_t upl,
11056 vm_map_offset_t pgmask)
11057 {
11058 return trunc_page_mask_64(upl->u_offset, pgmask);
11059 }
11060
11061 vm_object_offset_t
11062 upl_get_data_offset(
11063 upl_t upl)
11064 {
11065 return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK);
11066 }
11067
11068 upl_t
11069 upl_associated_upl(upl_t upl)
11070 {
11071 return upl->associated_upl;
11072 }
11073
11074 void
11075 upl_set_associated_upl(upl_t upl, upl_t associated_upl)
11076 {
11077 upl->associated_upl = associated_upl;
11078 }
11079
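/*
 * Return the vnode backing this UPL's map object when it is pager-backed,
 * or NULL if the object is internal (anonymous) memory.
 */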
11080 struct vnode *
11081 upl_lookup_vnode(upl_t upl)
11082 {
11083 if (!upl->map_object->internal) {
11084 return vnode_pager_lookup_vnode(upl->map_object->pager);
11085 } else {
11086 return NULL;
11087 }
11088 }
11089
11090 #if UPL_DEBUG
11091 kern_return_t
11092 upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
11093 {
11094 upl->ubc_alias1 = alias1;
11095 upl->ubc_alias2 = alias2;
11096 return KERN_SUCCESS;
11097 }
11098 int
11099 upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
11100 {
11101 if (al) {
11102 *al = upl->ubc_alias1;
11103 }
11104 if (al2) {
11105 *al2 = upl->ubc_alias2;
11106 }
11107 return KERN_SUCCESS;
11108 }
11109 #endif /* UPL_DEBUG */
11110
11111 #if VM_PRESSURE_EVENTS
11112 /*
11113 * Upward trajectory.
11114 */
11115 extern boolean_t vm_compressor_low_on_space(void);
11116
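/*
 * With the compressor inactive, these transitions are driven by
 * memorystatus_available_pages relative to the pressure and critical
 * thresholds; with the compressor active, by the amount of available
 * non-compressed memory.
 */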
11117 boolean_t
11118 VM_PRESSURE_NORMAL_TO_WARNING(void)
11119 {
11120 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11121 /* Available pages below our threshold */
11122 if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
11123 /* No frozen processes to kill */
11124 if (memorystatus_frozen_count == 0) {
11125 /* Not enough suspended processes available. */
11126 if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
11127 return TRUE;
11128 }
11129 }
11130 }
11131 return FALSE;
11132 } else {
11133 return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? TRUE : FALSE;
11134 }
11135 }
11136
11137 boolean_t
11138 VM_PRESSURE_WARNING_TO_CRITICAL(void)
11139 {
11140 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11141 /* Available pages below our threshold */
11142 if (memorystatus_available_pages < memorystatus_available_pages_critical) {
11143 return TRUE;
11144 }
11145 return FALSE;
11146 } else {
11147 return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? TRUE : FALSE;
11148 }
11149 }
11150
11151 /*
11152 * Downward trajectory.
11153 */
11154 boolean_t
11155 VM_PRESSURE_WARNING_TO_NORMAL(void)
11156 {
11157 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11158 /* Available pages above our threshold */
11159 unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
11160 if (memorystatus_available_pages > target_threshold) {
11161 return TRUE;
11162 }
11163 return FALSE;
11164 } else {
11165 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? TRUE : FALSE;
11166 }
11167 }
11168
11169 boolean_t
11170 VM_PRESSURE_CRITICAL_TO_WARNING(void)
11171 {
11172 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11173 /* Available pages above our threshold */
11174 unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
11175 if (memorystatus_available_pages > target_threshold) {
11176 return TRUE;
11177 }
11178 return FALSE;
11179 } else {
11180 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? TRUE : FALSE;
11181 }
11182 }
11183 #endif /* VM_PRESSURE_EVENTS */
11184
11185 #if DEVELOPMENT || DEBUG
11186 bool compressor_running_perf_test;
11187 uint64_t compressor_perf_test_pages_processed;
11188
11189 kern_return_t
11190 run_compressor_perf_test(
11191 user_addr_t buf,
11192 size_t buffer_size,
11193 uint64_t *time,
11194 uint64_t *bytes_compressed,
11195 uint64_t *compressor_growth);
11196
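/*
 * Walk [start_addr, start_addr + buffer_size) in the given map and move
 * every resident page of top-level, unwired, anonymous memory from its
 * paging queue onto the supplied local queue, clearing the referenced
 * bit so the pages are prime candidates for the compressor. The number
 * of pages moved is returned through *pages_moved.
 */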
11197 static kern_return_t
11198 move_pages_to_queue(
11199 vm_map_t map,
11200 user_addr_t start_addr,
11201 size_t buffer_size,
11202 vm_page_queue_head_t *queue,
11203 size_t *pages_moved)
11204 {
11205 kern_return_t err = KERN_SUCCESS;
11206 vm_map_entry_t curr_entry = VM_MAP_ENTRY_NULL;
11207 boolean_t addr_in_map = FALSE;
11208 user_addr_t end_addr = USER_ADDR_NULL, curr_addr = USER_ADDR_NULL;
11209 vm_object_t curr_object = VM_OBJECT_NULL;
11210 *pages_moved = 0;
11211
11212
11213 if (VM_MAP_PAGE_SIZE(map) != PAGE_SIZE_64) {
11214 /*
11215 * We don't currently support benchmarking maps with a different page size
11216 * than the kernel.
11217 */
11218 return KERN_INVALID_ARGUMENT;
11219 }
11220
11221 if (os_add_overflow(start_addr, buffer_size, &end_addr)) {
11222 return KERN_INVALID_ARGUMENT;
11223 }
11224
11225 vm_map_lock_read(map);
11226 curr_addr = vm_map_trunc_page_mask(start_addr, VM_MAP_PAGE_MASK(map));
11227 end_addr = vm_map_round_page_mask(start_addr + buffer_size, VM_MAP_PAGE_MASK(map));
11228
11229
11230 while (curr_addr < end_addr) {
11231 addr_in_map = vm_map_lookup_entry(map, curr_addr, &curr_entry);
11232 if (!addr_in_map) {
11233 err = KERN_INVALID_ARGUMENT;
11234 break;
11235 }
11236 curr_object = VME_OBJECT(curr_entry);
11237 if (curr_object) {
11238 vm_object_lock(curr_object);
11239 /* We really only want anonymous memory that's in the top level map and object here. */
11240 if (curr_entry->is_sub_map || curr_entry->wired_count != 0 ||
11241 curr_object->shadow != VM_OBJECT_NULL || !curr_object->internal) {
11242 err = KERN_INVALID_ARGUMENT;
11243 vm_object_unlock(curr_object);
11244 break;
11245 }
11246 vm_map_offset_t start_offset = (curr_addr - curr_entry->vme_start) + VME_OFFSET(curr_entry);
11247 vm_map_offset_t end_offset = MIN(curr_entry->vme_end, end_addr) -
11248 (curr_entry->vme_start + VME_OFFSET(curr_entry));
11249 vm_map_offset_t curr_offset = start_offset;
11250 vm_page_t curr_page;
11251 while (curr_offset < end_offset) {
11252 curr_page = vm_page_lookup(curr_object, vm_object_trunc_page(curr_offset));
11253 if (curr_page != VM_PAGE_NULL) {
11254 vm_page_lock_queues();
11255 if (curr_page->vmp_laundry) {
11256 vm_pageout_steal_laundry(curr_page, TRUE);
11257 }
11258 /*
11259 * we've already factored out pages in the laundry which
11260 * means this page can't be on the pageout queue so it's
11261 * safe to do the vm_page_queues_remove
11262 */
11263 bool donate = (curr_page->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE);
11264 vm_page_queues_remove(curr_page, TRUE);
11265 if (donate) {
11266 /*
11267 * The compressor needs to see this bit to know
11268 * where this page needs to land. Also if stolen,
11269 * this bit helps put the page back in the right
11270 * special queue where it belongs.
11271 */
11272 curr_page->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
11273 }
11274 // Clear the referenced bit so we ensure this gets paged out
11275 curr_page->vmp_reference = false;
11276 if (curr_page->vmp_pmapped) {
11277 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(curr_page),
11278 VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void*)NULL);
11279 }
11280 vm_page_queue_enter(queue, curr_page, vmp_pageq);
11281 vm_page_unlock_queues();
11282 *pages_moved += 1;
11283 }
11284 curr_offset += PAGE_SIZE_64;
11285 curr_addr += PAGE_SIZE_64;
11286 }
11287 }
11288 vm_object_unlock(curr_object);
11289 }
11290 vm_map_unlock_read(map);
11291 return err;
11292 }
11293
11294 /*
11295 * Local queue for processing benchmark pages.
11296 * Can't be allocated on the stack because the pointer has to
11297 * be packable.
11298 */
11299 vm_page_queue_head_t compressor_perf_test_queue VM_PAGE_PACKED_ALIGNED;
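/*
 * Compressor micro-benchmark: pull the caller's buffer pages onto a
 * private queue, hand them to the compressor thread, and report the
 * elapsed time, bytes submitted, and growth in compressed bytes.
 */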
11300 kern_return_t
11301 run_compressor_perf_test(
11302 user_addr_t buf,
11303 size_t buffer_size,
11304 uint64_t *time,
11305 uint64_t *bytes_compressed,
11306 uint64_t *compressor_growth)
11307 {
11308 kern_return_t err = KERN_SUCCESS;
11309 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
11310 return KERN_NOT_SUPPORTED;
11311 }
11312 if (current_task() == kernel_task) {
11313 return KERN_INVALID_ARGUMENT;
11314 }
11315 vm_page_lock_queues();
11316 if (compressor_running_perf_test) {
11317 /* Only run one instance of the benchmark at a time. */
11318 vm_page_unlock_queues();
11319 return KERN_RESOURCE_SHORTAGE;
11320 }
11321 vm_page_unlock_queues();
11322 size_t page_count = 0;
11323 vm_map_t map;
11324 vm_page_t p, next;
11325 uint64_t compressor_perf_test_start = 0, compressor_perf_test_end = 0;
11326 uint64_t compressed_bytes_start = 0, compressed_bytes_end = 0;
11327 *bytes_compressed = *compressor_growth = 0;
11328
11329 vm_page_queue_init(&compressor_perf_test_queue);
11330 map = current_task()->map;
11331 err = move_pages_to_queue(map, buf, buffer_size, &compressor_perf_test_queue, &page_count);
11332 if (err != KERN_SUCCESS) {
11333 goto out;
11334 }
11335
11336 vm_page_lock_queues();
11337 compressor_running_perf_test = true;
11338 compressor_perf_test_pages_processed = 0;
11339 /*
11340 * At this point the compressor threads should only process the benchmark queue
11341 * so we can look at the difference in c_segment_compressed_bytes while the perf test is running
11342 * to determine how many compressed bytes we ended up using.
11343 */
11344 compressed_bytes_start = c_segment_compressed_bytes;
11345 vm_page_unlock_queues();
11346
11347 page_count = vm_pageout_page_queue(&compressor_perf_test_queue, page_count, true);
11348
11349 vm_page_lock_queues();
11350 compressor_perf_test_start = mach_absolute_time();
11351
11352 // Wake up the compressor thread(s)
11353 sched_cond_signal(&pgo_iothread_internal_state[0].pgo_wakeup,
11354 pgo_iothread_internal_state[0].pgo_iothread);
11355
11356 /*
11357 * Depending on when this test is run we could overshoot or be right on the mark
11358 * with our page_count. So the comparison is of the _less than_ variety.
11359 */
11360 while (compressor_perf_test_pages_processed < page_count) {
11361 assert_wait((event_t) &compressor_perf_test_pages_processed, THREAD_UNINT);
11362 vm_page_unlock_queues();
11363 thread_block(THREAD_CONTINUE_NULL);
11364 vm_page_lock_queues();
11365 }
11366 compressor_perf_test_end = mach_absolute_time();
11367 compressed_bytes_end = c_segment_compressed_bytes;
11368 vm_page_unlock_queues();
11369
11370
11371 out:
11372 /*
11373 * If we errored out above, then we could still have some pages
11374 * on the local queue. Make sure to put them back on the active queue before
11375 * returning so they're not orphaned.
11376 */
11377 vm_page_lock_queues();
11378 absolutetime_to_nanoseconds(compressor_perf_test_end - compressor_perf_test_start, time);
11379 p = (vm_page_t) vm_page_queue_first(&compressor_perf_test_queue);
11380 while (p && !vm_page_queue_end(&compressor_perf_test_queue, (vm_page_queue_entry_t)p)) {
11381 next = (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next);
11382
11383 vm_page_enqueue_active(p, FALSE);
11384 p = next;
11385 }
11386
11387 compressor_running_perf_test = false;
11388 vm_page_unlock_queues();
11389 if (err == KERN_SUCCESS) {
11390 *bytes_compressed = page_count * PAGE_SIZE_64;
11391 *compressor_growth = compressed_bytes_end - compressed_bytes_start;
11392 }
11393
11394 /*
11395 * pageout_scan will consider waking the compactor swapper
11396 * before it blocks. Do the same thing here before we return
11397 * to ensure that back to back benchmark runs can't overly fragment the
11398 * compressor pool.
11399 */
11400 vm_consider_waking_compactor_swapper();
11401 return err;
11402 }
11403 #endif /* DEVELOPMENT || DEBUG */
11404