/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	The proverbial page-out daemon.
 */

#include <stdint.h>
#include <ptrauth.h>

#include <debug.h>
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/upl.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>

#include <kern/kern_types.h>
#include <kern/counter.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/pmap.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <san/kasan.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#if UPL_DEBUG
#include <libkern/OSDebug.h>
#endif

extern int cs_debug;

extern void mbuf_drain(boolean_t);

#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else /* CONFIG_JETSAM */
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif /* CONFIG_JETSAM */

extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;

extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
extern uint32_t memorystatus_jetsam_fg_band_waiters;

void vm_pressure_response(void);
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
#endif /* VM_PRESSURE_EVENTS */

SECURITY_READ_ONLY_LATE(thread_t) vm_pageout_scan_thread;
SECURITY_READ_ONLY_LATE(thread_t) vm_pageout_gc_thread;
boolean_t vps_dynamic_priority_enabled = FALSE;

#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#if !XNU_TARGET_OS_OSX
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
#else /* !XNU_TARGET_OS_OSX */
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif /* !XNU_TARGET_OS_OSX */
#endif

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100  /* number of pages to move to break deadlock */
#endif

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX 128UL       /* maximum pageouts on a given pageout queue */
#endif /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */
#endif /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */
#endif /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 100 /* milliseconds */
#endif /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
#endif /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */
#endif /* VM_PAGEOUT_SWAP_WAIT */


#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
#endif /* VM_PAGE_SPECULATIVE_TARGET */
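
/*
 * Worked example (illustrative note, not from the original source):
 * with a speculative percentage of, say, 5, the macro above computes
 * (total) / (100 / 5) == (total) / 20, i.e. 5% of "total".  Because
 * the inner division is integer division, a percentage that does not
 * divide 100 evenly (e.g. 3) yields 100 / 3 == 33, i.e. ~3.03% rather
 * than exactly 3%.
 */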


/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
#endif /* VM_PAGE_INACTIVE_TARGET */
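
/*
 * Worked example (illustrative): on a system with 1,000,000 pageable
 * (active + inactive) pages, VM_PAGE_INACTIVE_TARGET(1000000) evaluates
 * to 500,000, i.e. the daemon tries to keep half of the pageable pages
 * on the inactive queue so each one gets a chance at a second reference.
 */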

/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef VM_PAGE_FREE_TARGET
#if !XNU_TARGET_OS_OSX
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
#else /* !XNU_TARGET_OS_OSX */
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
#endif /* !XNU_TARGET_OS_OSX */
#endif /* VM_PAGE_FREE_TARGET */
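
/*
 * Worked example (illustrative): on a macOS configuration with
 * 4,000,000 managed pages, VM_PAGE_FREE_TARGET(4000000) evaluates to
 * 15 + 4000000 / 80 == 50,015 pages; the !XNU_TARGET_OS_OSX variant
 * divides by 100 instead, yielding a proportionally smaller target.
 */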


/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef VM_PAGE_FREE_MIN
#if !XNU_TARGET_OS_OSX
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
#else /* !XNU_TARGET_OS_OSX */
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif /* !XNU_TARGET_OS_OSX */
#endif /* VM_PAGE_FREE_MIN */

#if !XNU_TARGET_OS_OSX
#define VM_PAGE_FREE_RESERVED_LIMIT     100
#define VM_PAGE_FREE_MIN_LIMIT          1500
#define VM_PAGE_FREE_TARGET_LIMIT       2000
#else /* !XNU_TARGET_OS_OSX */
#define VM_PAGE_FREE_RESERVED_LIMIT     1700
#define VM_PAGE_FREE_MIN_LIMIT          3500
#define VM_PAGE_FREE_TARGET_LIMIT       4000
#endif /* !XNU_TARGET_OS_OSX */

/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n) \
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif /* VM_PAGE_FREE_RESERVED */
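
/*
 * Worked example (illustrative): with VM_PAGE_LAUNDRY_MAX at its
 * default of 128, VM_PAGE_FREE_RESERVED(n) evaluates to 768 + n, so a
 * caller passing n == 4 reserves 772 pages for vm-privileged threads.
 */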

/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (i.e., put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000

#ifndef VM_PAGE_REACTIVATE_LIMIT
#if !XNU_TARGET_OS_OSX
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else /* !XNU_TARGET_OS_OSX */
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif /* !XNU_TARGET_OS_OSX */
#endif /* VM_PAGE_REACTIVATE_LIMIT */
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
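
/*
 * Worked example (illustrative): on macOS with 2,000,000 available
 * pages, (avail) * 1 / 20 == 100,000, so the MAX() above yields
 * 100,000 reactivations per vm_pageout_scan() call; below 400,000
 * available pages, VM_PAGE_REACTIVATE_LIMIT_MAX (20,000) dominates.
 * Note that, despite its name, the _MAX constant acts as a lower
 * bound here because it is combined via MAX().
 */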

extern boolean_t hibernate_cleaning_in_progress;

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
	void                    *current_chead;
	char                    *scratch_buf;
	int                     id;
};

struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];


#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif

static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

boolean_t vm_pageout_running = FALSE;

uint32_t vm_page_upl_tainted = 0;
uint32_t vm_page_iopl_tainted = 0;

#if XNU_TARGET_OS_OSX
static boolean_t vm_pageout_waiter = FALSE;
#endif /* XNU_TARGET_OS_OSX */


#if DEVELOPMENT || DEBUG
struct vm_pageout_debug vm_pageout_debug;
#endif
struct vm_pageout_vminfo vm_pageout_vminfo;
struct vm_pageout_state vm_pageout_state;
struct vm_config vm_config;

struct vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED;
struct vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED;

int vm_upl_wait_for_pages = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;

int vm_debug_events = 0;

LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout");

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);

uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;

#endif

#if __AMP__
int vm_compressor_ebound = 1;
int vm_pgo_pbound = 0;
extern void thread_bind_cluster_type(thread_t, char, bool);
#endif /* __AMP__ */


/*
 * Routine:	vm_pageout_object_terminate
 * Purpose:
 *	Destroy the pageout_object, and perform all of the
 *	required cleanup actions.
 *
 * In/Out conditions:
 *	The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t object)
{
	vm_object_t shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!vm_page_queue_empty(&object->memq)) {
		vm_page_t p, m;
		vm_object_offset_t offset;

		p = (vm_page_t) vm_page_queue_first(&object->memq);

		assert(p->vmp_private);
		assert(p->vmp_free_when_done);
		p->vmp_free_when_done = FALSE;
		assert(!p->vmp_cleaning);
		assert(!p->vmp_laundry);

		offset = p->vmp_offset;
		VM_PAGE_FREE(p);
		p = VM_PAGE_NULL;

		m = vm_page_lookup(shadow_object,
		    offset + object->vo_shadow_offset);

		if (m == VM_PAGE_NULL) {
			continue;
		}

		assert((m->vmp_dirty) || (m->vmp_precious) ||
		    (m->vmp_busy && m->vmp_cleaning));

		/*
		 * Handle the trusted pager throttle.
		 * Also decrement the burst throttle (if external).
		 */
		vm_page_lock_queues();
		if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
			vm_pageout_throttle_up(m);
		}

		/*
		 * Handle the "target" page(s).  These pages are to be freed if
		 * successfully cleaned.  Target pages are always busy, and are
		 * wired exactly once.  The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		if (m->vmp_free_when_done) {
			assert(m->vmp_busy);
			assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
			assert(m->vmp_wire_count == 1);
			m->vmp_cleaning = FALSE;
			m->vmp_free_when_done = FALSE;
			/*
			 * Revoke all access to the page.  Since the object is
			 * locked, and the page is busy, this prevents the page
			 * from being dirtied after the pmap_disconnect() call
			 * returns.
			 *
			 * Since the page is left "dirty" but "not modified", we
			 * can detect whether the page was redirtied during
			 * pageout by checking the modify state.
			 */
			if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			} else {
				m->vmp_dirty = FALSE;
			}

			if (m->vmp_dirty) {
				vm_page_unwire(m, TRUE);        /* reactivates */
				counter_inc(&vm_statistics_reactivations);
				PAGE_WAKEUP_DONE(m);
			} else {
				vm_page_free(m);        /* clears busy, etc. */
			}
			vm_page_unlock_queues();
			continue;
		}
		/*
		 * Handle the "adjacent" pages.  These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
			if (m->vmp_reference) {
				vm_page_activate(m);
			} else {
				vm_page_deactivate(m);
			}
		}
		if (m->vmp_overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 */
			if (m->vmp_busy) {
				/*
				 * We do not re-set m->vmp_dirty !
				 * The page was busy so no extraneous activity
				 * could have occurred.  COPY_INTO is a read into the
				 * new pages.  CLEAN_IN_PLACE does actually write
				 * out the pages but handling outside of this code
				 * will take care of resetting dirty.  We clear the
				 * modify however for the Programmed I/O case.
				 */
				pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

				m->vmp_busy = FALSE;
				m->vmp_absent = FALSE;
			} else {
				/*
				 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
				 * Occurs when the original page was wired
				 * at the time of the list request
				 */
				assert(VM_PAGE_WIRED(m));
				vm_page_unwire(m, TRUE);        /* reactivates */
			}
			m->vmp_overwriting = FALSE;
		} else {
			m->vmp_dirty = FALSE;
		}
		m->vmp_cleaning = FALSE;

		/*
		 * Wakeup any thread waiting for the page to finish cleaning.
		 */
		PAGE_WAKEUP(m);
		vm_page_unlock_queues();
	}
	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_activity_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->resident_page_count == 0);
	return;
}

/*
 * Routine:	vm_pageclean_setup
 *
 * Purpose:	setup a page to be cleaned (made non-dirty), but not
 *		necessarily flushed from the VM page cache.
 *		This is accomplished by cleaning in place.
 *
 *		The page must not be busy, and new_object
 *		must be locked.
 *
 */
static void
vm_pageclean_setup(
	vm_page_t m,
	vm_page_t new_m,
	vm_object_t new_object,
	vm_object_offset_t new_offset)
{
	assert(!m->vmp_busy);
#if 0
	assert(!m->vmp_cleaning);
#endif

	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

	/*
	 * Mark original page as cleaning in place.
	 */
	m->vmp_cleaning = TRUE;
	SET_PAGE_DIRTY(m, FALSE);
	m->vmp_precious = FALSE;

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->vmp_fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
	new_m->vmp_fictitious = FALSE;
	new_m->vmp_private = TRUE;
	new_m->vmp_free_when_done = TRUE;
	VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));

	vm_page_lockspin_queues();
	vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
	vm_page_unlock_queues();

	vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
	assert(!new_m->vmp_wanted);
	new_m->vmp_busy = FALSE;
}

/*
 * Routine:	vm_pageout_initialize_page
 * Purpose:
 *	Causes the specified page to be initialized in
 *	the appropriate memory object.  This routine is used to push
 *	pages into a copy-object when they are modified in the
 *	permanent object.
 *
 *	The page is moved to a temporary object and paged out.
 *
 * In/out conditions:
 *	The page in question must not be on any pageout queues.
 *	The object to which it belongs must be locked.
 *	The page must be busy, but not hold a paging reference.
 *
 * Implementation:
 *	Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t m)
{
	vm_object_t object;
	vm_object_offset_t paging_offset;
	memory_object_t pager;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	object = VM_PAGE_OBJECT(m);

	assert(m->vmp_busy);
	assert(object->internal);

	/*
	 * Verify that we really want to clean this page
	 */
	assert(!m->vmp_absent);
	assert(!m->vmp_error);
	assert(m->vmp_dirty);

	/*
	 * Create a paging reference to let us play with the object.
	 */
	paging_offset = m->vmp_offset + object->paging_offset;

	if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
		panic("reservation without pageout?"); /* alan */

		VM_PAGE_FREE(m);
		vm_object_unlock(object);

		return;
	}

	/*
	 * If there's no pager, then we can't clean the page.  This should
	 * never happen since this should be a copy object and therefore not
	 * an external object, so the pager should always be there.
	 */

	pager = object->pager;

	if (pager == MEMORY_OBJECT_NULL) {
		panic("missing pager for copy object");

		VM_PAGE_FREE(m);
		return;
	}

	/*
	 * set the page for future call to vm_fault_list_request
	 */
	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * keep the object from collapsing or terminating
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

	vm_object_lock(object);
	vm_object_paging_end(object);
}


/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked.  We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;

int32_t vmct_active = 0;
uint64_t vm_compressor_epoch_start = 0;
uint64_t vm_compressor_epoch_stop = 0;

typedef enum vmct_state_t {
	VMCT_IDLE,
	VMCT_AWAKENED,
	VMCT_ACTIVE,
} vmct_state_t;
vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
#endif


void
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object = VM_PAGE_OBJECT(m);
	struct vm_pageout_queue *q;

	VM_PAGE_CHECK(m);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(object);

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
	assert(!m->vmp_cleaning && !m->vmp_laundry);
	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);

	/*
	 * protect the object from collapse or termination
	 */
	vm_object_activity_begin(object);

	if (object->internal == TRUE) {
		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

		m->vmp_busy = TRUE;

		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	/*
	 * pgo_laundry count is tied to the laundry bit
	 */
	m->vmp_laundry = TRUE;
	q->pgo_laundry++;

	m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
	vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);

	if (q->pgo_idle == TRUE) {
		q->pgo_idle = FALSE;
		thread_wakeup((event_t) &q->pgo_pending);
	}
	VM_PAGE_CHECK(m);
}


/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t m)
{
	struct vm_pageout_queue *q;
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(m_object != VM_OBJECT_NULL);
	assert(m_object != kernel_object);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(m_object);

	if (m_object->internal == TRUE) {
		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
		vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
		m->vmp_q_state = VM_PAGE_NOT_ON_Q;

		VM_PAGE_ZERO_PAGEQ_ENTRY(m);

		vm_object_activity_end(m_object);

		VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
	}
	if (m->vmp_laundry == TRUE) {
		m->vmp_laundry = FALSE;
		q->pgo_laundry--;

		if (q->pgo_throttled == TRUE) {
			q->pgo_throttled = FALSE;
			thread_wakeup((event_t) &q->pgo_laundry);
		}
		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
			q->pgo_draining = FALSE;
			thread_wakeup((event_t) (&q->pgo_laundry + 1));
		}
		VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
	}
}


static void
vm_pageout_throttle_up_batch(
	struct vm_pageout_queue *q,
	int batch_cnt)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);

	q->pgo_laundry -= batch_cnt;

	if (q->pgo_throttled == TRUE) {
		q->pgo_throttled = FALSE;
		thread_wakeup((event_t) &q->pgo_laundry);
	}
	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
		q->pgo_draining = FALSE;
		thread_wakeup((event_t) (&q->pgo_laundry + 1));
	}
}



/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
#if DEVELOPMENT || DEBUG
#define VM_PAGEOUT_STAT_SIZE    ((30 * 8) + 1)
#else
#define VM_PAGEOUT_STAT_SIZE    ((1 * 8) + 1)
#endif
struct vm_pageout_stat {
	unsigned long vm_page_active_count;
	unsigned long vm_page_speculative_count;
	unsigned long vm_page_inactive_count;
	unsigned long vm_page_anonymous_count;

	unsigned long vm_page_free_count;
	unsigned long vm_page_wire_count;
	unsigned long vm_page_compressor_count;

	unsigned long vm_page_pages_compressed;
	unsigned long vm_page_pageable_internal_count;
	unsigned long vm_page_pageable_external_count;
	unsigned long vm_page_xpmapped_external_count;

	unsigned int pages_grabbed;
	unsigned int pages_freed;

	unsigned int pages_compressed;
	unsigned int pages_grabbed_by_compressor;
	unsigned int failed_compressions;

	unsigned int pages_evicted;
	unsigned int pages_purged;

	unsigned int considered;
	unsigned int considered_bq_internal;
	unsigned int considered_bq_external;

	unsigned int skipped_external;
	unsigned int skipped_internal;
	unsigned int filecache_min_reactivations;

	unsigned int freed_speculative;
	unsigned int freed_cleaned;
	unsigned int freed_internal;
	unsigned int freed_external;

	unsigned int cleaned_dirty_external;
	unsigned int cleaned_dirty_internal;

	unsigned int inactive_referenced;
	unsigned int inactive_nolock;
	unsigned int reactivation_limit_exceeded;
	unsigned int forced_inactive_reclaim;

	unsigned int throttled_internal_q;
	unsigned int throttled_external_q;

	unsigned int phantom_ghosts_found;
	unsigned int phantom_ghosts_added;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };

unsigned int vm_pageout_stat_now = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
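
/*
 * Illustrative note: the two macros above implement a circular index
 * over vm_pageout_stats[].  With VM_PAGEOUT_STAT_SIZE == 9 (the
 * non-DEVELOPMENT sizing), VM_PAGEOUT_STAT_BEFORE(0) == 8 and
 * VM_PAGEOUT_STAT_AFTER(8) == 0, so stepping "before" from
 * vm_pageout_stat_now walks the buckets newest to oldest.
 */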

#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 80; /* in eighths of a second */
#endif /* VM_PAGE_BUCKETS_CHECK */


void
record_memory_pressure(void);
void
record_memory_pressure(void)
{
	unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
	/* check the consistency of VM page buckets at regular interval */
	static int counter = 0;
	if ((++counter % vm_page_buckets_check_interval) == 0) {
		vm_page_buckets_check();
	}
#endif /* VM_PAGE_BUCKETS_CHECK */

	vm_pageout_state.vm_memory_pressure =
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;

	commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure);

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);

	bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));

	vm_pageout_stat_now = vm_pageout_next;
}


/*
 * IMPORTANT
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot.  Therefore,
 * it must be safe in the restricted stackshot context.  Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}


/*
 * IMPORTANT:
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context.  No blocking or locks are allowed on
 * that code path.
 */

kern_return_t
mach_vm_pressure_monitor(
	boolean_t wait_for_pressure,
	unsigned int nsecs_monitored,
	unsigned int *pages_reclaimed_p,
	unsigned int *pages_wanted_p)
{
	wait_result_t wr;
	unsigned int vm_pageout_then, vm_pageout_now;
	unsigned int pages_reclaimed;
	unsigned int units_of_monitor;

	units_of_monitor = 8 * nsecs_monitored;
	/*
	 * We don't take the vm_page_queue_lock here because we don't want
	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
	 * thread when it's trying to reclaim memory.  We don't need fully
	 * accurate monitoring anyway...
	 */

	if (wait_for_pressure) {
		/* wait until there's memory pressure */
		while (vm_page_free_count >= vm_page_free_target) {
			wr = assert_wait((event_t) &vm_page_free_wanted,
			    THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}
			if (wr == THREAD_AWAKENED) {
				/*
				 * The memory pressure might have already
				 * been relieved but let's not block again
				 * and let's report that there was memory
				 * pressure at some point.
				 */
				break;
			}
		}
	}

	/* provide the number of pages the system wants to reclaim */
	if (pages_wanted_p != NULL) {
		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
	}

	if (pages_reclaimed_p == NULL) {
		return KERN_SUCCESS;
	}

	/* provide number of pages reclaimed in the last "nsecs_monitored" */
	vm_pageout_now = vm_pageout_stat_now;
	pages_reclaimed = 0;
	for (vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
	    vm_pageout_then != vm_pageout_now &&
	    units_of_monitor-- != 0;
	    vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
	}
	*pages_reclaimed_p = pages_reclaimed;

	return KERN_SUCCESS;
}



#if DEVELOPMENT || DEBUG

static void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;


void
vm_pageout_disconnect_all_pages()
{
	vm_page_lock_queues();

	if (vm_pageout_disconnect_all_pages_active == TRUE) {
		vm_page_unlock_queues();
		return;
	}
	vm_pageout_disconnect_all_pages_active = TRUE;
	vm_page_unlock_queues();

	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);

	vm_pageout_disconnect_all_pages_active = FALSE;
}


void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t m;
	vm_object_t t_object = NULL;
	vm_object_t l_object = NULL;
	vm_object_t m_object = NULL;
	int delayed_unlock = 0;
	int try_failed_count = 0;
	int disconnected_count = 0;
	int paused_count = 0;
	int object_locked_count = 0;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
	    q, qcount, 0, 0, 0);

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			/*
			 * the object associated with candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();
				delayed_unlock = 0;

				paused_count++;

				t_object = m_object;
				continue;
			}
			object_locked_count++;

			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

			disconnected_count++;
		}
reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);

		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
	    q, disconnected_count, object_locked_count, paused_count, 0);
}

#endif


static void
vm_pageout_page_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_anonymous_pages_active = FALSE;


void
vm_pageout_anonymous_pages()
{
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_page_lock_queues();

		if (vm_pageout_anonymous_pages_active == TRUE) {
			vm_page_unlock_queues();
			return;
		}
		vm_pageout_anonymous_pages_active = TRUE;
		vm_page_unlock_queues();

		vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
		vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
		vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);

		if (VM_CONFIG_SWAP_IS_PRESENT) {
			vm_consider_swapping();
		}

		vm_page_lock_queues();
		vm_pageout_anonymous_pages_active = FALSE;
		vm_page_unlock_queues();
	}
}


void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t m;
	vm_object_t t_object = NULL;
	vm_object_t l_object = NULL;
	vm_object_t m_object = NULL;
	int delayed_unlock = 0;
	int try_failed_count = 0;
	int refmod_state;
	int pmap_options;
	struct vm_pageout_queue *iq;
	ppnum_t phys_page;


	iq = &vm_pageout_queue_internal;

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		if (VM_PAGE_Q_THROTTLED(iq)) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();

			thread_block(THREAD_CONTINUE_NULL);

			vm_page_lock_queues();
			delayed_unlock = 0;
			continue;
		}
		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			if (!m_object->internal) {
				goto reenter_pg_on_q;
			}

			/*
			 * the object associated with candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();
				delayed_unlock = 0;

				t_object = m_object;
				continue;
			}
			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		phys_page = VM_PAGE_GET_PHYS_PAGE(m);

		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
			refmod_state = pmap_get_refmod(phys_page);

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}
		if (m->vmp_reference == TRUE) {
			m->vmp_reference = FALSE;
			pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			if (m->vmp_dirty || m->vmp_precious) {
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}
			refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		if (!m->vmp_dirty && !m->vmp_precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(m);
			vm_page_lock_queues();
			delayed_unlock = 0;

			goto next_pg;
		}
		if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
			if (!m_object->pager_initialized) {
				vm_page_unlock_queues();

				vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);

				if (!m_object->pager_initialized) {
					vm_object_compressor_pager_create(m_object);
				}

				vm_page_lock_queues();
				delayed_unlock = 0;
			}
			if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
				goto reenter_pg_on_q;
			}
			/*
			 * vm_object_compressor_pager_create will drop the object lock
			 * which means 'm' may no longer be valid to use
			 */
			continue;
		}
		/*
		 * we've already factored out pages in the laundry which
		 * means this page can't be on the pageout queue so it's
		 * safe to do the vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		vm_pageout_cluster(m);

		goto next_pg;

reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);
next_pg:
		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();
}



/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)            \
	MACRO_BEGIN                                             \
	/*                                                      \
	 * If a "reusable" page somehow made it back into       \
	 * the active queue, it's been re-used and is not       \
	 * quite re-usable.                                     \
	 * If the VM object was "all_reusable", consider it     \
	 * as "all re-used" instead of converting it to         \
	 * "partially re-used", which could be expensive.       \
	 */                                                     \
	assert(VM_PAGE_OBJECT((m)) == (obj));                   \
	if ((m)->vmp_reusable ||                                \
	    (obj)->all_reusable) {                              \
	        vm_object_reuse_pages((obj),                    \
	            (m)->vmp_offset,                            \
	            (m)->vmp_offset + PAGE_SIZE_64,             \
	            FALSE);                                     \
	}                                                       \
	MACRO_END


#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT         64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX     1024

#define FCS_IDLE                0
#define FCS_DELAYED             1
#define FCS_DEADLOCK_DETECTED   2

struct flow_control {
	int state;
	mach_timespec_t ts;
};
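
/*
 * Illustrative note (an interpretation, not original commentary):
 * flow_control.state tracks the scan's deadlock-detection progression.
 * When the laundry queues are throttled, the state moves
 * FCS_IDLE -> FCS_DELAYED and "ts" records a deadline; if the throttle
 * persists past that deadline, the state advances to
 * FCS_DEADLOCK_DETECTED and a batch of pages (VM_PAGEOUT_DEADLOCK_RELIEF)
 * is pushed out to break the stall.
 */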


#if CONFIG_BACKGROUND_QUEUE
uint64_t vm_pageout_rejected_bq_internal = 0;
uint64_t vm_pageout_rejected_bq_external = 0;
uint64_t vm_pageout_skipped_bq_internal = 0;
#endif

#define ANONS_GRABBED_LIMIT 2


#if 0
static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
#endif
static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);

#define VM_PAGEOUT_PB_NO_ACTION                         0
#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
#define VM_PAGEOUT_PB_THREAD_YIELD                      2


#if 0
static void
vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
{
	if (*local_freeq) {
		vm_page_unlock_queues();

		VM_DEBUG_CONSTANT_EVENT(
			vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
			vm_page_free_count, 0, 0, 1);

		vm_page_free_list(*local_freeq, TRUE);

		VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
		    vm_page_free_count, *local_freed, 0, 1);

		*local_freeq = NULL;
		*local_freed = 0;

		vm_page_lock_queues();
	} else {
		lck_mtx_yield(&vm_page_queue_lock);
	}
	*delayed_unlock = 1;
}
#endif


static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int action)
{
	vm_page_unlock_queues();

	if (*object != NULL) {
		vm_object_unlock(*object);
		*object = NULL;
	}
	if (*local_freeq) {
		vm_page_free_list(*local_freeq, TRUE);

		*local_freeq = NULL;
		*local_freed = 0;
	}
	*delayed_unlock = 1;

	switch (action) {
	case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
		vm_consider_waking_compactor_swapper();
		break;
	case VM_PAGEOUT_PB_THREAD_YIELD:
		thread_yield_internal(1);
		break;
	case VM_PAGEOUT_PB_NO_ACTION:
	default:
		break;
	}
	vm_page_lock_queues();
}


static struct vm_pageout_vminfo last;

uint64_t last_vm_page_pages_grabbed = 0;

extern uint32_t c_segment_pages_compressed;

extern uint64_t shared_region_pager_reclaimed;
extern struct memory_object_pager_ops shared_region_pager_ops;

void
update_vm_info(void)
{
	unsigned long tmp;
	uint64_t tmp64;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;


	tmp = vm_pageout_vminfo.vm_pageout_considered_page;
	vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
	last.vm_pageout_considered_page = tmp;

	tmp64 = vm_pageout_vminfo.vm_pageout_compressions;
	vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions);
	last.vm_pageout_compressions = tmp64;

	tmp = vm_pageout_vminfo.vm_compressor_failed;
	vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
	last.vm_compressor_failed = tmp;

	tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed;
	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed);
	last.vm_compressor_pages_grabbed = tmp64;

	tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
	last.vm_phantom_cache_found_ghost = tmp;

	tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
	last.vm_phantom_cache_added_ghost = tmp;

	tmp64 = counter_load(&vm_page_grab_count);
	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed);
	last_vm_page_pages_grabbed = tmp64;

	tmp = vm_pageout_vminfo.vm_page_pages_freed;
	vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
	last.vm_page_pages_freed = tmp;


	if (vm_pageout_stats[vm_pageout_stat_now].considered) {
		tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
		vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
		last.vm_pageout_pages_evicted = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
		vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
		last.vm_pageout_pages_purged = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
		vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
		last.vm_pageout_freed_speculative = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_external;
		vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
		last.vm_pageout_freed_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
		vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
		last.vm_pageout_inactive_referenced = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
		vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
		last.vm_pageout_scan_inactive_throttled_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
		last.vm_pageout_inactive_dirty_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
		vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
		last.vm_pageout_freed_cleaned = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
		vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
		last.vm_pageout_inactive_nolock = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
		vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
		last.vm_pageout_scan_inactive_throttled_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
		vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
		last.vm_pageout_skipped_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_skipped_internal;
		vm_pageout_stats[vm_pageout_stat_now].skipped_internal = (unsigned int)(tmp - last.vm_pageout_skipped_internal);
		last.vm_pageout_skipped_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
		vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
		last.vm_pageout_reactivation_limit_exceeded = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
		vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
		last.vm_pageout_inactive_force_reclaim = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
		vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
		last.vm_pageout_freed_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
		vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
		last.vm_pageout_considered_bq_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
		vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
		last.vm_pageout_considered_bq_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
		vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
		last.vm_pageout_filecache_min_reactivated = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
		last.vm_pageout_inactive_dirty_internal = tmp;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
	    0);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
	    0,
	    0);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
	    0);

	if (vm_pageout_stats[vm_pageout_stat_now].considered ||
	    vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
	    vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].considered,
		    vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
		    vm_pageout_stats[vm_pageout_stat_now].freed_external,
		    vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
		    vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
		    vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
		    vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
		    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
		    vm_pageout_stats[vm_pageout_stat_now].skipped_external,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
		    vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
		    vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
		    vm_pageout_stats[vm_pageout_stat_now].freed_internal,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
		    vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
		    0);
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
	    vm_pageout_stats[vm_pageout_stat_now].pages_freed,
	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
	    0);

	record_memory_pressure();
}
1746
1747 extern boolean_t hibernation_vmqueues_inspection;
1748
1749 /*
1750 * Return values for functions called by vm_pageout_scan
1751 * that control its flow.
1752 *
1753 * PROCEED -- vm_pageout_scan will keep making forward progress.
1754 * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
1755 * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan, i.e. continue.
1756 */
1757
1758 #define VM_PAGEOUT_SCAN_PROCEED (0)
1759 #define VM_PAGEOUT_SCAN_DONE_RETURN (1)
1760 #define VM_PAGEOUT_SCAN_NEXT_ITERATION (2)
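
/*
 * For orientation, vm_pageout_scan() dispatches on these codes in a uniform
 * pattern. A condensed sketch (the helper name is a placeholder, not a real
 * function):
 */
#if 0
	retval = vps_some_helper(...);

	if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
		continue;                  /* restart the main 'for' loop */
	} else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
		goto return_from_scan;     /* page demand satisfied, unwind */
	}
	/* VM_PAGEOUT_SCAN_PROCEED: fall through and keep scanning */
#endif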
1761
1762 /*
1763 * This function is called only from vm_pageout_scan and
1764 * it moves overflow secluded pages (one-at-a-time) to the
1765 * batched 'local' free Q or active Q.
1766 */
1767 static void
1768 vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
1769 {
1770 #if CONFIG_SECLUDED_MEMORY
1771 /*
1772 * Deal with secluded_q overflow.
1773 */
1774 if (vm_page_secluded_count > vm_page_secluded_target) {
1775 vm_page_t secluded_page;
1776
1777 /*
1778 * SECLUDED_AGING_BEFORE_ACTIVE:
1779 * Excess secluded pages go to the active queue and
1780 * will later go to the inactive queue.
1781 */
1782 assert((vm_page_secluded_count_free +
1783 vm_page_secluded_count_inuse) ==
1784 vm_page_secluded_count);
1785 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
1786 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
1787
1788 vm_page_queues_remove(secluded_page, FALSE);
1789 assert(!secluded_page->vmp_fictitious);
1790 assert(!VM_PAGE_WIRED(secluded_page));
1791
1792 if (secluded_page->vmp_object == 0) {
1793 /* transfer to free queue */
1794 assert(secluded_page->vmp_busy);
1795 secluded_page->vmp_snext = *local_freeq;
1796 *local_freeq = secluded_page;
1797 *local_freed += 1;
1798 } else {
1799 /* transfer to head of active queue */
1800 vm_page_enqueue_active(secluded_page, FALSE);
1801 secluded_page = VM_PAGE_NULL;
1802 }
1803 }
1804 #else /* CONFIG_SECLUDED_MEMORY */
1805
1806 #pragma unused(local_freeq)
1807 #pragma unused(local_freed)
1808
1809 return;
1810
1811 #endif /* CONFIG_SECLUDED_MEMORY */
1812 }
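
/*
 * Note on the 'local' free queue convention used above (and throughout this
 * file): reclaimed pages are pushed onto a thread-local singly-linked list
 * via vmp_snext and later handed to the free list in one batch, which
 * amortizes the cost of taking the global free-queue lock. Minimal sketch:
 */
#if 0
	page->vmp_snext = *local_freeq;   /* push onto the local chain */
	*local_freeq = page;
	*local_freed += 1;                /* pages pending in the batch */
#endif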
1813
1814 /*
1815 * This function is called only from vm_pageout_scan and
1816 * it initializes the loop targets for vm_pageout_scan().
1817 */
1818 static void
1819 vps_init_page_targets(void)
1820 {
1821 /*
1822 * LD TODO: Other page targets should be calculated here too.
1823 */
1824 vm_page_anonymous_min = vm_page_inactive_target / 20;
1825
1826 if (vm_pageout_state.vm_page_speculative_percentage > 50) {
1827 vm_pageout_state.vm_page_speculative_percentage = 50;
1828 } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
1829 vm_pageout_state.vm_page_speculative_percentage = 1;
1830 }
1831
1832 vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1833 vm_page_inactive_count);
1834 }
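
/*
 * For reference, VM_PAGE_SPECULATIVE_TARGET amounts to a simple percentage
 * of the pageable pool; modulo exact parenthesization of the definition in
 * vm_pageout.h, it is roughly:
 */
#if 0
#define VM_PAGE_SPECULATIVE_TARGET(total) \
	((total) * (vm_pageout_state.vm_page_speculative_percentage) / 100)
#endif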
1835
1836 /*
1837 * This function is called only from vm_pageout_scan and
1838 * it purges a single VM object at a time and will either
1839 * make vm_pageout_scan() restart the loop or keep moving forward.
1840 */
1841 static int
1842 vps_purge_object()
1843 {
1844 int force_purge;
1845
1846 assert(available_for_purge >= 0);
1847 force_purge = 0; /* no force-purging */
1848
1849 #if VM_PRESSURE_EVENTS
1850 vm_pressure_level_t pressure_level;
1851
1852 pressure_level = memorystatus_vm_pressure_level;
1853
1854 if (pressure_level > kVMPressureNormal) {
1855 if (pressure_level >= kVMPressureCritical) {
1856 force_purge = vm_pageout_state.memorystatus_purge_on_critical;
1857 } else if (pressure_level >= kVMPressureUrgent) {
1858 force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
1859 } else if (pressure_level >= kVMPressureWarning) {
1860 force_purge = vm_pageout_state.memorystatus_purge_on_warning;
1861 }
1862 }
1863 #endif /* VM_PRESSURE_EVENTS */
1864
1865 if (available_for_purge || force_purge) {
1866 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
1867
1868 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1869 if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
1870 VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
1871 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1872 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1873
1874 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1875 }
1876 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
1877 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1878 }
1879
1880 return VM_PAGEOUT_SCAN_PROCEED;
1881 }
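
/*
 * In effect: under normal pressure only ripe volatile objects (counted by
 * available_for_purge) are considered, while the memorystatus_purge_on_*
 * knobs let rising pressure levels force-purge progressively more volatile
 * objects that are not yet ripe.
 */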
1882
1883 /*
1884 * This function is called only from vm_pageout_scan and
1885 * it will try to age the next speculative Q if the oldest
1886 * one is empty.
1887 */
1888 static int
1889 vps_age_speculative_queue(boolean_t force_speculative_aging)
1890 {
1891 #define DELAY_SPECULATIVE_AGE 1000
1892
1893 /*
1894 * try to pull pages from the aging bins...
1895 * see vm_page.h for an explanation of how
1896 * this mechanism works
1897 */
1898 boolean_t can_steal = FALSE;
1899 int num_scanned_queues;
1900 static int delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop. */
1901 mach_timespec_t ts;
1902 struct vm_speculative_age_q *aq;
1903 struct vm_speculative_age_q *sq;
1904
1905 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1906
1907 aq = &vm_page_queue_speculative[speculative_steal_index];
1908
1909 num_scanned_queues = 0;
1910 while (vm_page_queue_empty(&aq->age_q) &&
1911 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1912 speculative_steal_index++;
1913
1914 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1915 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1916 }
1917
1918 aq = &vm_page_queue_speculative[speculative_steal_index];
1919 }
1920
1921 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
1922 /*
1923 * XXX We've scanned all the speculative
1924 * queues but still haven't found one
1925 * that is not empty, even though
1926 * vm_page_speculative_count is not 0.
1927 */
1928 if (!vm_page_queue_empty(&sq->age_q)) {
1929 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1930 }
1931 #if DEVELOPMENT || DEBUG
1932 panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
1933 #endif
1934 /* readjust... */
1935 vm_page_speculative_count = 0;
1936 /* ... and continue */
1937 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1938 }
1939
1940 if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
1941 can_steal = TRUE;
1942 } else {
1943 if (!delay_speculative_age) {
1944 mach_timespec_t ts_fully_aged;
1945
1946 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
1947 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
1948 * 1000 * NSEC_PER_USEC;
1949
1950 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
1951
1952 clock_sec_t sec;
1953 clock_nsec_t nsec;
1954 clock_get_system_nanotime(&sec, &nsec);
1955 ts.tv_sec = (unsigned int) sec;
1956 ts.tv_nsec = nsec;
1957
1958 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
1959 can_steal = TRUE;
1960 } else {
1961 delay_speculative_age++;
1962 }
1963 } else {
1964 delay_speculative_age++;
1965 if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
1966 delay_speculative_age = 0;
1967 }
1968 }
1969 }
1970 if (can_steal == TRUE) {
1971 vm_page_speculate_ageit(aq);
1972 }
1973
1974 return VM_PAGEOUT_SCAN_PROCEED;
1975 }
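
/*
 * The "fully aged" deadline above is plain ms -> mach_timespec_t arithmetic;
 * e.g. with illustrative values of VM_PAGE_MAX_SPECULATIVE_AGE_Q == 10 and
 * vm_page_speculative_q_age_ms == 500, a queue becomes steal-eligible once
 * 10 * 500 ms == 5 s have elapsed past its age_ts stamp.
 */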
1976
1977 /*
1978 * This function is called only from vm_pageout_scan and
1979 * it evicts a single VM object from the cache.
1980 */
1981 static inline int
1982 vps_object_cache_evict(vm_object_t *object_to_unlock)
1983 {
1984 static int cache_evict_throttle = 0;
1985 struct vm_speculative_age_q *sq;
1986
1987 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1988
1989 if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
1990 int pages_evicted;
1991
1992 if (*object_to_unlock != NULL) {
1993 vm_object_unlock(*object_to_unlock);
1994 *object_to_unlock = NULL;
1995 }
1996 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1997
1998 pages_evicted = vm_object_cache_evict(100, 10);
1999
2000 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
2001
2002 if (pages_evicted) {
2003 vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
2004
2005 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
2006 vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
2007 memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
2008
2009 /*
2010 * we just freed up to 100 pages,
2011 * so go back to the top of the main loop
2012 * and re-evaluate the memory situation
2013 */
2014 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2015 } else {
2016 cache_evict_throttle = 1000;
2017 }
2018 }
2019 if (cache_evict_throttle) {
2020 cache_evict_throttle--;
2021 }
2022
2023 return VM_PAGEOUT_SCAN_PROCEED;
2024 }
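
/*
 * Note on the throttle above: an unproductive eviction attempt arms
 * cache_evict_throttle at 1000, and it decays by one per call, so
 * vm_object_cache_evict() is retried at most once per ~1000 trips through
 * the main scan loop.
 */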
2025
2026
2027 /*
2028 * This function is called only from vm_pageout_scan and
2029 * it calculates the filecache minimum that needs to be maintained
2030 * as we start to steal pages.
2031 */
2032 static void
2033 vps_calculate_filecache_min(void)
2034 {
2035 int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
2036
2037 #if CONFIG_JETSAM
2038 /*
2039 * don't let the filecache_min fall below 15% of available memory
2040 * on systems with an active compressor that isn't nearing its
2041 * limits w/r to accepting new data
2042 *
2043 * on systems w/o the compressor/swapper, the filecache is always
2044 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2045 * since most (if not all) of the anonymous pages are in the
2046 * throttled queue (which isn't counted as available) which
2047 * effectively disables this filter
2048 */
2049 if (vm_compressor_low_on_space() || divisor == 0) {
2050 vm_pageout_state.vm_page_filecache_min = 0;
2051 } else {
2052 vm_pageout_state.vm_page_filecache_min =
2053 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2054 }
2055 #else
2056 if (vm_compressor_out_of_space() || divisor == 0) {
2057 vm_pageout_state.vm_page_filecache_min = 0;
2058 } else {
2059 /*
2060 * don't let the filecache_min fall below the specified critical level
2061 */
2062 vm_pageout_state.vm_page_filecache_min =
2063 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2064 }
2065 #endif
2066 if (vm_page_free_count < (vm_page_free_reserved / 4)) {
2067 vm_pageout_state.vm_page_filecache_min = 0;
2068 }
2069 }
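
/*
 * Worked example of the formula above (illustrative numbers only): with
 * AVAILABLE_NON_COMPRESSED_MEMORY == 100000 pages and a divisor of 66,
 *
 *	vm_page_filecache_min = (100000 * 10) / 66 ~= 15151 pages
 *
 * i.e. roughly the 15% floor the CONFIG_JETSAM comment refers to. A divisor
 * of 0 (or critically low free memory) disables the floor entirely.
 */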
2070
2071 /*
2072 * This function is called only from vm_pageout_scan and
2073 * it updates the flow control time to detect if vm_pageout_scan
2074 * isn't making progress.
2075 */
2076 static void
2077 vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
2078 {
2079 mach_timespec_t ts;
2080 clock_sec_t sec;
2081 clock_nsec_t nsec;
2082
2083 ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
2084 ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
2085 clock_get_system_nanotime(&sec, &nsec);
2086 flow_control->ts.tv_sec = (unsigned int) sec;
2087 flow_control->ts.tv_nsec = nsec;
2088 ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
2089
2090 flow_control->state = FCS_DELAYED;
2091
2092 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
2093 }
2094
2095 /*
2096 * This function is called only from vm_pageout_scan and
2097 * it is the flow control logic of VM pageout scan which
2098 * controls if it should block and for how long.
2099 * Any blocking of vm_pageout_scan happens ONLY in this function.
2100 */
2101 static int
2102 vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
2103 vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
2104 {
2105 boolean_t exceeded_burst_throttle = FALSE;
2106 unsigned int msecs = 0;
2107 uint32_t inactive_external_count;
2108 mach_timespec_t ts;
2109 struct vm_pageout_queue *iq;
2110 struct vm_pageout_queue *eq;
2111 struct vm_speculative_age_q *sq;
2112
2113 iq = &vm_pageout_queue_internal;
2114 eq = &vm_pageout_queue_external;
2115 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2116
2117 /*
2118 * Sometimes we have to pause:
2119 * 1) No inactive pages - nothing to do.
2120 * 2) Loop control - no acceptable pages found on the inactive queue
2121 * within the last vm_pageout_burst_inactive_throttle iterations
2122 * 3) Flow control - default pageout queue is full
2123 */
2124 if (vm_page_queue_empty(&vm_page_queue_inactive) &&
2125 vm_page_queue_empty(&vm_page_queue_anonymous) &&
2126 vm_page_queue_empty(&vm_page_queue_cleaned) &&
2127 vm_page_queue_empty(&sq->age_q)) {
2128 VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
2129 msecs = vm_pageout_state.vm_pageout_empty_wait;
2130 } else if (inactive_burst_count >=
2131 MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
2132 (vm_page_inactive_count +
2133 vm_page_speculative_count))) {
2134 VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
2135 msecs = vm_pageout_state.vm_pageout_burst_wait;
2136
2137 exceeded_burst_throttle = TRUE;
2138 } else if (VM_PAGE_Q_THROTTLED(iq) &&
2139 VM_DYNAMIC_PAGING_ENABLED()) {
2140 clock_sec_t sec;
2141 clock_nsec_t nsec;
2142
2143 switch (flow_control->state) {
2144 case FCS_IDLE:
2145 if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
2146 vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2147 /*
2148 * since the compressor is running independently of vm_pageout_scan
2149 * let's not wait for it just yet... as long as we have a healthy supply
2150 * of filecache pages to work with, let's keep stealing those.
2151 */
2152 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2153
2154 if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
2155 (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2156 *anons_grabbed = ANONS_GRABBED_LIMIT;
2157 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
2158 return VM_PAGEOUT_SCAN_PROCEED;
2159 }
2160 }
2161
2162 vps_flow_control_reset_deadlock_timer(flow_control);
2163 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2164
2165 break;
2166
2167 case FCS_DELAYED:
2168 clock_get_system_nanotime(&sec, &nsec);
2169 ts.tv_sec = (unsigned int) sec;
2170 ts.tv_nsec = nsec;
2171
2172 if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
2173 /*
2174 * the pageout thread for the default pager is potentially
2175 * deadlocked since the
2176 * default pager queue has been throttled for more than the
2177 * allowable time... we need to move some clean pages or dirty
2178 * pages belonging to the external pagers if they aren't throttled
2179 * vm_page_free_wanted represents the number of threads currently
2180 * blocked waiting for pages... we'll move one page for each of
2181 * these plus a fixed amount to break the logjam... once we're done
2182 * moving this number of pages, we'll re-enter the FCS_DELAYED state
2183 * with a new timeout target since we have no way of knowing
2184 * whether we've broken the deadlock except through observation
2185 * of the queue associated with the default pager... we need to
2186 * stop moving pages and allow the system to run to see what
2187 * state it settles into.
2188 */
2189
2190 *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
2191 vm_page_free_wanted + vm_page_free_wanted_privileged;
2192 VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
2193 flow_control->state = FCS_DEADLOCK_DETECTED;
2194 thread_wakeup(VM_PAGEOUT_GC_EVENT);
2195 return VM_PAGEOUT_SCAN_PROCEED;
2196 }
2197 /*
2198 * just resniff instead of trying
2199 * to compute a new delay time... we're going to be
2200 * awakened immediately upon a laundry completion,
2201 * so we won't wait any longer than necessary
2202 */
2203 msecs = vm_pageout_state.vm_pageout_idle_wait;
2204 break;
2205
2206 case FCS_DEADLOCK_DETECTED:
2207 if (*vm_pageout_deadlock_target) {
2208 return VM_PAGEOUT_SCAN_PROCEED;
2209 }
2210
2211 vps_flow_control_reset_deadlock_timer(flow_control);
2212 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2213
2214 break;
2215 }
2216 } else {
2217 /*
2218 * No need to pause...
2219 */
2220 return VM_PAGEOUT_SCAN_PROCEED;
2221 }
2222
2223 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2224
2225 vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
2226 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2227
2228 if (vm_page_free_count >= vm_page_free_target) {
2229 /*
2230 * we're here because
2231 * 1) someone else freed up some pages while we had
2232 * the queues unlocked above
2233 * and we've hit one of the 3 conditions that
2234 * cause us to pause the pageout scan thread
2235 *
2236 * since we already have enough free pages,
2237 * let's avoid stalling and return normally
2238 *
2239 * before we return, make sure the pageout I/O threads
2240 * are running throttled in case there are still requests
2241 * in the laundry... since we have enough free pages
2242 * we don't need the laundry to be cleaned in a timely
2243 * fashion... so let's avoid interfering with foreground
2244 * activity
2245 *
2246 * we don't want to hold vm_page_queue_free_lock when
2247 * calling vm_pageout_adjust_eq_iothrottle (since it
2248 * may cause other locks to be taken), we do the initial
2249 * check outside of the lock. Once we take the lock,
2250 * we recheck the condition since it may have changed.
2251 * if it has, no problem, we will make the threads
2252 * non-throttled before actually blocking
2253 */
2254 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2255 }
2256 lck_mtx_lock(&vm_page_queue_free_lock);
2257
2258 if (vm_page_free_count >= vm_page_free_target &&
2259 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2260 return VM_PAGEOUT_SCAN_DONE_RETURN;
2261 }
2262 lck_mtx_unlock(&vm_page_queue_free_lock);
2263
2264 if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
2265 /*
2266 * we're most likely about to block due to one of
2267 * the 3 conditions that cause vm_pageout_scan to
2268 * not be able to make forward progress w/r
2269 * to providing new pages to the free queue,
2270 * so unthrottle the I/O threads in case we
2271 * have laundry to be cleaned... it needs
2272 * to be completed ASAP.
2273 *
2274 * even if we don't block, we want the io threads
2275 * running unthrottled since the sum of free +
2276 * clean pages is still under our free target
2277 */
2278 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2279 }
2280 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
2281 /*
2282 * if we get here we're below our free target and
2283 * we're stalling due to a full laundry queue or
2284 * we don't have any inactive pages other than
2285 * those in the clean queue...
2286 * however, we have pages on the clean queue that
2287 * can be moved to the free queue, so let's not
2288 * stall the pageout scan
2289 */
2290 flow_control->state = FCS_IDLE;
2291 return VM_PAGEOUT_SCAN_PROCEED;
2292 }
2293 if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
2294 flow_control->state = FCS_IDLE;
2295 return VM_PAGEOUT_SCAN_PROCEED;
2296 }
2297
2298 VM_CHECK_MEMORYSTATUS;
2299
2300 if (flow_control->state != FCS_IDLE) {
2301 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
2302 }
2303
2304 iq->pgo_throttled = TRUE;
2305 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
2306
2307 vm_page_unlock_queues();
2308
2309 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2310
2311 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
2312 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2313 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
2314
2315 thread_block(THREAD_CONTINUE_NULL);
2316
2317 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
2318 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2319 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
2320
2321 vm_page_lock_queues();
2322
2323 iq->pgo_throttled = FALSE;
2324
2325 vps_init_page_targets();
2326
2327 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2328 }
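
/*
 * The blocking protocol at the tail of vps_flow_control() is the standard
 * arm-then-block idiom: the timed wait is armed while the queues lock is
 * still held, so a laundry-completion wakeup can't be lost. Condensed
 * sketch of just that idiom:
 */
#if 0
	iq->pgo_throttled = TRUE;
	assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE,
	    msecs, 1000 * NSEC_PER_USEC);    /* msecs expressed in ns units */
	vm_page_unlock_queues();
	thread_block(THREAD_CONTINUE_NULL);  /* laundry completion or timeout */
	vm_page_lock_queues();
	iq->pgo_throttled = FALSE;
#endif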
2329
2330 /*
2331 * This function is called only from vm_pageout_scan and
2332 * it will find and return the most appropriate page to be
2333 * reclaimed.
2334 */
2335 static int
2336 vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
2337 boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
2338 {
2339 vm_page_t m = NULL;
2340 vm_object_t m_object = VM_OBJECT_NULL;
2341 uint32_t inactive_external_count;
2342 struct vm_speculative_age_q *sq;
2343 struct vm_pageout_queue *iq;
2344 int retval = VM_PAGEOUT_SCAN_PROCEED;
2345
2346 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2347 iq = &vm_pageout_queue_internal;
2348
2349 *is_page_from_bg_q = FALSE;
2350
2351 m = NULL;
2352 m_object = VM_OBJECT_NULL;
2353
2354 if (VM_DYNAMIC_PAGING_ENABLED()) {
2355 assert(vm_page_throttled_count == 0);
2356 assert(vm_page_queue_empty(&vm_page_queue_throttled));
2357 }
2358
2359 /*
2360 * Try for a clean-queue inactive page.
2361 * These are pages that vm_pageout_scan tried to steal earlier, but
2362 * were dirty and had to be cleaned. Pick them up now that they are clean.
2363 */
2364 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2365 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2366
2367 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
2368
2369 goto found_page;
2370 }
2371
2372 /*
2373 * The next most eligible pages are ones we paged in speculatively,
2374 * but which have not yet been touched and have been aged out.
2375 */
2376 if (!vm_page_queue_empty(&sq->age_q)) {
2377 m = (vm_page_t) vm_page_queue_first(&sq->age_q);
2378
2379 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
2380
2381 if (!m->vmp_dirty || force_anonymous == FALSE) {
2382 goto found_page;
2383 } else {
2384 m = NULL;
2385 }
2386 }
2387
2388 #if CONFIG_BACKGROUND_QUEUE
2389 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
2390 vm_object_t bg_m_object = NULL;
2391
2392 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2393
2394 bg_m_object = VM_PAGE_OBJECT(m);
2395
2396 if (!VM_PAGE_PAGEABLE(m)) {
2397 /*
2398 * This page is on the background queue
2399 * but not on a pageable queue. This is
2400 * likely a transient state and whoever
2401 * took it out of its pageable queue
2402 * will likely put it back on a pageable
2403 * queue soon but we can't deal with it
2404 * at this point, so let's ignore this
2405 * page.
2406 */
2407 } else if (force_anonymous == FALSE || bg_m_object->internal) {
2408 if (bg_m_object->internal &&
2409 (VM_PAGE_Q_THROTTLED(iq) ||
2410 vm_compressor_out_of_space() == TRUE ||
2411 vm_page_free_count < (vm_page_free_reserved / 4))) {
2412 vm_pageout_skipped_bq_internal++;
2413 } else {
2414 *is_page_from_bg_q = TRUE;
2415
2416 if (bg_m_object->internal) {
2417 vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
2418 } else {
2419 vm_pageout_vminfo.vm_pageout_considered_bq_external++;
2420 }
2421 goto found_page;
2422 }
2423 }
2424 }
2425 #endif /* CONFIG_BACKGROUND_QUEUE */
2426
2427 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2428
2429 if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
2430 (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2431 *grab_anonymous = TRUE;
2432 *anons_grabbed = 0;
2433
2434 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2435 vm_pageout_vminfo.vm_pageout_skipped_external++;
2436 } else {
2437 if (vm_page_free_count < (COMPRESSOR_FREE_RESERVED_LIMIT * 2)) {
2438 /*
2439 * No swap and we are in dangerously low levels of free memory.
2440 * If we keep going ahead with anonymous pages, we are going to run into a situation
2441 * where the compressor will be stuck waiting for free pages (if it isn't already).
2442 *
2443 * So, pick a file backed page...
2444 */
2445 *grab_anonymous = FALSE;
2446 *anons_grabbed = ANONS_GRABBED_LIMIT;
2447 vm_pageout_vminfo.vm_pageout_skipped_internal++;
2448 }
2449 }
2450 goto want_anonymous;
2451 }
2452 *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
2453
2454 #if CONFIG_JETSAM
2455 /* If the file-backed pool has accumulated
2456 * significantly more pages than the jetsam
2457 * threshold, prefer to reclaim those
2458 * inline to minimise compute overhead of reclaiming
2459 * anonymous pages.
2460 * This calculation does not account for the CPU local
2461 * external page queues, as those are expected to be
2462 * much smaller relative to the global pools.
2463 */
2464
2465 struct vm_pageout_queue *eq = &vm_pageout_queue_external;
2466
2467 if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
2468 if (vm_page_pageable_external_count >
2469 vm_pageout_state.vm_page_filecache_min) {
2470 if ((vm_page_pageable_external_count *
2471 vm_pageout_memorystatus_fb_factor_dr) >
2472 (memorystatus_available_pages_critical *
2473 vm_pageout_memorystatus_fb_factor_nr)) {
2474 *grab_anonymous = FALSE;
2475
2476 VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
2477 }
2478 }
2479 if (*grab_anonymous) {
2480 VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
2481 }
2482 }
2483 #endif /* CONFIG_JETSAM */
2484
2485 want_anonymous:
2486 if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
2487 if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
2488 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2489
2490 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
2491 *anons_grabbed = 0;
2492
2493 if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
2494 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2495 if ((++(*reactivated_this_call) % 100)) {
2496 vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
2497
2498 vm_page_activate(m);
2499 counter_inc(&vm_statistics_reactivations);
2500 #if CONFIG_BACKGROUND_QUEUE
2501 #if DEVELOPMENT || DEBUG
2502 if (*is_page_from_bg_q == TRUE) {
2503 if (VM_PAGE_OBJECT(m)->internal) { /* m_object is never set on this path */
2504 vm_pageout_rejected_bq_internal++;
2505 } else {
2506 vm_pageout_rejected_bq_external++;
2507 }
2508 }
2509 #endif /* DEVELOPMENT || DEBUG */
2510 #endif /* CONFIG_BACKGROUND_QUEUE */
2511 vm_pageout_state.vm_pageout_inactive_used++;
2512
2513 m = NULL;
2514 retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
2515
2516 goto found_page;
2517 }
2518
2519 /*
2520 * steal 1 of the file backed pages even if
2521 * we are under the limit that has been set
2522 * for a healthy filecache
2523 */
2524 }
2525 }
2526 goto found_page;
2527 }
2528 }
2529 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2530 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2531
2532 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
2533 *anons_grabbed += 1;
2534
2535 goto found_page;
2536 }
2537
2538 m = NULL;
2539
2540 found_page:
2541 *victim_page = m;
2542
2543 return retval;
2544 }
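
/*
 * To summarize the selection order implemented above: cleaned pages first,
 * then aged speculative pages, then (if enabled and over target) the
 * background queue, and finally file-backed vs. anonymous inactive pages as
 * steered by grab_anonymous, anons_grabbed and the filecache floor.
 */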
2545
2546 /*
2547 * This function is called only from vm_pageout_scan and
2548 * it will put a page back on the active/inactive queue
2549 * if we can't reclaim it for some reason.
2550 */
2551 static void
2552 vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
2553 {
2554 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2555 vm_page_enqueue_inactive(m, FALSE);
2556 } else {
2557 vm_page_activate(m);
2558 }
2559
2560 #if CONFIG_BACKGROUND_QUEUE
2561 #if DEVELOPMENT || DEBUG
2562 vm_object_t m_object = VM_PAGE_OBJECT(m);
2563
2564 if (page_from_bg_q == TRUE) {
2565 if (m_object->internal) {
2566 vm_pageout_rejected_bq_internal++;
2567 } else {
2568 vm_pageout_rejected_bq_external++;
2569 }
2570 }
2571 #endif /* DEVELOPMENT || DEBUG */
2572 #endif /* CONFIG_BACKGROUND_QUEUE */
2573 }
2574
2575 /*
2576 * This function is called only from vm_pageout_scan and
2577 * it will try to grab the victim page's VM object (m_object)
2578 * which differs from the previous victim page's object (object).
2579 */
2580 static int
2581 vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
2582 {
2583 struct vm_speculative_age_q *sq;
2584
2585 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2586
2587 /*
2588 * the object associated with candidate page is
2589 * different from the one we were just working
2590 * with... dump the lock if we still own it
2591 */
2592 if (*object != NULL) {
2593 vm_object_unlock(*object);
2594 *object = NULL;
2595 }
2596 /*
2597 * Try to lock object; since we've already got the
2598 * page queues lock, we can only 'try' for this one.
2599 * If the 'try' fails, we need to do a mutex_pause
2600 * to allow the owner of the object lock a chance to
2601 * run... otherwise, we're likely to trip over this
2602 * object in the same state as we work our way through
2603 * the queue... clumps of pages associated with the same
2604 * object are fairly typical on the inactive and active queues
2605 */
2606 if (!vm_object_lock_try_scan(m_object)) {
2607 vm_page_t m_want = NULL;
2608
2609 vm_pageout_vminfo.vm_pageout_inactive_nolock++;
2610
2611 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2612 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
2613 }
2614
2615 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
2616
2617 m->vmp_reference = FALSE;
2618
2619 if (!m_object->object_is_shared_cache) {
2620 /*
2621 * don't apply this optimization if this is the shared cache
2622 * object, it's too easy to get rid of very hot and important
2623 * pages...
2624 * m->vmp_object must be stable since we hold the page queues lock...
2625 * we can update the scan_collisions field sans the object lock
2626 * since it is a separate field and this is the only spot that does
2627 * a read-modify-write operation and it is never executed concurrently...
2628 * we can asynchronously set this field to 0 when creating a UPL, so it
2629 * is possible for the value to be a bit non-deterministic, but that's ok
2630 * since it's only used as a hint
2631 */
2632 m_object->scan_collisions = 1;
2633 }
2634 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2635 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2636 } else if (!vm_page_queue_empty(&sq->age_q)) {
2637 m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
2638 } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
2639 !vm_page_queue_empty(&vm_page_queue_inactive)) {
2640 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2641 } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2642 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2643 }
2644
2645 /*
2646 * this is the next object we're going to be interested in
2647 * try to make sure it's available after the mutex_pause
2648 * returns control
2649 */
2650 if (m_want) {
2651 vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
2652 }
2653
2654 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
2655
2656 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2657 } else {
2658 *object = m_object;
2659 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2660 }
2661
2662 return VM_PAGEOUT_SCAN_PROCEED;
2663 }
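
/*
 * The try-lock above follows this file's lock-ordering rule: with the page
 * queues lock held, the object lock may only be trylocked. On failure the
 * victim is requeued and the wanted object is recorded; the actual back-off
 * (mutex_pause) happens in vm_pageout_scan()'s lock_yield_check path,
 * roughly:
 */
#if 0
	if (vm_pageout_scan_wants_object) {
		vm_page_unlock_queues();
		mutex_pause(0);    /* give the object-lock owner a chance to run */
		vm_page_lock_queues();
	}
#endif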
2664
2665 /*
2666 * This function is called only from vm_pageout_scan and
2667 * it notices that pageout scan may be rendered ineffective
2668 * due to a FS deadlock and will jetsam a process if possible.
2669 * If jetsam isn't supported, it'll move the page to the active
2670 * queue to try and get some different pages pushed onwards so
2671 * we can try to get out of this scenario.
2672 */
2673 static void
2674 vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
2675 int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
2676 {
2677 struct vm_pageout_queue *eq;
2678 vm_object_t cur_object = VM_OBJECT_NULL;
2679
2680 cur_object = *object;
2681
2682 eq = &vm_pageout_queue_external;
2683
2684 if (cur_object->internal == FALSE) {
2685 /*
2686 * we need to break up the following potential deadlock case...
2687 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2688 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2689 * c) Most of the pages in the inactive queue belong to this file.
2690 *
2691 * we are potentially in this deadlock because...
2692 * a) the external pageout queue is throttled
2693 * b) we're done with the active queue and moved on to the inactive queue
2694 * c) we've got a dirty external page
2695 *
2696 * since we don't know the reason for the external pageout queue being throttled we
2697 * must suspect that we are deadlocked, so move the current page onto the active queue
2698 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2699 *
2700 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2701 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2702 * pool the next time we select a victim page... if we can make enough new free pages,
2703 * the deadlock will break, the external pageout queue will empty and it will no longer
2704 * be throttled
2705 *
2706 * if we have jetsam configured, keep a count of the pages reactivated this way so
2707 * that we can try to find clean pages in the active/inactive queues before
2708 * deciding to jetsam a process
2709 */
2710 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
2711
2712 vm_page_check_pageable_safe(m);
2713 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
2714 vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
2715 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
2716 vm_page_active_count++;
2717 vm_page_pageable_external_count++;
2718
2719 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2720
2721 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2722
2723 #pragma unused(force_anonymous)
2724
2725 *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
2726
2727 if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
2728 *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2729 /*
2730 * Possible deadlock scenario so request jetsam action
2731 */
2732
2733 assert(cur_object);
2734 vm_object_unlock(cur_object);
2735
2736 cur_object = VM_OBJECT_NULL;
2737
2738 /*
2739 * VM pageout scan needs to know we have dropped this lock and so set the
2740 * object variable we got passed in to NULL.
2741 */
2742 *object = VM_OBJECT_NULL;
2743
2744 vm_page_unlock_queues();
2745
2746 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
2747 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2748
2749 /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
2750 if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
2751 VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
2752 }
2753
2754 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
2755 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2756
2757 vm_page_lock_queues();
2758 *delayed_unlock = 1;
2759 }
2760 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2761
2762 #pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
2763 #pragma unused(delayed_unlock)
2764
2765 *force_anonymous = TRUE;
2766 #endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2767 } else {
2768 vm_page_activate(m);
2769 counter_inc(&vm_statistics_reactivations);
2770
2771 #if CONFIG_BACKGROUND_QUEUE
2772 #if DEVELOPMENT || DEBUG
2773 if (is_page_from_bg_q == TRUE) {
2774 if (cur_object->internal) {
2775 vm_pageout_rejected_bq_internal++;
2776 } else {
2777 vm_pageout_rejected_bq_external++;
2778 }
2779 }
2780 #endif /* DEVELOPMENT || DEBUG */
2781 #endif /* CONFIG_BACKGROUND_QUEUE */
2782
2783 vm_pageout_state.vm_pageout_inactive_used++;
2784 }
2785 }
2786
2787
2788 void
2789 vm_page_balance_inactive(int max_to_move)
2790 {
2791 vm_page_t m;
2792
2793 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2794
2795 if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
2796 /*
2797 * It is likely that the hibernation code path is
2798 * dealing with these very queues as we are about
2799 * to move pages around in/from them and completely
2800 * change the linkage of the pages.
2801 *
2802 * And so we skip the rebalancing of these queues.
2803 */
2804 return;
2805 }
2806 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2807 vm_page_inactive_count +
2808 vm_page_speculative_count);
2809
2810 while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
2811 VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
2812
2813 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
2814
2815 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
2816 assert(!m->vmp_laundry);
2817 assert(VM_PAGE_OBJECT(m) != kernel_object);
2818 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
2819
2820 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2821
2822 /*
2823 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
2824 *
2825 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
2826 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
2827 * new reference happens. If no further references happen on the page after that remote TLB flushes
2828 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
2829 * by pageout_scan, which is just fine since the last reference would have happened quite far
2830 * in the past (TLB caches don't hang around for very long), and of course could just as easily
2831 * have happened before we moved the page
2832 */
2833 if (m->vmp_pmapped == TRUE) {
2834 /*
2835 * We might be holding the page queue lock as a
2836 * spin lock and clearing the "referenced" bit could
2837 * take a while if there are lots of mappings of
2838 * that page, so make sure we acquire the lock as
2839 * a mutex to avoid a spinlock timeout.
2840 */
2841 vm_page_lockconvert_queues();
2842 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
2843 }
2844
2845 /*
2846 * The page might be absent or busy,
2847 * but vm_page_deactivate can handle that.
2848 * FALSE indicates that we don't want a H/W clear reference
2849 */
2850 vm_page_deactivate_internal(m, FALSE);
2851 }
2852 }
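
/*
 * VM_PAGE_INACTIVE_TARGET is, to a first approximation, a fixed fraction of
 * the pageable pool. The exact macro lives in vm_pageout.h; an assumed
 * sketch for orientation only (the fraction shown here is illustrative):
 */
#if 0
#define VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 3)
#endif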
2853
2854
2855 /*
2856 * vm_pageout_scan does the dirty work for the pageout daemon.
2857 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
2858 * held and vm_page_free_wanted == 0.
2859 */
2860 void
2861 vm_pageout_scan(void)
2862 {
2863 unsigned int loop_count = 0;
2864 unsigned int inactive_burst_count = 0;
2865 unsigned int reactivated_this_call;
2866 unsigned int reactivate_limit;
2867 vm_page_t local_freeq = NULL;
2868 int local_freed = 0;
2869 int delayed_unlock;
2870 int delayed_unlock_limit = 0;
2871 int refmod_state = 0;
2872 int vm_pageout_deadlock_target = 0;
2873 struct vm_pageout_queue *iq;
2874 struct vm_pageout_queue *eq;
2875 struct vm_speculative_age_q *sq;
2876 struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
2877 boolean_t inactive_throttled = FALSE;
2878 vm_object_t object = NULL;
2879 uint32_t inactive_reclaim_run;
2880 boolean_t grab_anonymous = FALSE;
2881 boolean_t force_anonymous = FALSE;
2882 boolean_t force_speculative_aging = FALSE;
2883 int anons_grabbed = 0;
2884 int page_prev_q_state = 0;
2885 boolean_t page_from_bg_q = FALSE;
2886 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
2887 vm_object_t m_object = VM_OBJECT_NULL;
2888 int retval = 0;
2889 boolean_t lock_yield_check = FALSE;
2890
2891
2892 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
2893 vm_pageout_vminfo.vm_pageout_freed_speculative,
2894 vm_pageout_state.vm_pageout_inactive_clean,
2895 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
2896 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
2897
2898 flow_control.state = FCS_IDLE;
2899 iq = &vm_pageout_queue_internal;
2900 eq = &vm_pageout_queue_external;
2901 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2902
2903 /* Ask the pmap layer to return any pages it no longer needs. */
2904 uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
2905
2906 vm_page_lock_queues();
2907
2908 vm_page_wire_count -= pmap_wired_pages_freed;
2909
2910 delayed_unlock = 1;
2911
2912 /*
2913 * Calculate the max number of referenced pages on the inactive
2914 * queue that we will reactivate.
2915 */
2916 reactivated_this_call = 0;
2917 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
2918 vm_page_inactive_count);
2919 inactive_reclaim_run = 0;
2920
2921 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2922
2923 /*
2924 * We must limit the rate at which we send pages to the pagers
2925 * so that we don't tie up too many pages in the I/O queues.
2926 * We implement a throttling mechanism using the laundry count
2927 * to limit the number of pages outstanding to the default
2928 * and external pagers. We can bypass the throttles and look
2929 * for clean pages if the pageout queues don't drain in a timely
2930 * fashion since this may indicate that the pageout paths are
2931 * stalled waiting for memory, which only we can provide.
2932 */
2933
2934 vps_init_page_targets();
2935 assert(object == NULL);
2936 assert(delayed_unlock != 0);
2937
2938 for (;;) {
2939 vm_page_t m;
2940
2941 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
2942
2943 if (lock_yield_check) {
2944 lock_yield_check = FALSE;
2945
2946 if (delayed_unlock++ > delayed_unlock_limit) {
2947 int freed = local_freed;
2948
2949 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2950 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2951 if (freed == 0) {
2952 lck_mtx_yield(&vm_page_queue_lock);
2953 }
2954 } else if (vm_pageout_scan_wants_object) {
2955 vm_page_unlock_queues();
2956 mutex_pause(0);
2957 vm_page_lock_queues();
2958 }
2959 }
2960
2961 if (vm_upl_wait_for_pages < 0) {
2962 vm_upl_wait_for_pages = 0;
2963 }
2964
2965 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
2966
2967 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
2968 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
2969 }
2970
2971 vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
2972
2973 assert(delayed_unlock);
2974
2975 /*
2976 * maintain our balance
2977 */
2978 vm_page_balance_inactive(1);
2979
2980
2981 /**********************************************************************
2982 * above this point we're playing with the active and secluded queues
2983 * below this point we're playing with the throttling mechanisms
2984 * and the inactive queue
2985 **********************************************************************/
2986
2987 if (vm_page_free_count + local_freed >= vm_page_free_target) {
2988 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2989
2990 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2991 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2992 /*
2993 * make sure the pageout I/O threads are running
2994 * throttled in case there are still requests
2995 * in the laundry... since we have met our targets
2996 * we don't need the laundry to be cleaned in a timely
2997 * fashion... so let's avoid interfering with foreground
2998 * activity
2999 */
3000 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
3001
3002 lck_mtx_lock(&vm_page_queue_free_lock);
3003
3004 if ((vm_page_free_count >= vm_page_free_target) &&
3005 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
3006 /*
3007 * done - we have met our target *and*
3008 * there is no one waiting for a page.
3009 */
3010 return_from_scan:
3011 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
3012
3013 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
3014 vm_pageout_state.vm_pageout_inactive,
3015 vm_pageout_state.vm_pageout_inactive_used, 0, 0);
3016 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
3017 vm_pageout_vminfo.vm_pageout_freed_speculative,
3018 vm_pageout_state.vm_pageout_inactive_clean,
3019 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
3020 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
3021
3022 return;
3023 }
3024 lck_mtx_unlock(&vm_page_queue_free_lock);
3025 }
3026
3027 /*
3028 * Before anything, we check if we have any ripe volatile
3029 * objects around. If so, try to purge the first object.
3030 * If the purge fails, fall through to reclaim a page instead.
3031 * If the purge succeeds, go back to the top and reevaluate
3032 * the new memory situation.
3033 */
3034 retval = vps_purge_object();
3035
3036 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3037 /*
3038 * Success
3039 */
3040 if (object != NULL) {
3041 vm_object_unlock(object);
3042 object = NULL;
3043 }
3044
3045 lock_yield_check = FALSE;
3046 continue;
3047 }
3048
3049 /*
3050 * If our 'aged' queue is empty and we have some speculative pages
3051 * in the other queues, let's go through and see if we need to age
3052 * them.
3053 *
3054 * If we succeeded in aging a speculative Q or just that everything
3055 * looks normal w.r.t queue age and queue counts, we keep going onward.
3056 *
3057 * If, for some reason, we seem to have a mismatch between the spec.
3058 * page count and the page queues, we reset those variables and
3059 * restart the loop (LD TODO: Track this better?).
3060 */
3061 if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
3062 retval = vps_age_speculative_queue(force_speculative_aging);
3063
3064 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3065 lock_yield_check = FALSE;
3066 continue;
3067 }
3068 }
3069 force_speculative_aging = FALSE;
3070
3071 /*
3072 * Check to see if we need to evict objects from the cache.
3073 *
3074 * Note: 'object' here doesn't have anything to do with
3075 * the eviction part. We just need to make sure we have dropped
3076 * any object lock we might be holding if we need to go down
3077 * into the eviction logic.
3078 */
3079 retval = vps_object_cache_evict(&object);
3080
3081 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3082 lock_yield_check = FALSE;
3083 continue;
3084 }
3085
3086
3087 /*
3088 * Calculate our filecache_min that will affect the loop
3089 * going forward.
3090 */
3091 vps_calculate_filecache_min();
3092
3093 /*
3094 * LD TODO: Use a structure to hold all state variables for a single
3095 * vm_pageout_scan iteration and pass that structure to this function instead.
3096 */
3097 retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
3098 &delayed_unlock, &local_freeq, &local_freed,
3099 &vm_pageout_deadlock_target, inactive_burst_count);
3100
3101 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3102 if (loop_count >= vm_page_inactive_count) {
3103 loop_count = 0;
3104 }
3105
3106 inactive_burst_count = 0;
3107
3108 assert(object == NULL);
3109 assert(delayed_unlock != 0);
3110
3111 lock_yield_check = FALSE;
3112 continue;
3113 } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
3114 goto return_from_scan;
3115 }
3116
3117 flow_control.state = FCS_IDLE;
3118
3119 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
3120 vm_pageout_inactive_external_forced_reactivate_limit);
3121 loop_count++;
3122 inactive_burst_count++;
3123 vm_pageout_state.vm_pageout_inactive++;
3124
3125 /*
3126 * Choose a victim.
3127 */
3128
3129 m = NULL;
3130 retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);
3131
3132 if (m == NULL) {
3133 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3134 inactive_burst_count = 0;
3135
3136 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3137 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3138 }
3139
3140 lock_yield_check = TRUE;
3141 continue;
3142 }
3143
3144 /*
3145 * if we've gotten here, we have no victim page.
3146 * check to see if we've not finished balancing the queues
3147 * or we have a page on the aged speculative queue that we
3148 * skipped due to force_anonymous == TRUE... or we have
3149 * speculative pages that we can prematurely age... in
3150 * any of these cases we'll keep going, else panic
3151 */
3152 force_anonymous = FALSE;
3153 VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
3154
3155 if (!vm_page_queue_empty(&sq->age_q)) {
3156 lock_yield_check = TRUE;
3157 continue;
3158 }
3159
3160 if (vm_page_speculative_count) {
3161 force_speculative_aging = TRUE;
3162 lock_yield_check = TRUE;
3163 continue;
3164 }
3165 panic("vm_pageout: no victim");
3166
3167 /* NOTREACHED */
3168 }
3169
3170 assert(VM_PAGE_PAGEABLE(m));
3171 m_object = VM_PAGE_OBJECT(m);
3172 force_anonymous = FALSE;
3173
3174 page_prev_q_state = m->vmp_q_state;
3175 /*
3176 * we just found this page on one of our queues...
3177 * it can't also be on the pageout queue, so safe
3178 * to call vm_page_queues_remove
3179 */
3180 vm_page_queues_remove(m, TRUE);
3181
3182 assert(!m->vmp_laundry);
3183 assert(!m->vmp_private);
3184 assert(!m->vmp_fictitious);
3185 assert(m_object != kernel_object);
3186 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
3187
3188 vm_pageout_vminfo.vm_pageout_considered_page++;
3189
3190 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
3191
3192 /*
3193 * check to see if we currently are working
3194 * with the same object... if so, we've
3195 * already got the lock
3196 */
3197 if (m_object != object) {
3198 boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
3199
3200 /*
3201 * vps_switch_object() will always drop the 'object' lock first
3202 * and then try to acquire the 'm_object' lock. So 'object' has to point to
3203 * either 'm_object' or NULL.
3204 */
3205 retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
3206
3207 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3208 lock_yield_check = TRUE;
3209 continue;
3210 }
3211 }
3212 assert(m_object == object);
3213 assert(VM_PAGE_OBJECT(m) == m_object);
3214
3215 if (m->vmp_busy) {
3216 /*
3217 * Somebody is already playing with this page.
3218 * Put it back on the appropriate queue
3219 *
3220 */
3221 VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
3222
3223 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3224 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
3225 }
3226
3227 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
3228
3229 lock_yield_check = TRUE;
3230 continue;
3231 }
3232
3233 /*
3234 * if (m->vmp_cleaning && !m->vmp_free_when_done)
3235 * If already cleaning this page in place
3236 * just leave it off the paging queues.
3237 * We can leave the page mapped, and upl_commit_range
3238 * will put it on the clean queue.
3239 *
3240 * if (m->vmp_free_when_done && !m->vmp_cleaning)
3241 * an msync INVALIDATE is in progress...
3242 * this page has been marked for destruction
3243 * after it has been cleaned,
3244 * but not yet gathered into a UPL
3245 * where 'cleaning' will be set...
3246 * just leave it off the paging queues
3247 *
3248 * if (m->vmp_free_when_done && m->vmp_cleaning)
3249 * an msync INVALIDATE is in progress
3250 * and the UPL has already gathered this page...
3251 * just leave it off the paging queues
3252 */
3253 if (m->vmp_free_when_done || m->vmp_cleaning) {
3254 lock_yield_check = TRUE;
3255 continue;
3256 }
3257
3258
3259 /*
3260 * If it's absent, in error or the object is no longer alive,
3261 * we can reclaim the page... in the no longer alive case,
3262 * there are 2 states the page can be in that preclude us
3263 * from reclaiming it - busy or cleaning - that we've already
3264 * dealt with
3265 */
3266 if (m->vmp_absent || m->vmp_error || !object->alive) {
3267 if (m->vmp_absent) {
3268 VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
3269 } else if (!object->alive) {
3270 VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
3271 } else {
3272 VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
3273 }
3274 reclaim_page:
3275 if (vm_pageout_deadlock_target) {
3276 VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
3277 vm_pageout_deadlock_target--;
3278 }
3279
3280 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
3281
3282 if (object->internal) {
3283 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
3284 } else {
3285 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
3286 }
3287 assert(!m->vmp_cleaning);
3288 assert(!m->vmp_laundry);
3289
3290 if (!object->internal &&
3291 object->pager != NULL &&
3292 object->pager->mo_pager_ops == &shared_region_pager_ops) {
3293 shared_region_pager_reclaimed++;
3294 }
3295
3296 m->vmp_busy = TRUE;
3297
3298 /*
3299 * remove page from object here since we're already
3300 * behind the object lock... defer the rest of the work
3301 * we'd normally do in vm_page_free_prepare_object
3302 * until 'vm_page_free_list' is called
3303 */
3304 if (m->vmp_tabled) {
3305 vm_page_remove(m, TRUE);
3306 }
3307
3308 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
3309 m->vmp_snext = local_freeq;
3310 local_freeq = m;
3311 local_freed++;
3312
3313 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3314 vm_pageout_vminfo.vm_pageout_freed_speculative++;
3315 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3316 vm_pageout_vminfo.vm_pageout_freed_cleaned++;
3317 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
3318 vm_pageout_vminfo.vm_pageout_freed_internal++;
3319 } else {
3320 vm_pageout_vminfo.vm_pageout_freed_external++;
3321 }
3322
3323 inactive_burst_count = 0;
3324
3325 lock_yield_check = TRUE;
3326 continue;
3327 }
3328 if (object->copy == VM_OBJECT_NULL) {
3329 /*
3330 * No one else can have any interest in this page.
3331 * If this is an empty purgeable object, the page can be
3332 * reclaimed even if dirty.
3333 * If the page belongs to a volatile purgeable object, we
3334 * reactivate it if the compressor isn't active.
3335 */
3336 if (object->purgable == VM_PURGABLE_EMPTY) {
3337 if (m->vmp_pmapped == TRUE) {
3338 /* unmap the page */
3339 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3340 if (refmod_state & VM_MEM_MODIFIED) {
3341 SET_PAGE_DIRTY(m, FALSE);
3342 }
3343 }
3344 if (m->vmp_dirty || m->vmp_precious) {
3345 /* we saved the cost of cleaning this page! */
3346 vm_page_purged_count++;
3347 }
3348 goto reclaim_page;
3349 }
3350
3351 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
3352 /*
3353 * With the VM compressor, the cost of
3354 * reclaiming a page is much lower (no I/O),
3355 * so if we find a "volatile" page, it's better
3356 * to let it get compressed rather than letting
3357 * it occupy a full page until it gets purged.
3358 * So no need to check for "volatile" here.
3359 */
3360 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
3361 /*
3362 * Avoid cleaning a "volatile" page which might
3363 * be purged soon.
3364 */
3365
3366 /* if it's wired, we can't put it on our queue */
3367 assert(!VM_PAGE_WIRED(m));
3368
3369 /* just stick it back on! */
3370 reactivated_this_call++;
3371
3372 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3373 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
3374 }
3375
3376 goto reactivate_page;
3377 }
3378 }
3379 /*
3380 * If it's being used, reactivate.
3381 * (Fictitious pages are either busy or absent.)
3382 * First, update the reference and dirty bits
3383 * to make sure the page is unreferenced.
3384 */
3385 refmod_state = -1;
3386
3387 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
3388 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3389
3390 if (refmod_state & VM_MEM_REFERENCED) {
3391 m->vmp_reference = TRUE;
3392 }
3393 if (refmod_state & VM_MEM_MODIFIED) {
3394 SET_PAGE_DIRTY(m, FALSE);
3395 }
3396 }
3397
3398 if (m->vmp_reference || m->vmp_dirty) {
3399 /* deal with a rogue "reusable" page */
3400 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
3401 }
3402
3403 if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
3404 vm_pageout_state.vm_page_xpmapped_min = 0;
3405 } else {
3406 vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
3407 }
3408
3409 if (!m->vmp_no_cache &&
3410 page_from_bg_q == FALSE &&
3411 (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
3412 (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
3413 /*
3414 * The page we pulled off the inactive list has
3415 * been referenced. It is possible for other
3416 * processors to be touching pages faster than we
3417 * can clear the referenced bit and traverse the
3418 * inactive queue, so we limit the number of
3419 * reactivations.
3420 */
3421 if (++reactivated_this_call >= reactivate_limit) {
3422 vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
3423 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
3424 vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
3425 } else {
3426 uint32_t isinuse;
3427
3428 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3429 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
3430 }
3431
3432 vm_pageout_vminfo.vm_pageout_inactive_referenced++;
3433 reactivate_page:
3434 if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
3435 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
3436 /*
3437 * no explicit mappings of this object exist
3438 * and it's not open via the filesystem
3439 */
3440 vm_page_deactivate(m);
3441 VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
3442 } else {
3443 /*
3444 * The page was/is being used, so put back on active list.
3445 */
3446 vm_page_activate(m);
3447 counter_inc(&vm_statistics_reactivations);
3448 inactive_burst_count = 0;
3449 }
3450 #if CONFIG_BACKGROUND_QUEUE
3451 #if DEVELOPMENT || DEBUG
3452 if (page_from_bg_q == TRUE) {
3453 if (m_object->internal) {
3454 vm_pageout_rejected_bq_internal++;
3455 } else {
3456 vm_pageout_rejected_bq_external++;
3457 }
3458 }
3459 #endif /* DEVELOPMENT || DEBUG */
3460 #endif /* CONFIG_BACKGROUND_QUEUE */
3461
3462 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3463 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3464 }
3465 vm_pageout_state.vm_pageout_inactive_used++;
3466
3467 lock_yield_check = TRUE;
3468 continue;
3469 }
3470 /*
3471 * Make sure we call pmap_get_refmod() if it
3472 * wasn't already called just above, to update
3473 * the dirty bit.
3474 */
3475 if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
3476 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3477 if (refmod_state & VM_MEM_MODIFIED) {
3478 SET_PAGE_DIRTY(m, FALSE);
3479 }
3480 }
3481 }
3482
3483 /*
3484 * we've got a candidate page to steal...
3485 *
3486 * m->vmp_dirty is up to date courtesy of the
3487 * preceding check for m->vmp_reference... if
3488 * we get here, then m->vmp_reference had to be
3489 * FALSE (or possibly "reactivate_limit" was
3490 * exceeded), but in either case we called
3491 * pmap_get_refmod() and updated both
3492 * m->vmp_reference and m->vmp_dirty
3493 *
3494 * if it's dirty or precious we need to
3495 * see if the target queue is throttled
3496 * if it is, we need to skip over it by moving it back
3497 * to the end of the inactive queue
3498 */
3499
3500 inactive_throttled = FALSE;
3501
3502 if (m->vmp_dirty || m->vmp_precious) {
3503 if (object->internal) {
3504 if (VM_PAGE_Q_THROTTLED(iq)) {
3505 inactive_throttled = TRUE;
3506 }
3507 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3508 inactive_throttled = TRUE;
3509 }
3510 }
3511 throttle_inactive:
3512 if (!VM_DYNAMIC_PAGING_ENABLED() &&
3513 object->internal && m->vmp_dirty &&
3514 (object->purgable == VM_PURGABLE_DENY ||
3515 object->purgable == VM_PURGABLE_NONVOLATILE ||
3516 object->purgable == VM_PURGABLE_VOLATILE)) {
3517 vm_page_check_pageable_safe(m);
3518 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3519 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
3520 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
3521 vm_page_throttled_count++;
3522
3523 VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
3524
3525 inactive_burst_count = 0;
3526
3527 lock_yield_check = TRUE;
3528 continue;
3529 }
3530 if (inactive_throttled == TRUE) {
3531 vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
3532 &delayed_unlock, &force_anonymous, page_from_bg_q);
3533
3534 inactive_burst_count = 0;
3535
3536 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3537 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3538 }
3539
3540 lock_yield_check = TRUE;
3541 continue;
3542 }
3543
3544 /*
3545 * we've got a page that we can steal...
3546 * eliminate all mappings and make sure
3547 * we have the up-to-date modified state
3548 *
3549 * if we need to do a pmap_disconnect then we
3550 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3551 * provides the true state atomically... the
3552 * page was still mapped up to the pmap_disconnect
3553 * and may have been dirtied at the last microsecond
3554 *
3555 * Note that if 'pmapped' is FALSE, the page is not now, and never
3556 * has been, in any pmap, so there is no point calling
3557 * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
3558 * of likely usage of the page.
3559 */
3560 if (m->vmp_pmapped == TRUE) {
3561 int pmap_options;
3562
3563 /*
3564 * Don't count this page as going into the compressor
3565 * if any of these are true:
3566 * 1) compressed pager isn't enabled
3567 * 2) Freezer enabled device with compressed pager
3568 * backend (exclusive use) i.e. most of the VM system
3569 * (including vm_pageout_scan) has no knowledge of
3570 * the compressor
3571 * 3) This page belongs to a file and hence will not be
3572 * sent into the compressor
3573 */
3574 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
3575 object->internal == FALSE) {
3576 pmap_options = 0;
3577 } else if (m->vmp_dirty || m->vmp_precious) {
3578 /*
3579 * VM knows that this page is dirty (or
3580 * precious) and needs to be compressed
3581 * rather than freed.
3582 * Tell the pmap layer to count this page
3583 * as "compressed".
3584 */
3585 pmap_options = PMAP_OPTIONS_COMPRESSOR;
3586 } else {
3587 /*
3588 * VM does not know if the page needs to
3589 * be preserved but the pmap layer might tell
3590 * us if any mapping has "modified" it.
3591 * Let the pmap layer count this page
3592 * as compressed if and only if it has been
3593 * modified.
3594 */
3595 pmap_options =
3596 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
3597 }
3598 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
3599 pmap_options,
3600 NULL);
3601 if (refmod_state & VM_MEM_MODIFIED) {
3602 SET_PAGE_DIRTY(m, FALSE);
3603 }
3604 }
3605
3606 /*
3607 * reset our count of pages that have been reclaimed
3608 * since the last page was 'stolen'
3609 */
3610 inactive_reclaim_run = 0;
3611
3612 /*
3613 * If it's clean and not precious, we can free the page.
3614 */
3615 if (!m->vmp_dirty && !m->vmp_precious) {
3616 vm_pageout_state.vm_pageout_inactive_clean++;
3617
3618 /*
3619 * OK, at this point we have found a page we are going to free.
3620 */
3621 #if CONFIG_PHANTOM_CACHE
3622 if (!object->internal) {
3623 vm_phantom_cache_add_ghost(m);
3624 }
3625 #endif
3626 goto reclaim_page;
3627 }
3628
3629 /*
3630 * The page may have been dirtied since the last check
3631 * for a throttled target queue (which may have been skipped
3632 * if the page was clean then). With the dirty page
3633 * disconnected here, we can make one final check.
3634 */
3635 if (object->internal) {
3636 if (VM_PAGE_Q_THROTTLED(iq)) {
3637 inactive_throttled = TRUE;
3638 }
3639 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3640 inactive_throttled = TRUE;
3641 }
3642
3643 if (inactive_throttled == TRUE) {
3644 goto throttle_inactive;
3645 }
3646
3647 #if VM_PRESSURE_EVENTS
3648 #if CONFIG_JETSAM
3649
3650 /*
3651 * If Jetsam is enabled, then the sending
3652 * of memory pressure notifications is handled
3653 * from the same thread that takes care of high-water-mark
3654 * and other jetsams, i.e. the memorystatus_thread.
3655 */
3656
3657 #else /* CONFIG_JETSAM */
3658
3659 vm_pressure_response();
3660
3661 #endif /* CONFIG_JETSAM */
3662 #endif /* VM_PRESSURE_EVENTS */
3663
3664 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3665 VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
3666 }
3667
3668 if (object->internal) {
3669 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
3670 } else {
3671 vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
3672 }
3673
3674 /*
3675 * internal pages will go to the compressor...
3676 * external pages will go to the appropriate pager to be cleaned
3677 * and upon completion will end up on 'vm_page_queue_cleaned' which
3678 * is a preferred queue to steal from
3679 */
3680 vm_pageout_cluster(m);
3681 inactive_burst_count = 0;
3682
3683 /*
3684 * back to top of pageout scan loop
3685 */
3686 }
3687 }
3688
3689
3690 void
3691 vm_page_free_reserve(
3692 int pages)
3693 {
3694 int free_after_reserve;
3695
3696 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3697 if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
3698 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
3699 } else {
3700 vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
3701 }
3702 } else {
3703 if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
3704 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
3705 } else {
3706 vm_page_free_reserved += pages;
3707 }
3708 }
3709 free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
3710
3711 vm_page_free_min = vm_page_free_reserved +
3712 VM_PAGE_FREE_MIN(free_after_reserve);
3713
3714 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
3715 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
3716 }
3717
3718 vm_page_free_target = vm_page_free_reserved +
3719 VM_PAGE_FREE_TARGET(free_after_reserve);
3720
3721 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
3722 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
3723 }
3724
3725 if (vm_page_free_target < vm_page_free_min + 5) {
3726 vm_page_free_target = vm_page_free_min + 5;
3727 }
3728
3729 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
3730 }
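/*
 * Illustrative arithmetic only (the numbers below are assumptions; the
 * real VM_PAGE_FREE_MIN/VM_PAGE_FREE_TARGET macros and their LIMITs are
 * platform-tuned): if vm_page_free_reserved works out to 100 pages and
 * vm_page_free_count_init is 100000, then free_after_reserve is 99900;
 * free_min becomes 100 + VM_PAGE_FREE_MIN(99900) and free_target becomes
 * 100 + VM_PAGE_FREE_TARGET(99900), each clamped to its LIMIT, with
 * free_target then forced to at least free_min + 5.  The throttle limit
 * always lands at half of free_target, e.g. a 5000 page target yields a
 * 2500 page vm_page_throttle_limit.
 */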
3731
3732 /*
3733 * vm_pageout is the high level pageout daemon.
3734 */
3735
3736 void
3737 vm_pageout_continue(void)
3738 {
3739 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
3740 VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
3741
3742 lck_mtx_lock(&vm_page_queue_free_lock);
3743 vm_pageout_running = TRUE;
3744 lck_mtx_unlock(&vm_page_queue_free_lock);
3745
3746 vm_pageout_scan();
3747 /*
3748 * we hold both the vm_page_queue_free_lock
3749 * and the vm_page_queues_lock at this point
3750 */
3751 assert(vm_page_free_wanted == 0);
3752 assert(vm_page_free_wanted_privileged == 0);
3753 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
3754
3755 vm_pageout_running = FALSE;
3756 #if XNU_TARGET_OS_OSX
3757 if (vm_pageout_waiter) {
3758 vm_pageout_waiter = FALSE;
3759 thread_wakeup((event_t)&vm_pageout_waiter);
3760 }
3761 #endif /* XNU_TARGET_OS_OSX */
3762
3763 lck_mtx_unlock(&vm_page_queue_free_lock);
3764 vm_page_unlock_queues();
3765
3766 thread_block((thread_continue_t)vm_pageout_continue);
3767 /*NOTREACHED*/
3768 }
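/*
 * A note on the continuation idiom above (and throughout this file):
 * thread_block((thread_continue_t)fn) allows the thread to give up its
 * kernel stack while blocked and resume by re-entering fn() from the top
 * when woken, instead of returning mid-function.  That is why each pass
 * of the daemon ends by naming vm_pageout_continue() itself as the
 * continuation rather than looping.
 */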
3769
3770 #if XNU_TARGET_OS_OSX
3771 kern_return_t
3772 vm_pageout_wait(uint64_t deadline)
3773 {
3774 kern_return_t kr;
3775
3776 lck_mtx_lock(&vm_page_queue_free_lock);
3777 for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
3778 vm_pageout_waiter = TRUE;
3779 if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
3780 &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
3781 (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
3782 kr = KERN_OPERATION_TIMED_OUT;
3783 }
3784 }
3785 lck_mtx_unlock(&vm_page_queue_free_lock);
3786
3787 return kr;
3788 }
3789 #endif /* XNU_TARGET_OS_OSX */
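/*
 * Minimal usage sketch for vm_pageout_wait() (hypothetical caller, not
 * from this file): wait up to one second for the in-flight pageout pass
 * to finish.
 *
 *	uint64_t interval, deadline;
 *
 *	nanoseconds_to_absolutetime(NSEC_PER_SEC, &interval);
 *	clock_absolutetime_interval_to_deadline(interval, &deadline);
 *	if (vm_pageout_wait(deadline) == KERN_OPERATION_TIMED_OUT) {
 *		// pageout was still running at the deadline
 *	}
 */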
3790
3791
3792 static void
3793 vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
3794 {
3795 vm_page_t m = NULL;
3796 vm_object_t object;
3797 vm_object_offset_t offset;
3798 memory_object_t pager;
3799
3800 /* On systems with a compressor, the external IO thread clears its
3801 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3802 * creation)
3803 */
3804 if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
3805 current_thread()->options &= ~TH_OPT_VMPRIV;
3806 }
3807
3808 vm_page_lockspin_queues();
3809
3810 while (!vm_page_queue_empty(&q->pgo_pending)) {
3811 q->pgo_busy = TRUE;
3812 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3813
3814 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3815 VM_PAGE_CHECK(m);
3816 /*
3817 * grab a snapshot of the object and offset this
3818 * page is tabled in so that we can relookup this
3819 * page after we've taken the object lock - these
3820 * fields are stable while we hold the page queues lock
3821 * but as soon as we drop it, there is nothing to keep
3822 * this page in this object... we hold an activity_in_progress
3823 * on this object which will keep it from terminating
3824 */
3825 object = VM_PAGE_OBJECT(m);
3826 offset = m->vmp_offset;
3827
3828 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3829 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3830
3831 vm_page_unlock_queues();
3832
3833 vm_object_lock(object);
3834
3835 m = vm_page_lookup(object, offset);
3836
3837 if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
3838 !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
3839 /*
3840 * it's either the same page that someone else has
3841 * started cleaning (or it's finished cleaning or
3842 * been put back on the pageout queue), or
3843 * the page has been freed or we have found a
3844 * new page at this offset... in all of these cases
3845 * we merely need to release the activity_in_progress
3846 * we took when we put the page on the pageout queue
3847 */
3848 vm_object_activity_end(object);
3849 vm_object_unlock(object);
3850
3851 vm_page_lockspin_queues();
3852 continue;
3853 }
3854 pager = object->pager;
3855
3856 if (pager == MEMORY_OBJECT_NULL) {
3857 /*
3858 * This pager has been destroyed by either
3859 * memory_object_destroy or vm_object_destroy, and
3860 * so there is nowhere for the page to go.
3861 */
3862 if (m->vmp_free_when_done) {
3863 /*
3864 * Just free the page... VM_PAGE_FREE takes
3865 * care of cleaning up all the state...
3866 * including doing the vm_pageout_throttle_up
3867 */
3868 VM_PAGE_FREE(m);
3869 } else {
3870 vm_page_lockspin_queues();
3871
3872 vm_pageout_throttle_up(m);
3873 vm_page_activate(m);
3874
3875 vm_page_unlock_queues();
3876
3877 /*
3878 * And we are done with it.
3879 */
3880 }
3881 vm_object_activity_end(object);
3882 vm_object_unlock(object);
3883
3884 vm_page_lockspin_queues();
3885 continue;
3886 }
3887 #if 0
3888 /*
3889 * we don't hold the page queue lock
3890 * so this check isn't safe to make
3891 */
3892 VM_PAGE_CHECK(m);
3893 #endif
3894 /*
3895 * give back the activity_in_progress reference we
3896 * took when we queued up this page and replace it
3897 * with a paging_in_progress reference that will
3898 * also keep the paging offset from changing and
3899 * prevent the object from terminating
3900 */
3901 vm_object_activity_end(object);
3902 vm_object_paging_begin(object);
3903 vm_object_unlock(object);
3904
3905 /*
3906 * Send the data to the pager.
3907 * Any pageout clustering happens there.
3908 */
3909 memory_object_data_return(pager,
3910 m->vmp_offset + object->paging_offset,
3911 PAGE_SIZE,
3912 NULL,
3913 NULL,
3914 FALSE,
3915 FALSE,
3916 0);
3917
3918 vm_object_lock(object);
3919 vm_object_paging_end(object);
3920 vm_object_unlock(object);
3921
3922 vm_pageout_io_throttle();
3923
3924 vm_page_lockspin_queues();
3925 }
3926 q->pgo_busy = FALSE;
3927 q->pgo_idle = TRUE;
3928
3929 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3930 vm_page_unlock_queues();
3931
3932 thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
3933 /*NOTREACHED*/
3934 }
3935
3936
3937 #define MAX_FREE_BATCH 32
3938 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
3939 * this thread.
3940 */
3941
3942
3943 void
3944 vm_pageout_iothread_internal_continue(struct cq *);
3945 void
3946 vm_pageout_iothread_internal_continue(struct cq *cq)
3947 {
3948 struct vm_pageout_queue *q;
3949 vm_page_t m = NULL;
3950 boolean_t pgo_draining;
3951 vm_page_t local_q;
3952 int local_cnt;
3953 vm_page_t local_freeq = NULL;
3954 int local_freed = 0;
3955 int local_batch_size;
3956 #if DEVELOPMENT || DEBUG
3957 int ncomps = 0;
3958 boolean_t marked_active = FALSE;
3959 #endif
3960 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
3961
3962 q = cq->q;
3963 #if __AMP__
3964 if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
3965 local_batch_size = (q->pgo_maxlaundry >> 3);
3966 local_batch_size = MAX(local_batch_size, 16);
3967 } else {
3968 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3969 }
3970 #else
3971 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3972 #endif
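	/*
	 * Example with illustrative numbers: if pgo_maxlaundry is 512 pages
	 * and there are two compressor threads, each pass below dequeues at
	 * most 512 / (2 * 2) = 128 pages, so no single thread can drain the
	 * whole laundry queue while its siblings starve.
	 */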
3973
3974 #if RECORD_THE_COMPRESSED_DATA
3975 if (q->pgo_laundry) {
3976 c_compressed_record_init();
3977 }
3978 #endif
3979 while (TRUE) {
3980 int pages_left_on_q = 0;
3981
3982 local_cnt = 0;
3983 local_q = NULL;
3984
3985 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3986
3987 vm_page_lock_queues();
3988 #if DEVELOPMENT || DEBUG
3989 if (marked_active == FALSE) {
3990 vmct_active++;
3991 vmct_state[cq->id] = VMCT_ACTIVE;
3992 marked_active = TRUE;
3993 if (vmct_active == 1) {
3994 vm_compressor_epoch_start = mach_absolute_time();
3995 }
3996 }
3997 #endif
3998 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3999
4000 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
4001
4002 while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
4003 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
4004 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
4005 VM_PAGE_CHECK(m);
4006
4007 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
4008 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
4009 m->vmp_laundry = FALSE;
4010
4011 m->vmp_snext = local_q;
4012 local_q = m;
4013 local_cnt++;
4014 }
4015 if (local_q == NULL) {
4016 break;
4017 }
4018
4019 q->pgo_busy = TRUE;
4020
4021 if ((pgo_draining = q->pgo_draining) == FALSE) {
4022 vm_pageout_throttle_up_batch(q, local_cnt);
4023 pages_left_on_q = q->pgo_laundry;
4024 } else {
4025 pages_left_on_q = q->pgo_laundry - local_cnt;
4026 }
4027
4028 vm_page_unlock_queues();
4029
4030 #if !RECORD_THE_COMPRESSED_DATA
4031 if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
4032 thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
4033 }
4034 #endif
4035 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
4036
4037 while (local_q) {
4038 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
4039
4040 m = local_q;
4041 local_q = m->vmp_snext;
4042 m->vmp_snext = NULL;
4043
4044 if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
4045 #if DEVELOPMENT || DEBUG
4046 ncomps++;
4047 #endif
4048 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
4049
4050 m->vmp_snext = local_freeq;
4051 local_freeq = m;
4052 local_freed++;
4053
4054 if (local_freed >= MAX_FREE_BATCH) {
4055 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4056
4057 vm_page_free_list(local_freeq, TRUE);
4058
4059 local_freeq = NULL;
4060 local_freed = 0;
4061 }
4062 }
4063 #if !CONFIG_JETSAM
4064 while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4065 kern_return_t wait_result;
4066 int need_wakeup = 0;
4067
4068 if (local_freeq) {
4069 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4070
4071 vm_page_free_list(local_freeq, TRUE);
4072 local_freeq = NULL;
4073 local_freed = 0;
4074
4075 continue;
4076 }
4077 lck_mtx_lock_spin(&vm_page_queue_free_lock);
4078
4079 if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4080 if (vm_page_free_wanted_privileged++ == 0) {
4081 need_wakeup = 1;
4082 }
4083 wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
4084
4085 lck_mtx_unlock(&vm_page_queue_free_lock);
4086
4087 if (need_wakeup) {
4088 thread_wakeup((event_t)&vm_page_free_wanted);
4089 }
4090
4091 if (wait_result == THREAD_WAITING) {
4092 thread_block(THREAD_CONTINUE_NULL);
4093 }
4094 } else {
4095 lck_mtx_unlock(&vm_page_queue_free_lock);
4096 }
4097 }
4098 #endif
4099 }
4100 if (local_freeq) {
4101 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4102
4103 vm_page_free_list(local_freeq, TRUE);
4104 local_freeq = NULL;
4105 local_freed = 0;
4106 }
4107 if (pgo_draining == TRUE) {
4108 vm_page_lockspin_queues();
4109 vm_pageout_throttle_up_batch(q, local_cnt);
4110 vm_page_unlock_queues();
4111 }
4112 }
4113 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
4114
4115 /*
4116 * queue lock is held and our q is empty
4117 */
4118 q->pgo_busy = FALSE;
4119 q->pgo_idle = TRUE;
4120
4121 assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
4122 #if DEVELOPMENT || DEBUG
4123 if (marked_active == TRUE) {
4124 vmct_active--;
4125 vmct_state[cq->id] = VMCT_IDLE;
4126
4127 if (vmct_active == 0) {
4128 vm_compressor_epoch_stop = mach_absolute_time();
4129 assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
4130 "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
4131 vm_compressor_epoch_start, vm_compressor_epoch_stop);
4132 /* This interval includes intervals where one or more
4133 * compressor threads were pre-empted
4134 */
4135 vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
4136 }
4137 }
4138 #endif
4139 vm_page_unlock_queues();
4140 #if DEVELOPMENT || DEBUG
4141 if (__improbable(vm_compressor_time_thread)) {
4142 vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
4143 vmct_stats.vmct_pages[cq->id] += ncomps;
4144 vmct_stats.vmct_iterations[cq->id]++;
4145 if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
4146 vmct_stats.vmct_maxpages[cq->id] = ncomps;
4147 }
4148 if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
4149 vmct_stats.vmct_minpages[cq->id] = ncomps;
4150 }
4151 }
4152 #endif
4153
4154 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
4155
4156 thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
4157 /*NOTREACHED*/
4158 }
4159
4160
4161 kern_return_t
4162 vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
4163 {
4164 vm_object_t object;
4165 memory_object_t pager;
4166 int compressed_count_delta;
4167 kern_return_t retval;
4168
4169 object = VM_PAGE_OBJECT(m);
4170
4171 assert(!m->vmp_free_when_done);
4172 assert(!m->vmp_laundry);
4173
4174 pager = object->pager;
4175
4176 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4177 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
4178
4179 vm_object_lock(object);
4180
4181 /*
4182 * If there is no memory object for the page, create
4183 * one and hand it to the compression pager.
4184 */
4185
4186 if (!object->pager_initialized) {
4187 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
4188 }
4189 if (!object->pager_initialized) {
4190 vm_object_compressor_pager_create(object);
4191 }
4192
4193 pager = object->pager;
4194
4195 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4196 /*
4197 * Still no pager for the object,
4198 * or the pager has been destroyed.
4199 * Reactivate the page.
4200 *
4201 * Should only happen if there is no
4202 * compression pager
4203 */
4204 PAGE_WAKEUP_DONE(m);
4205
4206 vm_page_lockspin_queues();
4207 vm_page_activate(m);
4208 VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
4209 vm_page_unlock_queues();
4210
4211 /*
4212 * And we are done with it.
4213 */
4214 vm_object_activity_end(object);
4215 vm_object_unlock(object);
4216
4217 return KERN_FAILURE;
4218 }
4219 vm_object_unlock(object);
4220
4221 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
4222 }
4223 assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
4224 assert(object->activity_in_progress > 0);
4225
4226 retval = vm_compressor_pager_put(
4227 pager,
4228 m->vmp_offset + object->paging_offset,
4229 VM_PAGE_GET_PHYS_PAGE(m),
4230 current_chead,
4231 scratch_buf,
4232 &compressed_count_delta);
4233
4234 vm_object_lock(object);
4235
4236 assert(object->activity_in_progress > 0);
4237 assert(VM_PAGE_OBJECT(m) == object);
4238 assert( !VM_PAGE_WIRED(m));
4239
4240 vm_compressor_pager_count(pager,
4241 compressed_count_delta,
4242 FALSE, /* shared_lock */
4243 object);
4244
4245 if (retval == KERN_SUCCESS) {
4246 /*
4247 * If the object is purgeable, its owner's
4248 * purgeable ledgers will be updated in
4249 * vm_page_remove() but the page still
4250 * contributes to the owner's memory footprint,
4251 * so account for it as such.
4252 */
4253 if ((object->purgable != VM_PURGABLE_DENY ||
4254 object->vo_ledger_tag) &&
4255 object->vo_owner != NULL) {
4256 /* one more compressed purgeable/tagged page */
4257 vm_object_owner_compressed_update(object,
4258 +1);
4259 }
4260 counter_inc(&vm_statistics_compressions);
4261
4262 if (m->vmp_tabled) {
4263 vm_page_remove(m, TRUE);
4264 }
4265 } else {
4266 PAGE_WAKEUP_DONE(m);
4267
4268 vm_page_lockspin_queues();
4269
4270 vm_page_activate(m);
4271 vm_pageout_vminfo.vm_compressor_failed++;
4272
4273 vm_page_unlock_queues();
4274 }
4275 vm_object_activity_end(object);
4276 vm_object_unlock(object);
4277
4278 return retval;
4279 }
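/*
 * Caller contract, as exercised by the compressor iothread above: the
 * page is busy and off the paging queues, and the caller holds an
 * activity_in_progress reference on the page's object (taken when the
 * page was queued).  On KERN_SUCCESS the page has been handed to the
 * compressor pager and removed from its object, and the caller owns its
 * disposition (typically batching it onto a local free list); on failure
 * the page has been reactivated and woken via PAGE_WAKEUP_DONE.  The
 * activity reference is dropped on both paths.
 */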
4280
4281
4282 static void
4283 vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
4284 {
4285 uint32_t policy;
4286
4287 if (hibernate_cleaning_in_progress == TRUE) {
4288 req_lowpriority = FALSE;
4289 }
4290
4291 if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
4292 vm_page_unlock_queues();
4293
4294 if (req_lowpriority == TRUE) {
4295 policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
4296 DTRACE_VM(laundrythrottle);
4297 } else {
4298 policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
4299 DTRACE_VM(laundryunthrottle);
4300 }
4301 proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
4302 TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
4303
4304 vm_page_lock_queues();
4305 eq->pgo_lowpriority = req_lowpriority;
4306 }
4307 }
4308
4309
4310 static void
4311 vm_pageout_iothread_external(void)
4312 {
4313 thread_t self = current_thread();
4314
4315 self->options |= TH_OPT_VMPRIV;
4316
4317 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
4318
4319 proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
4320 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
4321
4322 vm_page_lock_queues();
4323
4324 vm_pageout_queue_external.pgo_tid = self->thread_id;
4325 vm_pageout_queue_external.pgo_lowpriority = TRUE;
4326 vm_pageout_queue_external.pgo_inited = TRUE;
4327
4328 vm_page_unlock_queues();
4329
4330 #if CONFIG_THREAD_GROUPS
4331 thread_group_vm_add();
4332 #endif /* CONFIG_THREAD_GROUPS */
4333
4334 vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
4335
4336 /*NOTREACHED*/
4337 }
4338
4339
4340 static void
4341 vm_pageout_iothread_internal(struct cq *cq)
4342 {
4343 thread_t self = current_thread();
4344
4345 self->options |= TH_OPT_VMPRIV;
4346
4347 vm_page_lock_queues();
4348
4349 vm_pageout_queue_internal.pgo_tid = self->thread_id;
4350 vm_pageout_queue_internal.pgo_lowpriority = TRUE;
4351 vm_pageout_queue_internal.pgo_inited = TRUE;
4352
4353 vm_page_unlock_queues();
4354
4355 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4356 thread_vm_bind_group_add();
4357 }
4358
4359 #if CONFIG_THREAD_GROUPS
4360 thread_group_vm_add();
4361 #endif /* CONFIG_THREAD_GROUPS */
4362
4363 #if __AMP__
4364 if (vm_compressor_ebound) {
4365 /*
4366 * Use the soft bound option for vm_compressor to allow it to run on
4367 * P-cores if E-cluster is unavailable.
4368 */
4369 thread_bind_cluster_type(self, 'E', true);
4370 }
4371 #endif /* __AMP__ */
4372
4373 thread_set_thread_name(current_thread(), "VM_compressor");
4374 #if DEVELOPMENT || DEBUG
4375 vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
4376 #endif
4377 vm_pageout_iothread_internal_continue(cq);
4378
4379 /*NOTREACHED*/
4380 }
4381
4382 kern_return_t
4383 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
4384 {
4385 if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
4386 return KERN_SUCCESS;
4387 } else {
4388 return KERN_FAILURE; /* Already set */
4389 }
4390 }
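/*
 * Registration sketch (hypothetical callback; the real registrant lives
 * in the BSD buffer cache layer): the hook is handed in once at boot,
 * and any later attempt fails with KERN_FAILURE.
 *
 *	extern boolean_t my_buffer_cache_gc(int);	// assumed callback
 *
 *	(void) vm_set_buffer_cleanup_callout(my_buffer_cache_gc);
 */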
4391
4392 extern boolean_t memorystatus_manual_testing_on;
4393 extern unsigned int memorystatus_level;
4394
4395
4396 #if VM_PRESSURE_EVENTS
4397
4398 boolean_t vm_pressure_events_enabled = FALSE;
4399
4400 extern uint64_t next_warning_notification_sent_at_ts;
4401 extern uint64_t next_critical_notification_sent_at_ts;
4402
4403 #define PRESSURE_LEVEL_STUCK_THRESHOLD_MINS (30) /* 30 minutes. */
4404
4405 /*
4406 * The last time there was a change in pressure level, OR the last time we
4407 * forced a check because the system was stuck in a non-normal pressure level.
4408 */
4409 uint64_t vm_pressure_last_level_transition_abs = 0;
4410
4411 /*
4412 * This is how long the system waits 'stuck' in an unchanged non-normal pressure
4413 * level before re-sending notifications for that level.
4414 */
4415 int vm_pressure_level_transition_threshold = PRESSURE_LEVEL_STUCK_THRESHOLD_MINS;
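/*
 * Example: with the default 30 minute threshold, a system that has sat
 * at kVMPressureWarning with no level change for half an hour will have
 * force_check come back true in vm_pressure_response() below, re-arming
 * the warning notification even though the level itself is unchanged.
 */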
4416
4417 void
4418 vm_pressure_response(void)
4419 {
4420 vm_pressure_level_t old_level = kVMPressureNormal;
4421 int new_level = -1;
4422 unsigned int total_pages;
4423 uint64_t available_memory = 0;
4424 uint64_t curr_ts, abs_time_since_level_transition, time_in_ns;
4425 bool force_check = false;
4426 int time_in_mins;
4427
4428
4429 if (vm_pressure_events_enabled == FALSE) {
4430 return;
4431 }
4432
4433 #if !XNU_TARGET_OS_OSX
4434
4435 available_memory = (uint64_t) memorystatus_available_pages;
4436
4437 #else /* !XNU_TARGET_OS_OSX */
4438
4439 available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4440 memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4441
4442 #endif /* !XNU_TARGET_OS_OSX */
4443
4444 total_pages = (unsigned int) atop_64(max_mem);
4445 #if CONFIG_SECLUDED_MEMORY
4446 total_pages -= vm_page_secluded_count;
4447 #endif /* CONFIG_SECLUDED_MEMORY */
4448 memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
4449
4450 if (memorystatus_manual_testing_on) {
4451 return;
4452 }
4453
4454 curr_ts = mach_absolute_time();
4455 abs_time_since_level_transition = curr_ts - vm_pressure_last_level_transition_abs;
4456
4457 absolutetime_to_nanoseconds(abs_time_since_level_transition, &time_in_ns);
4458 time_in_mins = (int) ((time_in_ns / NSEC_PER_SEC) / 60);
4459 force_check = (time_in_mins >= vm_pressure_level_transition_threshold);
4460
4461 old_level = memorystatus_vm_pressure_level;
4462
4463 switch (memorystatus_vm_pressure_level) {
4464 case kVMPressureNormal:
4465 {
4466 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4467 new_level = kVMPressureCritical;
4468 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
4469 new_level = kVMPressureWarning;
4470 }
4471 break;
4472 }
4473
4474 case kVMPressureWarning:
4475 case kVMPressureUrgent:
4476 {
4477 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4478 new_level = kVMPressureNormal;
4479 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4480 new_level = kVMPressureCritical;
4481 } else if (force_check) {
4482 new_level = kVMPressureWarning;
4483 next_warning_notification_sent_at_ts = curr_ts;
4484 }
4485 break;
4486 }
4487
4488 case kVMPressureCritical:
4489 {
4490 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4491 new_level = kVMPressureNormal;
4492 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
4493 new_level = kVMPressureWarning;
4494 } else if (force_check) {
4495 new_level = kVMPressureCritical;
4496 next_critical_notification_sent_at_ts = curr_ts;
4497 }
4498 break;
4499 }
4500
4501 default:
4502 return;
4503 }
4504
4505 if (new_level != -1 || force_check) {
4506 if (new_level != -1) {
4507 memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
4508
4509 if (new_level != (int) old_level) {
4510 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4511 new_level, old_level, 0, 0);
4512 }
4513 } else {
4514 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4515 new_level, old_level, force_check, 0);
4516 }
4517
4518 if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
4519 /*
4520 * We don't want to schedule a wakeup while hibernation is in progress
4521 * because that could collide with checks for non-monotonicity in the scheduler.
4522 * We do however do all the updates to memorystatus_vm_pressure_level because
4523 * we _might_ want to use that for decisions regarding which pages or how
4524 * many pages we want to dump in hibernation.
4525 */
4526 return;
4527 }
4528
4529 if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level) || force_check) {
4530 if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
4531 thread_wakeup(&vm_pressure_thread);
4532 }
4533
4534 if (old_level != memorystatus_vm_pressure_level) {
4535 thread_wakeup(&vm_pageout_state.vm_pressure_changed);
4536 }
4537 vm_pressure_last_level_transition_abs = curr_ts; /* renew the window of observation for a stuck pressure level */
4538 }
4539 }
4540 }
4541 #endif /* VM_PRESSURE_EVENTS */
4542
4543 /*
4544 * Function called by a kernel thread to either get the current pressure level or
4545 * wait until memory pressure changes from a given level.
4546 */
4547 kern_return_t
4548 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
4549 {
4550 #if !VM_PRESSURE_EVENTS
4551
4552 return KERN_FAILURE;
4553
4554 #else /* VM_PRESSURE_EVENTS */
4555
4556 wait_result_t wr = 0;
4557 vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
4558
4559 if (pressure_level == NULL) {
4560 return KERN_INVALID_ARGUMENT;
4561 }
4562
4563 if (*pressure_level == kVMPressureJetsam) {
4564 if (!wait_for_pressure) {
4565 return KERN_INVALID_ARGUMENT;
4566 }
4567
4568 lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
4569 wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
4570 THREAD_INTERRUPTIBLE);
4571 if (wr == THREAD_WAITING) {
4572 ++memorystatus_jetsam_fg_band_waiters;
4573 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4574 wr = thread_block(THREAD_CONTINUE_NULL);
4575 } else {
4576 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4577 }
4578 if (wr != THREAD_AWAKENED) {
4579 return KERN_ABORTED;
4580 }
4581 *pressure_level = kVMPressureJetsam;
4582 return KERN_SUCCESS;
4583 }
4584
4585 if (wait_for_pressure == TRUE) {
4586 while (old_level == *pressure_level) {
4587 wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
4588 THREAD_INTERRUPTIBLE);
4589 if (wr == THREAD_WAITING) {
4590 wr = thread_block(THREAD_CONTINUE_NULL);
4591 }
4592 if (wr == THREAD_INTERRUPTED) {
4593 return KERN_ABORTED;
4594 }
4595
4596 if (wr == THREAD_AWAKENED) {
4597 old_level = memorystatus_vm_pressure_level;
4598 }
4599 }
4600 }
4601
4602 *pressure_level = old_level;
4603 return KERN_SUCCESS;
4604 #endif /* VM_PRESSURE_EVENTS */
4605 }
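/*
 * Minimal monitoring loop (hypothetical kernel caller): fetch the
 * current level without blocking, then block until it changes and react
 * to each new level.
 *
 *	unsigned int level = kVMPressureNormal;
 *
 *	if (mach_vm_pressure_level_monitor(FALSE, &level) == KERN_SUCCESS) {
 *		while (mach_vm_pressure_level_monitor(TRUE, &level) == KERN_SUCCESS) {
 *			// respond to 'level'
 *		}
 *	}
 */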
4606
4607 #if VM_PRESSURE_EVENTS
4608 void
4609 vm_pressure_thread(void)
4610 {
4611 static boolean_t thread_initialized = FALSE;
4612
4613 if (thread_initialized == TRUE) {
4614 vm_pageout_state.vm_pressure_thread_running = TRUE;
4615 consider_vm_pressure_events();
4616 vm_pageout_state.vm_pressure_thread_running = FALSE;
4617 }
4618
4619 #if CONFIG_THREAD_GROUPS
4620 thread_group_vm_add();
4621 #endif /* CONFIG_THREAD_GROUPS */
4622
4623 thread_set_thread_name(current_thread(), "VM_pressure");
4624 thread_initialized = TRUE;
4625 assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
4626 thread_block((thread_continue_t)vm_pressure_thread);
4627 }
4628 #endif /* VM_PRESSURE_EVENTS */
4629
4630
4631 /*
4632 * called once per-second via "compute_averages"
4633 */
4634 void
4635 compute_pageout_gc_throttle(__unused void *arg)
4636 {
4637 if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
4638 vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
4639
4640 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4641 }
4642 }
4643
4644 /*
4645 * vm_pageout_garbage_collect can also be called when the zone allocator needs
4646 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
4647 * jetsams. We need to check if the zone map size is above its jetsam limit to
4648 * decide if this was indeed the case.
4649 *
4650 * We need to do this on a different thread because of the following reasons:
4651 *
4652 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
4653 * itself causing the system to hang. We perform synchronous jetsams if we're
4654 * leaking in the VM map entries zone, so the leaking process could be doing a
4655 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
4656 * jetsam itself. We also need the vm_map lock on the process termination path,
4657 * which would now lead the dying process to deadlock against itself.
4658 *
4659 * 2. The jetsam path might need to allocate zone memory itself. We could try
4660 * using the non-blocking variant of zalloc for this path, but we can still
4661 * end up trying to do a kmem_alloc when the zone maps are almost full.
4662 */
4663 __dead2
4664 void
4665 vm_pageout_garbage_collect(void *step, wait_result_t wr __unused)
4666 {
4667 assert(step == VM_PAGEOUT_GC_INIT || step == VM_PAGEOUT_GC_COLLECT);
4668
4669 if (step == VM_PAGEOUT_GC_INIT) {
4670 /* first time being called is not about GC */
4671 #if CONFIG_THREAD_GROUPS
4672 thread_group_vm_add();
4673 #endif /* CONFIG_THREAD_GROUPS */
4674 } else if (zone_map_nearing_exhaustion()) {
4675 /*
4676 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
4677 *
4678 * Bail out after calling zone_gc (which triggers the
4679 * zone-map-exhaustion jetsams). If we fall through, the subsequent
4680 * operations that clear out a bunch of caches might allocate zone
4681 * memory themselves (for eg. vm_map operations would need VM map
4682 * entries). Since the zone map is almost full at this point, we
4683 * could end up with a panic. We just need to quickly jetsam a
4684 * process and exit here.
4685 *
4686 * It could so happen that we were woken up to relieve memory
4687 * pressure and the zone map also happened to be near its limit at
4688 * the time, in which case we'll skip out early. But that should be
4689 * ok; if memory pressure persists, the thread will simply be woken
4690 * up again.
4691 */
4692 zone_gc(ZONE_GC_JETSAM);
4693 } else {
4694 /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
4695 boolean_t buf_large_zfree = FALSE;
4696 boolean_t first_try = TRUE;
4697
4698 stack_collect();
4699
4700 consider_machine_collect();
4701 mbuf_drain(FALSE);
4702
4703 do {
4704 if (consider_buffer_cache_collect != NULL) {
4705 buf_large_zfree = (*consider_buffer_cache_collect)(0);
4706 }
4707 if (first_try == TRUE || buf_large_zfree == TRUE) {
4708 /*
4709 * zone_gc should be last, because the other operations
4710 * might return memory to zones.
4711 */
4712 zone_gc(ZONE_GC_TRIM);
4713 }
4714 first_try = FALSE;
4715 } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
4716
4717 consider_machine_adjust();
4718 }
4719
4720 assert_wait(VM_PAGEOUT_GC_EVENT, THREAD_UNINT);
4721
4722 thread_block_parameter(vm_pageout_garbage_collect, VM_PAGEOUT_GC_COLLECT);
4723 __builtin_unreachable();
4724 }
4725
4726
4727 #if VM_PAGE_BUCKETS_CHECK
4728 #if VM_PAGE_FAKE_BUCKETS
4729 extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
4730 #endif /* VM_PAGE_FAKE_BUCKETS */
4731 #endif /* VM_PAGE_BUCKETS_CHECK */
4732
4733
4734
4735 void
4736 vm_set_restrictions(unsigned int num_cpus)
4737 {
4738 int vm_restricted_to_single_processor = 0;
4739
4740 if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
4741 kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
4742 vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
4743 } else {
4744 assert(num_cpus > 0);
4745
4746 if (num_cpus <= 3) {
4747 /*
4748 * on systems with a limited number of CPUs, bind the
4749 * 4 major threads that can free memory and that tend to use
4750 * a fair bit of CPU under pressured conditions to a single processor.
4751 * This ensures that these threads don't hog all of the available CPUs
4752 * (important for camera launch), while allowing them to run independently
4753 * with respect to locks... the 4 threads are
4754 * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
4755 * vm_compressor_swap_trigger_thread (minor and major compactions),
4756 * memorystatus_thread (jetsams).
4757 *
4758 * the first time the thread is run, it is responsible for checking the
4759 * state of vm_restricted_to_single_processor, and if TRUE it calls
4760 * thread_bind_master... someday this should be replaced with a group
4761 * scheduling mechanism and KPI.
4762 */
4763 vm_pageout_state.vm_restricted_to_single_processor = TRUE;
4764 } else {
4765 vm_pageout_state.vm_restricted_to_single_processor = FALSE;
4766 }
4767 }
4768 }
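/*
 * The heuristic above can be overridden from the boot command line,
 * e.g. (illustrative macOS invocation):
 *
 *	nvram boot-args="vm_restricted_to_single_processor=1"
 *
 * which forces the single-processor binding regardless of CPU count.
 */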
4769
4770 /*
4771 * Set up vm_config based on the vm_compressor_mode.
4772 * Must run BEFORE the pageout thread starts up.
4773 */
4774 __startup_func
4775 void
4776 vm_config_init(void)
4777 {
4778 bzero(&vm_config, sizeof(vm_config));
4779
4780 switch (vm_compressor_mode) {
4781 case VM_PAGER_DEFAULT:
4782 printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
4783 OS_FALLTHROUGH;
4784
4785 case VM_PAGER_COMPRESSOR_WITH_SWAP:
4786 vm_config.compressor_is_present = TRUE;
4787 vm_config.swap_is_present = TRUE;
4788 vm_config.compressor_is_active = TRUE;
4789 vm_config.swap_is_active = TRUE;
4790 break;
4791
4792 case VM_PAGER_COMPRESSOR_NO_SWAP:
4793 vm_config.compressor_is_present = TRUE;
4794 vm_config.swap_is_present = TRUE;
4795 vm_config.compressor_is_active = TRUE;
4796 break;
4797
4798 case VM_PAGER_FREEZER_DEFAULT:
4799 printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
4800 OS_FALLTHROUGH;
4801
4802 case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
4803 vm_config.compressor_is_present = TRUE;
4804 vm_config.swap_is_present = TRUE;
4805 break;
4806
4807 case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
4808 vm_config.compressor_is_present = TRUE;
4809 vm_config.swap_is_present = TRUE;
4810 vm_config.compressor_is_active = TRUE;
4811 vm_config.freezer_swap_is_active = TRUE;
4812 break;
4813
4814 case VM_PAGER_NOT_CONFIGURED:
4815 break;
4816
4817 default:
4818 printf("unknown compressor mode - %x\n", vm_compressor_mode);
4819 break;
4820 }
4821 }
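/*
 * The flags set above are consumed through the VM_CONFIG_* convenience
 * macros used throughout this file (e.g. VM_CONFIG_COMPRESSOR_IS_PRESENT,
 * VM_CONFIG_COMPRESSOR_IS_ACTIVE), so a mode such as
 * VM_PAGER_COMPRESSOR_NO_SWAP reads as "compressor present and active,
 * swap present but never activated".
 */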
4822
4823 __startup_func
4824 static void
4825 vm_pageout_create_gc_thread(void)
4826 {
4827 thread_t thread;
4828
4829 if (kernel_thread_create(vm_pageout_garbage_collect,
4830 VM_PAGEOUT_GC_INIT, BASEPRI_DEFAULT, &thread) != KERN_SUCCESS) {
4831 panic("vm_pageout_garbage_collect: create failed");
4832 }
4833 thread_set_thread_name(thread, "VM_pageout_garbage_collect");
4834 if (thread->reserved_stack == 0) {
4835 assert(thread->kernel_stack);
4836 thread->reserved_stack = thread->kernel_stack;
4837 }
4838
4839 /* thread is started in vm_pageout() */
4840 vm_pageout_gc_thread = thread;
4841 }
4842 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, vm_pageout_create_gc_thread);
4843
4844 void
4845 vm_pageout(void)
4846 {
4847 thread_t self = current_thread();
4848 thread_t thread;
4849 kern_return_t result;
4850 spl_t s;
4851
4852 /*
4853 * Set thread privileges.
4854 */
4855 s = splsched();
4856
4857 #if CONFIG_VPS_DYNAMIC_PRIO
4858
4859 int vps_dynprio_bootarg = 0;
4860
4861 if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
4862 vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
4863 kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
4864 } else {
4865 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4866 vps_dynamic_priority_enabled = TRUE;
4867 } else {
4868 vps_dynamic_priority_enabled = FALSE;
4869 }
4870 }
4871
4872 if (vps_dynamic_priority_enabled) {
4873 sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
4874 thread_set_eager_preempt(self);
4875 } else {
4876 sched_set_kernel_thread_priority(self, BASEPRI_VM);
4877 }
4878
4879 #else /* CONFIG_VPS_DYNAMIC_PRIO */
4880
4881 vps_dynamic_priority_enabled = FALSE;
4882 sched_set_kernel_thread_priority(self, BASEPRI_VM);
4883
4884 #endif /* CONFIG_VPS_DYNAMIC_PRIO */
4885
4886 thread_lock(self);
4887 self->options |= TH_OPT_VMPRIV;
4888 thread_unlock(self);
4889
4890 if (!self->reserved_stack) {
4891 self->reserved_stack = self->kernel_stack;
4892 }
4893
4894 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
4895 vps_dynamic_priority_enabled == FALSE) {
4896 thread_vm_bind_group_add();
4897 }
4898
4899
4900 #if CONFIG_THREAD_GROUPS
4901 thread_group_vm_add();
4902 #endif /* CONFIG_THREAD_GROUPS */
4903
4904 #if __AMP__
4905 PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
4906 if (vm_pgo_pbound) {
4907 /*
4908 * Use the soft bound option for vm pageout to allow it to run on
4909 * E-cores if P-cluster is unavailable.
4910 */
4911 thread_bind_cluster_type(self, 'P', true);
4912 }
4913 #endif /* __AMP__ */
4914
4915 splx(s);
4916
4917 thread_set_thread_name(current_thread(), "VM_pageout_scan");
4918
4919 /*
4920 * Initialize some paging parameters.
4921 */
4922
4923 vm_pageout_state.vm_pressure_thread_running = FALSE;
4924 vm_pageout_state.vm_pressure_changed = FALSE;
4925 vm_pageout_state.memorystatus_purge_on_warning = 2;
4926 vm_pageout_state.memorystatus_purge_on_urgent = 5;
4927 vm_pageout_state.memorystatus_purge_on_critical = 8;
4928 vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
4929 vm_pageout_state.vm_page_speculative_percentage = 5;
4930 vm_pageout_state.vm_page_speculative_target = 0;
4931
4932 vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
4933 vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
4934
4935 vm_pageout_state.vm_pageout_swap_wait = 0;
4936 vm_pageout_state.vm_pageout_idle_wait = 0;
4937 vm_pageout_state.vm_pageout_empty_wait = 0;
4938 vm_pageout_state.vm_pageout_burst_wait = 0;
4939 vm_pageout_state.vm_pageout_deadlock_wait = 0;
4940 vm_pageout_state.vm_pageout_deadlock_relief = 0;
4941 vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
4942
4943 vm_pageout_state.vm_pageout_inactive = 0;
4944 vm_pageout_state.vm_pageout_inactive_used = 0;
4945 vm_pageout_state.vm_pageout_inactive_clean = 0;
4946
4947 vm_pageout_state.vm_memory_pressure = 0;
4948 vm_pageout_state.vm_page_filecache_min = 0;
4949 #if CONFIG_JETSAM
4950 vm_pageout_state.vm_page_filecache_min_divisor = 70;
4951 vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
4952 #else
4953 vm_pageout_state.vm_page_filecache_min_divisor = 27;
4954 vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
4955 #endif
4956 vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
4957
4958 vm_pageout_state.vm_pageout_considered_page_last = 0;
4959
4960 if (vm_pageout_state.vm_pageout_swap_wait == 0) {
4961 vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
4962 }
4963
4964 if (vm_pageout_state.vm_pageout_idle_wait == 0) {
4965 vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
4966 }
4967
4968 if (vm_pageout_state.vm_pageout_burst_wait == 0) {
4969 vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
4970 }
4971
4972 if (vm_pageout_state.vm_pageout_empty_wait == 0) {
4973 vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
4974 }
4975
4976 if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
4977 vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
4978 }
4979
4980 if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
4981 vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
4982 }
4983
4984 if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
4985 vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
4986 }
4987 /*
4988 * even if we've already called vm_page_free_reserve,
4989 * call it again here to ensure that the targets are
4990 * accurately calculated (it uses vm_page_free_count_init)
4991 * calling it with an arg of 0 will not change the reserve
4992 * but will re-calculate free_min and free_target
4993 */
4994 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
4995 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
4996 } else {
4997 vm_page_free_reserve(0);
4998 }
4999
5000
5001 vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
5002 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
5003 vm_pageout_queue_external.pgo_laundry = 0;
5004 vm_pageout_queue_external.pgo_idle = FALSE;
5005 vm_pageout_queue_external.pgo_busy = FALSE;
5006 vm_pageout_queue_external.pgo_throttled = FALSE;
5007 vm_pageout_queue_external.pgo_draining = FALSE;
5008 vm_pageout_queue_external.pgo_lowpriority = FALSE;
5009 vm_pageout_queue_external.pgo_tid = -1;
5010 vm_pageout_queue_external.pgo_inited = FALSE;
5011
5012 vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
5013 vm_pageout_queue_internal.pgo_maxlaundry = 0;
5014 vm_pageout_queue_internal.pgo_laundry = 0;
5015 vm_pageout_queue_internal.pgo_idle = FALSE;
5016 vm_pageout_queue_internal.pgo_busy = FALSE;
5017 vm_pageout_queue_internal.pgo_throttled = FALSE;
5018 vm_pageout_queue_internal.pgo_draining = FALSE;
5019 vm_pageout_queue_internal.pgo_lowpriority = FALSE;
5020 vm_pageout_queue_internal.pgo_tid = -1;
5021 vm_pageout_queue_internal.pgo_inited = FALSE;
5022
5023 /* internal pageout thread started when default pager registered first time */
5024 /* external pageout and garbage collection threads started here */
5025
5026 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
5027 BASEPRI_VM,
5028 &vm_pageout_state.vm_pageout_external_iothread);
5029 if (result != KERN_SUCCESS) {
5030 panic("vm_pageout_iothread_external: create failed");
5031 }
5032 thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
5033 thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
5034
5035 thread_mtx_lock(vm_pageout_gc_thread);
5036 thread_start(vm_pageout_gc_thread);
5037 thread_mtx_unlock(vm_pageout_gc_thread);
5038
5039 #if VM_PRESSURE_EVENTS
5040 result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
5041 BASEPRI_DEFAULT,
5042 &thread);
5043
5044 if (result != KERN_SUCCESS) {
5045 panic("vm_pressure_thread: create failed");
5046 }
5047
5048 thread_deallocate(thread);
5049 #endif
5050
5051 vm_object_reaper_init();
5052
5053
5054 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
5055 vm_compressor_init();
5056 }
5057
5058 #if VM_PRESSURE_EVENTS
5059 vm_pressure_events_enabled = TRUE;
5060 #endif /* VM_PRESSURE_EVENTS */
5061
5062 #if CONFIG_PHANTOM_CACHE
5063 vm_phantom_cache_init();
5064 #endif
5065 #if VM_PAGE_BUCKETS_CHECK
5066 #if VM_PAGE_FAKE_BUCKETS
5067 printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
5068 (uint64_t) vm_page_fake_buckets_start,
5069 (uint64_t) vm_page_fake_buckets_end);
5070 pmap_protect(kernel_pmap,
5071 vm_page_fake_buckets_start,
5072 vm_page_fake_buckets_end,
5073 VM_PROT_READ);
5074 // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
5075 #endif /* VM_PAGE_FAKE_BUCKETS */
5076 #endif /* VM_PAGE_BUCKETS_CHECK */
5077
5078 #if VM_OBJECT_TRACKING
5079 vm_object_tracking_init();
5080 #endif /* VM_OBJECT_TRACKING */
5081
5082 #if __arm64__
5083 // vm_tests();
5084 #endif /* __arm64__ */
5085
5086 vm_pageout_continue();
5087
5088 /*
5089 * Unreached code!
5090 *
5091 * The vm_pageout_continue() call above never returns, so the code below is never
5092 * executed. We take advantage of this to declare several DTrace VM related probe
5093 * points that our kernel doesn't have an analog for. These are probe points that
5094 * exist in Solaris and are in the DTrace documentation, so people may have written
5095 * scripts that use them. Declaring the probe points here means their scripts will
5096 * compile and execute which we want for portability of the scripts, but since this
5097 * section of code is never reached, the probe points will simply never fire. Yes,
5098 * this is basically a hack. The problem is the DTrace probe points were chosen with
5099 * Solaris specific VM events in mind, not portability to different VM implementations.
5100 */
5101
5102 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
5103 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
5104 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
5105 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
5106 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
5107 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
5108 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
5109 /*NOTREACHED*/
5110 }
5111
5112
5113
5114 kern_return_t
5115 vm_pageout_internal_start(void)
5116 {
5117 kern_return_t result = KERN_SUCCESS;
5118 host_basic_info_data_t hinfo;
5119 vm_offset_t buf, bufsize;
5120
5121 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5122
5123 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5124 #define BSD_HOST 1
5125 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
5126
5127 assert(hinfo.max_cpus > 0);
5128
5129 #if !XNU_TARGET_OS_OSX
5130 vm_pageout_state.vm_compressor_thread_count = 1;
5131 #else /* !XNU_TARGET_OS_OSX */
5132 if (hinfo.max_cpus > 4) {
5133 vm_pageout_state.vm_compressor_thread_count = 2;
5134 } else {
5135 vm_pageout_state.vm_compressor_thread_count = 1;
5136 }
5137 #endif /* !XNU_TARGET_OS_OSX */
5138 PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
5139 sizeof(vm_pageout_state.vm_compressor_thread_count));
5140
5141 #if __AMP__
5142 PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound));
5143 if (vm_compressor_ebound) {
5144 vm_pageout_state.vm_compressor_thread_count = 2;
5145 }
5146 #endif
5147 if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
5148 vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
5149 }
5150 if (vm_pageout_state.vm_compressor_thread_count <= 0) {
5151 vm_pageout_state.vm_compressor_thread_count = 1;
5152 } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
5153 vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
5154 }
5155
5156 vm_pageout_queue_internal.pgo_maxlaundry =
5157 (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
5158
5159 PE_parse_boot_argn("vmpgoi_maxlaundry",
5160 &vm_pageout_queue_internal.pgo_maxlaundry,
5161 sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
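/*
 * Worked example (illustrative; VM_PAGE_LAUNDRY_MAX is defined elsewhere):
 * with vm_compressor_thread_count == 2, the default limit above becomes
 * (2 * 4) * VM_PAGE_LAUNDRY_MAX pages of in-flight laundry, and the
 * "vmpgoi_maxlaundry" boot-arg can override whatever was computed.
 */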
5162
5163 bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
5164
5165 kmem_alloc(kernel_map, &buf,
5166 bufsize * vm_pageout_state.vm_compressor_thread_count,
5167 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
5168 VM_KERN_MEMORY_COMPRESSOR);
5169
5170 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
5171 ciq[i].id = i;
5172 ciq[i].q = &vm_pageout_queue_internal;
5173 ciq[i].current_chead = NULL;
5174 ciq[i].scratch_buf = (char *)(buf + i * bufsize);
5175
5176 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
5177 (void *)&ciq[i], BASEPRI_VM,
5178 &vm_pageout_state.vm_pageout_internal_iothread);
5179
5180 if (result == KERN_SUCCESS) {
5181 thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
5182 } else {
5183 break;
5184 }
5185 }
5186 return result;
5187 }
5188
5189 #if CONFIG_IOSCHED
5190 /*
5191 * To support I/O Expedite for compressed files we mark the upls with special flags.
5192 * The way decmpfs works is that we create a big upl which marks all the pages needed to
5193 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
5194 * then issues smaller I/Os for the compressed data, inflates (decompresses) it and puts the
5195 * data into the pages being held in the big original UPL. We mark each of these smaller UPLs
5196 * with the flag UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl
5197 * using the decmp_io_upl field (in the upl structure). This link is protected in the forward
5198 * direction by the req upl lock (the reverse link doesn't need synchronization since we never
5199 * inspect this link unless the real I/O upl is being destroyed).
5200 */
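/*
 * A sketch of the linkage described above (illustrative only):
 *
 *   req upl (UPL_DECMP_REQ)                real I/O upl (UPL_DECMP_REAL_IO)
 *   +----------------------+ decmp_io_upl  +-----------------------------+
 *   | covers all pages of  | ------------> | one smaller I/O against the |
 *   | the compressed file  |  (req lock)   | compressed data             |
 *   |                      | <------------ |                             |
 *   +----------------------+ decmp_io_upl  +-----------------------------+
 *                            (unsynchronized back link, see above)
 */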
5201
5202
5203 static void
5204 upl_set_decmp_info(upl_t upl, upl_t src_upl)
5205 {
5206 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5207
5208 upl_lock(src_upl);
5209 if (src_upl->decmp_io_upl) {
5210 /*
5211 * If there is already an alive real I/O UPL, ignore this new UPL.
5212 * This case should rarely happen and even if it does, it just means
5213 * that we might issue a spurious expedite which the driver is expected
5214 * to handle.
5215 */
5216 upl_unlock(src_upl);
5217 return;
5218 }
5219 src_upl->decmp_io_upl = (void *)upl;
5220 src_upl->ref_count++;
5221
5222 upl->flags |= UPL_DECMP_REAL_IO;
5223 upl->decmp_io_upl = (void *)src_upl;
5224 upl_unlock(src_upl);
5225 }
5226 #endif /* CONFIG_IOSCHED */
5227
5228 #if UPL_DEBUG
5229 int upl_debug_enabled = 1;
5230 #else
5231 int upl_debug_enabled = 0;
5232 #endif
5233
5234 static upl_t
5235 upl_create(int type, int flags, upl_size_t size)
5236 {
5237 upl_t upl;
5238 vm_size_t page_field_size = 0;
5239 int upl_flags = 0;
5240 vm_size_t upl_size = sizeof(struct upl);
5241
5242 assert(page_aligned(size));
5243
5244 size = round_page_32(size);
5245
5246 if (type & UPL_CREATE_LITE) {
5247 page_field_size = (atop(size) + 7) >> 3;
5248 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5249
5250 upl_flags |= UPL_LITE;
5251 }
5252 if (type & UPL_CREATE_INTERNAL) {
5253 upl_size += sizeof(struct upl_page_info) * atop(size);
5254
5255 upl_flags |= UPL_INTERNAL;
5256 }
5257 upl = (upl_t)kheap_alloc(KHEAP_DEFAULT, upl_size + page_field_size, Z_WAITOK | Z_ZERO);
5258
5259 upl->flags = upl_flags | flags;
5260 upl->ref_count = 1;
5261 upl_lock_init(upl);
5262 #if CONFIG_IOSCHED
5263 if (type & UPL_CREATE_IO_TRACKING) {
5264 upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
5265 }
5266
5267 if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
5268 /* Only support expedite on internal UPLs */
5269 thread_t curthread = current_thread();
5270 upl->upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(size), Z_WAITOK | Z_ZERO);
5271 upl->flags |= UPL_EXPEDITE_SUPPORTED;
5272 if (curthread->decmp_upl != NULL) {
5273 upl_set_decmp_info(upl, curthread->decmp_upl);
5274 }
5275 }
5276 #endif
5277 #if CONFIG_IOSCHED || UPL_DEBUG
5278 if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
5279 upl->upl_creator = current_thread();
5280 upl->flags |= UPL_TRACKED_BY_OBJECT;
5281 }
5282 #endif
5283
5284 #if UPL_DEBUG
5285 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5286 #endif /* UPL_DEBUG */
5287
5288 return upl;
5289 }
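/*
 * Layout of the allocation made by upl_create() above, as implied by the
 * pointer arithmetic in vm_object_upl_request() below (descriptive note,
 * not authoritative):
 *
 *   UPL_CREATE_INTERNAL | UPL_CREATE_LITE:
 *       [ struct upl ][ upl_page_info_t[atop(size)] ][ lite bitmap ]
 *   UPL_CREATE_INTERNAL only:
 *       [ struct upl ][ upl_page_info_t[atop(size)] ]
 *   UPL_CREATE_LITE only:
 *       [ struct upl ][ lite bitmap ]
 *
 * The lite bitmap carries one bit per page: (atop(size) + 7) >> 3 bytes,
 * rounded up to a 4-byte boundary by the (x + 3) & 0xFFFFFFFC step.
 */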
5290
5291 static void
5292 upl_destroy(upl_t upl)
5293 {
5294 int page_field_size; /* bit field in word size buf */
5295 int size;
5296
5297 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);
5298
5299 if (upl->ext_ref_count) {
5300 panic("upl(%p) ext_ref_count", upl);
5301 }
5302
5303 #if CONFIG_IOSCHED
5304 if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
5305 upl_t src_upl;
5306 src_upl = upl->decmp_io_upl;
5307 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5308 upl_lock(src_upl);
5309 src_upl->decmp_io_upl = NULL;
5310 upl_unlock(src_upl);
5311 upl_deallocate(src_upl);
5312 }
5313 #endif /* CONFIG_IOSCHED */
5314
5315 #if CONFIG_IOSCHED || UPL_DEBUG
5316 if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
5317 !(upl->flags & UPL_VECTOR)) {
5318 vm_object_t object;
5319
5320 if (upl->flags & UPL_SHADOWED) {
5321 object = upl->map_object->shadow;
5322 } else {
5323 object = upl->map_object;
5324 }
5325
5326 vm_object_lock(object);
5327 queue_remove(&object->uplq, upl, upl_t, uplq);
5328 vm_object_activity_end(object);
5329 vm_object_collapse(object, 0, TRUE);
5330 vm_object_unlock(object);
5331 }
5332 #endif
5333 /*
5334 * drop a reference on the map_object whether or
5335 * not a pageout object is inserted
5336 */
5337 if (upl->flags & UPL_SHADOWED) {
5338 vm_object_deallocate(upl->map_object);
5339 }
5340
5341 if (upl->flags & UPL_DEVICE_MEMORY) {
5342 size = PAGE_SIZE;
5343 } else {
5344 size = upl_adjusted_size(upl, PAGE_MASK);
5345 }
5346 page_field_size = 0;
5347
5348 if (upl->flags & UPL_LITE) {
5349 page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
5350 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5351 }
5352 upl_lock_destroy(upl);
5353 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
5354
5355 #if CONFIG_IOSCHED
5356 if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
5357 kfree_data(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
5358 }
5359 #endif
5360
5361 if (upl->flags & UPL_INTERNAL) {
5362 kheap_free(KHEAP_DEFAULT, upl,
5363 sizeof(struct upl) +
5364 (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
5365 + page_field_size);
5366 } else {
5367 kheap_free(KHEAP_DEFAULT, upl, sizeof(struct upl) + page_field_size);
5368 }
5369 }
5370
5371 void
5372 upl_deallocate(upl_t upl)
5373 {
5374 upl_lock(upl);
5375
5376 if (--upl->ref_count == 0) {
5377 if (vector_upl_is_valid(upl)) {
5378 vector_upl_deallocate(upl);
5379 }
5380 upl_unlock(upl);
5381
5382 if (upl->upl_iodone) {
5383 upl_callout_iodone(upl);
5384 }
5385
5386 upl_destroy(upl);
5387 } else {
5388 upl_unlock(upl);
5389 }
5390 }
5391
5392 #if CONFIG_IOSCHED
5393 void
5394 upl_mark_decmp(upl_t upl)
5395 {
5396 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
5397 upl->flags |= UPL_DECMP_REQ;
5398 upl->upl_creator->decmp_upl = (void *)upl;
5399 }
5400 }
5401
5402 void
5403 upl_unmark_decmp(upl_t upl)
5404 {
5405 if (upl && (upl->flags & UPL_DECMP_REQ)) {
5406 upl->upl_creator->decmp_upl = NULL;
5407 }
5408 }
5409
5410 #endif /* CONFIG_IOSCHED */
5411
5412 #define VM_PAGE_Q_BACKING_UP(q) \
5413 ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
5414
5415 boolean_t must_throttle_writes(void);
5416
5417 boolean_t
5418 must_throttle_writes(void)
5419 {
5420 if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
5421 vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
5422 return TRUE;
5423 }
5424
5425 return FALSE;
5426 }
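/*
 * Putting the two conditions together (illustrative numbers): with
 * pgo_maxlaundry == 100, VM_PAGE_Q_BACKING_UP() is true once 80 or more
 * pages are in the laundry (8/10 of the max), and writes are throttled
 * only if, in addition, external pageable pages exceed 60% of
 * AVAILABLE_NON_COMPRESSED_MEMORY.
 */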
5427
5428 #define MIN_DELAYED_WORK_CTX_ALLOCATED (16)
5429 #define MAX_DELAYED_WORK_CTX_ALLOCATED (512)
5430
5431 int vm_page_delayed_work_ctx_needed = 0;
5432 SECURITY_READ_ONLY_LATE(zone_t) dw_ctx_zone;
5433
5434 void
5435 vm_page_delayed_work_init_ctx(void)
5436 {
5437 size_t elem_size = sizeof(struct vm_page_delayed_work_ctx);
5438
5439 dw_ctx_zone = zone_create_ext("delayed-work-ctx", elem_size,
5440 ZC_NOGC, ZONE_ID_ANY, ^(zone_t z) {
5441 zone_set_exhaustible(z, MAX_DELAYED_WORK_CTX_ALLOCATED);
5442 });
5443
5444 zone_fill_initially(dw_ctx_zone, MIN_DELAYED_WORK_CTX_ALLOCATED);
5445 }
5446
5447 struct vm_page_delayed_work*
5448 vm_page_delayed_work_get_ctx(void)
5449 {
5450 struct vm_page_delayed_work_ctx * dw_ctx = NULL;
5451
5452 dw_ctx = (struct vm_page_delayed_work_ctx*) zalloc_noblock(dw_ctx_zone);
5453
5454 if (dw_ctx) {
5455 dw_ctx->delayed_owner = current_thread();
5456 } else {
5457 vm_page_delayed_work_ctx_needed++;
5458 }
5459 return dw_ctx ? dw_ctx->dwp : NULL;
5460 }
5461
5462 void
5463 vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work *dwp)
5464 {
5465 struct vm_page_delayed_work_ctx *ldw_ctx;
5466
5467 ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp;
5468 ldw_ctx->delayed_owner = NULL;
5469
5470 zfree(dw_ctx_zone, ldw_ctx);
5471 }
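/*
 * Typical usage of the three routines above, as exercised by
 * vm_object_upl_request() below (a sketch, not a complete caller):
 *
 *   dwp_start = vm_page_delayed_work_get_ctx();
 *   if (dwp_start == NULL) {
 *           dwp_start = &dw_array;    // zone exhausted: single stack entry
 *           dw_limit = 1;             // and skip finish_ctx on the way out
 *   }
 *   // ... accumulate DW_* masks via VM_PAGE_ADD_DELAYED_WORK(),
 *   // flushing with vm_page_do_delayed_work() when dw_limit is hit ...
 *   vm_page_delayed_work_finish_ctx(dwp_start);  // zone-allocated case only
 */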
5472
5473 /*
5474 * Routine: vm_object_upl_request
5475 * Purpose:
5476 * Cause the population of a portion of a vm_object.
5477 * Depending on the nature of the request, the pages
5478 * returned may contain valid data or be uninitialized.
5479 * A page list structure, listing the physical pages,
5480 * will be returned upon request.
5481 * This function is called by the file system or any other
5482 * supplier of backing store to a pager.
5483 * IMPORTANT NOTE: The caller must still respect the relationship
5484 * between the vm_object and its backing memory object. The
5485 * caller MUST NOT substitute changes in the backing file
5486 * without first doing a memory_object_lock_request on the
5487 * target range unless it is known that the pages are not
5488 * shared with another entity at the pager level.
5489 * Copy_in_to:
5490 * if a page list structure is present
5491 * return the mapped physical pages; where a
5492 * page is not present, return an uninitialized
5493 * one. If the no_sync bit is turned on, don't
5494 * call the pager unlock to synchronize with other
5495 * possible copies of the page. Leave pages busy
5496 * in the original object, if a page list structure
5497 * was specified. When a commit of the page list
5498 * pages is done, the dirty bit will be set for each one.
5499 * Copy_out_from:
5500 * If a page list structure is present, return
5501 * all mapped pages. Where a page does not exist
5502 * map a zero filled one. Leave pages busy in
5503 * the original object. If a page list structure
5504 * is not specified, this call is a no-op.
5505 *
5506 * Note: access of default pager objects has a rather interesting
5507 * twist. The caller of this routine, presumably the file system
5508 * page cache handling code, will never actually make a request
5509 * against a default pager backed object. Only the default
5510 * pager will make requests on backing store related vm_objects.
5511 * In this way the default pager can maintain the relationship
5512 * between backing store files (abstract memory objects) and
5513 * the vm_objects (cache objects) they support.
5514 *
5515 */
5516
5517 __private_extern__ kern_return_t
5518 vm_object_upl_request(
5519 vm_object_t object,
5520 vm_object_offset_t offset,
5521 upl_size_t size,
5522 upl_t *upl_ptr,
5523 upl_page_info_array_t user_page_list,
5524 unsigned int *page_list_count,
5525 upl_control_flags_t cntrl_flags,
5526 vm_tag_t tag)
5527 {
5528 vm_page_t dst_page = VM_PAGE_NULL;
5529 vm_object_offset_t dst_offset;
5530 upl_size_t xfer_size;
5531 unsigned int size_in_pages;
5532 boolean_t dirty;
5533 boolean_t hw_dirty;
5534 upl_t upl = NULL;
5535 unsigned int entry;
5536 vm_page_t alias_page = NULL;
5537 int refmod_state = 0;
5538 wpl_array_t lite_list = NULL;
5539 vm_object_t last_copy_object;
5540 struct vm_page_delayed_work dw_array;
5541 struct vm_page_delayed_work *dwp, *dwp_start;
5542 bool dwp_finish_ctx = TRUE;
5543 int dw_count;
5544 int dw_limit;
5545 int io_tracking_flag = 0;
5546 int grab_options;
5547 int page_grab_count = 0;
5548 ppnum_t phys_page;
5549 pmap_flush_context pmap_flush_context_storage;
5550 boolean_t pmap_flushes_delayed = FALSE;
5551 #if DEVELOPMENT || DEBUG
5552 task_t task = current_task();
5553 #endif /* DEVELOPMENT || DEBUG */
5554
5555 dwp_start = dwp = NULL;
5556
5557 if (cntrl_flags & ~UPL_VALID_FLAGS) {
5558 /*
5559 * For forward compatibility's sake,
5560 * reject any unknown flag.
5561 */
5562 return KERN_INVALID_VALUE;
5563 }
5564 if ((!object->internal) && (object->paging_offset != 0)) {
5565 panic("vm_object_upl_request: external object with non-zero paging offset");
5566 }
5567 if (object->phys_contiguous) {
5568 panic("vm_object_upl_request: contiguous object specified");
5569 }
5570
5571 assertf(page_aligned(offset) && page_aligned(size),
5572 "offset 0x%llx size 0x%x",
5573 offset, size);
5574
5575 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
5576
5577 dw_count = 0;
5578 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5579 dwp_start = vm_page_delayed_work_get_ctx();
5580 if (dwp_start == NULL) {
5581 dwp_start = &dw_array;
5582 dw_limit = 1;
5583 dwp_finish_ctx = FALSE;
5584 }
5585
5586 dwp = dwp_start;
5587
5588 if (size > MAX_UPL_SIZE_BYTES) {
5589 size = MAX_UPL_SIZE_BYTES;
5590 }
5591
5592 if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
5593 *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
5594 }
5595
5596 #if CONFIG_IOSCHED || UPL_DEBUG
5597 if (object->io_tracking || upl_debug_enabled) {
5598 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
5599 }
5600 #endif
5601 #if CONFIG_IOSCHED
5602 if (object->io_tracking) {
5603 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
5604 }
5605 #endif
5606
5607 if (cntrl_flags & UPL_SET_INTERNAL) {
5608 if (cntrl_flags & UPL_SET_LITE) {
5609 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5610
5611 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5612 lite_list = (wpl_array_t)
5613 (((uintptr_t)user_page_list) +
5614 ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
5615 if (size == 0) {
5616 user_page_list = NULL;
5617 lite_list = NULL;
5618 }
5619 } else {
5620 upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
5621
5622 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5623 if (size == 0) {
5624 user_page_list = NULL;
5625 }
5626 }
5627 } else {
5628 if (cntrl_flags & UPL_SET_LITE) {
5629 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5630
5631 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
5632 if (size == 0) {
5633 lite_list = NULL;
5634 }
5635 } else {
5636 upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
5637 }
5638 }
5639 *upl_ptr = upl;
5640
5641 if (user_page_list) {
5642 user_page_list[0].device = FALSE;
5643 }
5644
5645 if (cntrl_flags & UPL_SET_LITE) {
5646 upl->map_object = object;
5647 } else {
5648 upl->map_object = vm_object_allocate(size);
5649 /*
5650 * No need to lock the new object: nobody else knows
5651 * about it yet, so it's all ours so far.
5652 */
5653 upl->map_object->shadow = object;
5654 upl->map_object->pageout = TRUE;
5655 upl->map_object->can_persist = FALSE;
5656 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5657 upl->map_object->vo_shadow_offset = offset;
5658 upl->map_object->wimg_bits = object->wimg_bits;
5659 assertf(page_aligned(upl->map_object->vo_shadow_offset),
5660 "object %p shadow_offset 0x%llx",
5661 upl->map_object, upl->map_object->vo_shadow_offset);
5662
5663 alias_page = vm_page_grab_fictitious(TRUE);
5664
5665 upl->flags |= UPL_SHADOWED;
5666 }
5667 if (cntrl_flags & UPL_FOR_PAGEOUT) {
5668 upl->flags |= UPL_PAGEOUT;
5669 }
5670
5671 vm_object_lock(object);
5672 vm_object_activity_begin(object);
5673
5674 grab_options = 0;
5675 #if CONFIG_SECLUDED_MEMORY
5676 if (object->can_grab_secluded) {
5677 grab_options |= VM_PAGE_GRAB_SECLUDED;
5678 }
5679 #endif /* CONFIG_SECLUDED_MEMORY */
5680
5681 /*
5682 * we can lock in the paging_offset once paging_in_progress is set
5683 */
5684 upl->u_size = size;
5685 upl->u_offset = offset + object->paging_offset;
5686
5687 #if CONFIG_IOSCHED || UPL_DEBUG
5688 if (object->io_tracking || upl_debug_enabled) {
5689 vm_object_activity_begin(object);
5690 queue_enter(&object->uplq, upl, upl_t, uplq);
5691 }
5692 #endif
5693 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
5694 /*
5695 * Honor copy-on-write obligations
5696 *
5697 * The caller is gathering these pages and
5698 * might modify their contents. We need to
5699 * make sure that the copy object has its own
5700 * private copies of these pages before we let
5701 * the caller modify them.
5702 */
5703 vm_object_update(object,
5704 offset,
5705 size,
5706 NULL,
5707 NULL,
5708 FALSE, /* should_return */
5709 MEMORY_OBJECT_COPY_SYNC,
5710 VM_PROT_NO_CHANGE);
5711
5712 VM_PAGEOUT_DEBUG(upl_cow, 1);
5713 VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
5714 }
5715 /*
5716 * remember which copy object we synchronized with
5717 */
5718 last_copy_object = object->copy;
5719 entry = 0;
5720
5721 xfer_size = size;
5722 dst_offset = offset;
5723 size_in_pages = size / PAGE_SIZE;
5724
5725 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
5726 object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
5727 object->scan_collisions = 0;
5728 }
5729
5730 if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
5731 boolean_t isSSD = FALSE;
5732
5733 #if !XNU_TARGET_OS_OSX
5734 isSSD = TRUE;
5735 #else /* !XNU_TARGET_OS_OSX */
5736 vnode_pager_get_isSSD(object->pager, &isSSD);
5737 #endif /* !XNU_TARGET_OS_OSX */
5738 vm_object_unlock(object);
5739
5740 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5741
5742 if (isSSD == TRUE) {
5743 delay(1000 * size_in_pages);
5744 } else {
5745 delay(5000 * size_in_pages);
5746 }
5747 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5748
5749 vm_object_lock(object);
5750 }
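/*
 * Assuming delay() takes microseconds here, the back-off above amounts to
 * roughly 1ms per page for SSD-backed objects and 5ms per page otherwise;
 * the wait is visible in vm_upl_wait_for_pages while it lasts.
 */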
5751
5752 while (xfer_size) {
5753 dwp->dw_mask = 0;
5754
5755 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
5756 vm_object_unlock(object);
5757 alias_page = vm_page_grab_fictitious(TRUE);
5758 vm_object_lock(object);
5759 }
5760 if (cntrl_flags & UPL_COPYOUT_FROM) {
5761 upl->flags |= UPL_PAGE_SYNC_DONE;
5762
5763 if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
5764 dst_page->vmp_fictitious ||
5765 dst_page->vmp_absent ||
5766 dst_page->vmp_error ||
5767 dst_page->vmp_cleaning ||
5768 (VM_PAGE_WIRED(dst_page))) {
5769 if (user_page_list) {
5770 user_page_list[entry].phys_addr = 0;
5771 }
5772
5773 goto try_next_page;
5774 }
5775 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5776
5777 /*
5778 * grab this up front...
5779 * a high percentage of the time we're going to
5780 * need the hardware modification state a bit later
5781 * anyway... so we can eliminate an extra call into
5782 * the pmap layer by grabbing it here and recording it
5783 */
5784 if (dst_page->vmp_pmapped) {
5785 refmod_state = pmap_get_refmod(phys_page);
5786 } else {
5787 refmod_state = 0;
5788 }
5789
5790 if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
5791 /*
5792 * page is on inactive list and referenced...
5793 * reactivate it now... this gets it out of the
5794 * way of vm_pageout_scan which would have to
5795 * reactivate it upon tripping over it
5796 */
5797 dwp->dw_mask |= DW_vm_page_activate;
5798 }
5799 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
5800 /*
5801 * we're only asking for DIRTY pages to be returned
5802 */
5803 if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
5804 /*
5805 * if we were the page stolen by vm_pageout_scan to be
5806 * cleaned (as opposed to a buddy being clustered in
5807 * or this request is not being driven by a PAGEOUT cluster
5808 * then we only need to check for the page being dirty or
5809 * precious to decide whether to return it
5810 */
5811 if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
5812 goto check_busy;
5813 }
5814 goto dont_return;
5815 }
5816 /*
5817 * this is a request for a PAGEOUT cluster and this page
5818 * is merely along for the ride as a 'buddy'... not only
5819 * does it have to be dirty to be returned, but it also
5820 * can't have been referenced recently...
5821 */
5822 if ((hibernate_cleaning_in_progress == TRUE ||
5823 (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
5824 (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
5825 ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
5826 goto check_busy;
5827 }
5828 dont_return:
5829 /*
5830 * if we reach here, we're not to return
5831 * the page... go on to the next one
5832 */
5833 if (dst_page->vmp_laundry == TRUE) {
5834 /*
5835 * if we get here, the page is not 'cleaning' (filtered out above).
5836 * since it has been referenced, remove it from the laundry
5837 * so we don't pay the cost of an I/O to clean a page
5838 * we're just going to take back
5839 */
5840 vm_page_lockspin_queues();
5841
5842 vm_pageout_steal_laundry(dst_page, TRUE);
5843 vm_page_activate(dst_page);
5844
5845 vm_page_unlock_queues();
5846 }
5847 if (user_page_list) {
5848 user_page_list[entry].phys_addr = 0;
5849 }
5850
5851 goto try_next_page;
5852 }
5853 check_busy:
5854 if (dst_page->vmp_busy) {
5855 if (cntrl_flags & UPL_NOBLOCK) {
5856 if (user_page_list) {
5857 user_page_list[entry].phys_addr = 0;
5858 }
5859 dwp->dw_mask = 0;
5860
5861 goto try_next_page;
5862 }
5863 /*
5864 * someone else is playing with the
5865 * page. We will have to wait.
5866 */
5867 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5868
5869 continue;
5870 }
5871 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5872 vm_page_lockspin_queues();
5873
5874 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5875 /*
5876 * we've buddied up a page for a clustered pageout
5877 * that has already been moved to the pageout
5878 * queue by pageout_scan... we need to remove
5879 * it from the queue and drop the laundry count
5880 * on that queue
5881 */
5882 vm_pageout_throttle_up(dst_page);
5883 }
5884 vm_page_unlock_queues();
5885 }
5886 hw_dirty = refmod_state & VM_MEM_MODIFIED;
5887 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5888
5889 if (phys_page > upl->highest_page) {
5890 upl->highest_page = phys_page;
5891 }
5892
5893 assert(!pmap_is_noencrypt(phys_page));
5894
5895 if (cntrl_flags & UPL_SET_LITE) {
5896 unsigned int pg_num;
5897
5898 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5899 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
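/*
 * record the page in the lite list: pg_num >> 5 picks the
 * 32-bit word, pg_num & 31 the bit within it (e.g. pg_num 37
 * lands in word 1, bit 5); the same indexing is used in the
 * non-COPYOUT_FROM path below
 */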
5900 lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
5901
5902 if (hw_dirty) {
5903 if (pmap_flushes_delayed == FALSE) {
5904 pmap_flush_context_init(&pmap_flush_context_storage);
5905 pmap_flushes_delayed = TRUE;
5906 }
5907 pmap_clear_refmod_options(phys_page,
5908 VM_MEM_MODIFIED,
5909 PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
5910 &pmap_flush_context_storage);
5911 }
5912
5913 /*
5914 * Mark original page as cleaning
5915 * in place.
5916 */
5917 dst_page->vmp_cleaning = TRUE;
5918 dst_page->vmp_precious = FALSE;
5919 } else {
5920 /*
5921 * use pageclean setup, it is more
5922 * convenient even for the pageout
5923 * cases here
5924 */
5925 vm_object_lock(upl->map_object);
5926 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5927 vm_object_unlock(upl->map_object);
5928
5929 alias_page->vmp_absent = FALSE;
5930 alias_page = NULL;
5931 }
5932 if (dirty) {
5933 SET_PAGE_DIRTY(dst_page, FALSE);
5934 } else {
5935 dst_page->vmp_dirty = FALSE;
5936 }
5937
5938 if (!dirty) {
5939 dst_page->vmp_precious = TRUE;
5940 }
5941
5942 if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
5943 if (!VM_PAGE_WIRED(dst_page)) {
5944 dst_page->vmp_free_when_done = TRUE;
5945 }
5946 }
5947 } else {
5948 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
5949 /*
5950 * Honor copy-on-write obligations
5951 *
5952 * The copy object has changed since we
5953 * last synchronized for copy-on-write.
5954 * Another copy object might have been
5955 * inserted while we released the object's
5956 * lock. Since someone could have seen the
5957 * original contents of the remaining pages
5958 * through that new object, we have to
5959 * synchronize with it again for the remaining
5960 * pages only. The previous pages are "busy"
5961 * so they can not be seen through the new
5962 * mapping. The new mapping will see our
5963 * upcoming changes for those previous pages,
5964 * but that's OK since they couldn't see what
5965 * was there before. It's just a race anyway
5966 * and there's no guarantee of consistency or
5967 * atomicity. We just don't want new mappings
5968 * to see both the *before* and *after* pages.
5969 */
5970 if (object->copy != VM_OBJECT_NULL) {
5971 vm_object_update(
5972 object,
5973 dst_offset,/* current offset */
5974 xfer_size, /* remaining size */
5975 NULL,
5976 NULL,
5977 FALSE, /* should_return */
5978 MEMORY_OBJECT_COPY_SYNC,
5979 VM_PROT_NO_CHANGE);
5980
5981 VM_PAGEOUT_DEBUG(upl_cow_again, 1);
5982 VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
5983 }
5984 /*
5985 * remember the copy object we synced with
5986 */
5987 last_copy_object = object->copy;
5988 }
5989 dst_page = vm_page_lookup(object, dst_offset);
5990
5991 if (dst_page != VM_PAGE_NULL) {
5992 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
5993 /*
5994 * skip over pages already present in the cache
5995 */
5996 if (user_page_list) {
5997 user_page_list[entry].phys_addr = 0;
5998 }
5999
6000 goto try_next_page;
6001 }
6002 if (dst_page->vmp_fictitious) {
6003 panic("need corner case for fictitious page");
6004 }
6005
6006 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6007 /*
6008 * someone else is playing with the
6009 * page. We will have to wait.
6010 */
6011 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6012
6013 continue;
6014 }
6015 if (dst_page->vmp_laundry) {
6016 vm_pageout_steal_laundry(dst_page, FALSE);
6017 }
6018 } else {
6019 if (object->private) {
6020 /*
6021 * This is a nasty wrinkle for users
6022 * of upl who encounter device or
6023 * private memory; however, it is
6024 * unavoidable: only a fault can
6025 * resolve the actual backing
6026 * physical page by asking the
6027 * backing device.
6028 */
6029 if (user_page_list) {
6030 user_page_list[entry].phys_addr = 0;
6031 }
6032
6033 goto try_next_page;
6034 }
6035 if (object->scan_collisions) {
6036 /*
6037 * the pageout_scan thread is trying to steal
6038 * pages from this object, but has run into our
6039 * lock... grab 2 pages from the head of the object...
6040 * the first is freed on behalf of pageout_scan, the
6041 * 2nd is for our own use... we use vm_object_page_grab
6042 * in both cases to avoid taking pages from the free
6043 * list since we are under memory pressure and our
6044 * lock on this object is getting in the way of
6045 * relieving it
6046 */
6047 dst_page = vm_object_page_grab(object);
6048
6049 if (dst_page != VM_PAGE_NULL) {
6050 vm_page_release(dst_page,
6051 FALSE);
6052 }
6053
6054 dst_page = vm_object_page_grab(object);
6055 }
6056 if (dst_page == VM_PAGE_NULL) {
6057 /*
6058 * need to allocate a page
6059 */
6060 dst_page = vm_page_grab_options(grab_options);
6061 if (dst_page != VM_PAGE_NULL) {
6062 page_grab_count++;
6063 }
6064 }
6065 if (dst_page == VM_PAGE_NULL) {
6066 if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
6067 /*
6068 * we don't want to stall waiting for pages to come onto the free list
6069 * while we're already holding absent pages in this UPL
6070 * the caller will deal with the empty slots
6071 */
6072 if (user_page_list) {
6073 user_page_list[entry].phys_addr = 0;
6074 }
6075
6076 goto try_next_page;
6077 }
6078 /*
6079 * no pages available... wait
6080 * then try again for the same
6081 * offset...
6082 */
6083 vm_object_unlock(object);
6084
6085 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
6086
6087 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
6088
6089 VM_PAGE_WAIT();
6090 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
6091
6092 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
6093
6094 vm_object_lock(object);
6095
6096 continue;
6097 }
6098 vm_page_insert(dst_page, object, dst_offset);
6099
6100 dst_page->vmp_absent = TRUE;
6101 dst_page->vmp_busy = FALSE;
6102
6103 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
6104 /*
6105 * if UPL_RET_ONLY_ABSENT was specified,
6106 * then we're definitely setting up a
6107 * UPL for a clustered read/pagein
6108 * operation... mark the pages as clustered
6109 * so upl_commit_range can put them on the
6110 * speculative list
6111 */
6112 dst_page->vmp_clustered = TRUE;
6113
6114 if (!(cntrl_flags & UPL_FILE_IO)) {
6115 counter_inc(&vm_statistics_pageins);
6116 }
6117 }
6118 }
6119 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
6120
6121 dst_page->vmp_overwriting = TRUE;
6122
6123 if (dst_page->vmp_pmapped) {
6124 if (!(cntrl_flags & UPL_FILE_IO)) {
6125 /*
6126 * eliminate all mappings from the
6127 * original object and its progeny
6128 */
6129 refmod_state = pmap_disconnect(phys_page);
6130 } else {
6131 refmod_state = pmap_get_refmod(phys_page);
6132 }
6133 } else {
6134 refmod_state = 0;
6135 }
6136
6137 hw_dirty = refmod_state & VM_MEM_MODIFIED;
6138 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
6139
6140 if (cntrl_flags & UPL_SET_LITE) {
6141 unsigned int pg_num;
6142
6143 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
6144 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
6145 lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
6146
6147 if (hw_dirty) {
6148 pmap_clear_modify(phys_page);
6149 }
6150
6151 /*
6152 * Mark original page as cleaning
6153 * in place.
6154 */
6155 dst_page->vmp_cleaning = TRUE;
6156 dst_page->vmp_precious = FALSE;
6157 } else {
6158 /*
6159 * use pageclean setup, it is more
6160 * convenient even for the pageout
6161 * cases here
6162 */
6163 vm_object_lock(upl->map_object);
6164 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
6165 vm_object_unlock(upl->map_object);
6166
6167 alias_page->vmp_absent = FALSE;
6168 alias_page = NULL;
6169 }
6170
6171 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
6172 upl->flags &= ~UPL_CLEAR_DIRTY;
6173 upl->flags |= UPL_SET_DIRTY;
6174 dirty = TRUE;
6175 /*
6176 * Page belonging to a code-signed object is about to
6177 * be written. Mark it tainted and disconnect it from
6178 * all pmaps so processes have to fault it back in and
6179 * deal with the tainted bit.
6180 */
6181 if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
6182 dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
6183 vm_page_upl_tainted++;
6184 if (dst_page->vmp_pmapped) {
6185 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6186 if (refmod_state & VM_MEM_REFERENCED) {
6187 dst_page->vmp_reference = TRUE;
6188 }
6189 }
6190 }
6191 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
6192 /*
6193 * clean in place for read implies
6194 * that a write will be done on all
6195 * the pages that are dirty before
6196 * a upl commit is done. The caller
6197 * is obligated to preserve the
6198 * contents of all pages marked dirty
6199 */
6200 upl->flags |= UPL_CLEAR_DIRTY;
6201 }
6202 dst_page->vmp_dirty = dirty;
6203
6204 if (!dirty) {
6205 dst_page->vmp_precious = TRUE;
6206 }
6207
6208 if (!VM_PAGE_WIRED(dst_page)) {
6209 /*
6210 * deny access to the target page while
6211 * it is being worked on
6212 */
6213 dst_page->vmp_busy = TRUE;
6214 } else {
6215 dwp->dw_mask |= DW_vm_page_wire;
6216 }
6217
6218 /*
6219 * We might be about to satisfy a fault which has been
6220 * requested. So no need for the "restart" bit.
6221 */
6222 dst_page->vmp_restart = FALSE;
6223 if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
6224 /*
6225 * expect the page to be used
6226 */
6227 dwp->dw_mask |= DW_set_reference;
6228 }
6229 if (cntrl_flags & UPL_PRECIOUS) {
6230 if (object->internal) {
6231 SET_PAGE_DIRTY(dst_page, FALSE);
6232 dst_page->vmp_precious = FALSE;
6233 } else {
6234 dst_page->vmp_precious = TRUE;
6235 }
6236 } else {
6237 dst_page->vmp_precious = FALSE;
6238 }
6239 }
6240 if (dst_page->vmp_busy) {
6241 upl->flags |= UPL_HAS_BUSY;
6242 }
6243
6244 if (phys_page > upl->highest_page) {
6245 upl->highest_page = phys_page;
6246 }
6247 assert(!pmap_is_noencrypt(phys_page));
6248 if (user_page_list) {
6249 user_page_list[entry].phys_addr = phys_page;
6250 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
6251 user_page_list[entry].absent = dst_page->vmp_absent;
6252 user_page_list[entry].dirty = dst_page->vmp_dirty;
6253 user_page_list[entry].precious = dst_page->vmp_precious;
6254 user_page_list[entry].device = FALSE;
6255 user_page_list[entry].needed = FALSE;
6256 if (dst_page->vmp_clustered == TRUE) {
6257 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
6258 } else {
6259 user_page_list[entry].speculative = FALSE;
6260 }
6261 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
6262 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
6263 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
6264 user_page_list[entry].mark = FALSE;
6265 }
6266 /*
6267 * if UPL_RET_ONLY_ABSENT is set, then
6268 * we are working with a fresh page and we've
6269 * just set the clustered flag on it to
6270 * indicate that it was dragged in as part of a
6271 * speculative cluster... so leave it alone
6272 */
6273 if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6274 /*
6275 * someone is explicitly grabbing this page...
6276 * update clustered and speculative state
6277 *
6278 */
6279 if (dst_page->vmp_clustered) {
6280 VM_PAGE_CONSUME_CLUSTERED(dst_page);
6281 }
6282 }
6283 try_next_page:
6284 if (dwp->dw_mask) {
6285 if (dwp->dw_mask & DW_vm_page_activate) {
6286 counter_inc(&vm_statistics_reactivations);
6287 }
6288
6289 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
6290
6291 if (dw_count >= dw_limit) {
6292 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6293
6294 dwp = dwp_start;
6295 dw_count = 0;
6296 }
6297 }
6298 entry++;
6299 dst_offset += PAGE_SIZE_64;
6300 xfer_size -= PAGE_SIZE;
6301 }
6302 if (dw_count) {
6303 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
6304 dwp = dwp_start;
6305 dw_count = 0;
6306 }
6307
6308 if (alias_page != NULL) {
6309 VM_PAGE_FREE(alias_page);
6310 }
6311 if (pmap_flushes_delayed == TRUE) {
6312 pmap_flush(&pmap_flush_context_storage);
6313 }
6314
6315 if (page_list_count != NULL) {
6316 if (upl->flags & UPL_INTERNAL) {
6317 *page_list_count = 0;
6318 } else if (*page_list_count > entry) {
6319 *page_list_count = entry;
6320 }
6321 }
6322 #if UPL_DEBUG
6323 upl->upl_state = 1;
6324 #endif
6325 vm_object_unlock(object);
6326
6327 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
6328 #if DEVELOPMENT || DEBUG
6329 if (task != NULL) {
6330 ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
6331 }
6332 #endif /* DEVELOPMENT || DEBUG */
6333
6334 if (dwp_start && dwp_finish_ctx) {
6335 vm_page_delayed_work_finish_ctx(dwp_start);
6336 dwp_start = dwp = NULL;
6337 }
6338
6339 return KERN_SUCCESS;
6340 }
6341
6342 /*
6343 * Routine: vm_object_super_upl_request
6344 * Purpose:
6345 * Cause the population of a portion of a vm_object
6346 * in much the same way as memory_object_upl_request.
6347 * Depending on the nature of the request, the pages
6348 * returned may contain valid data or be uninitialized.
6349 * However, the region may be expanded up to the super
6350 * cluster size provided.
6351 */
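/*
 * Worked example of the expansion below (illustrative): offset 0x3000,
 * size 0x1000, super_cluster 0x8000 -> base_offset = 0x3000 & ~0x7fff = 0;
 * offset + size (0x4000) fits within base_offset + super_cluster, so
 * super_size stays 0x8000 and the request grows to cover [0, 0x8000).
 * A range straddling the super-cluster boundary would double super_size to
 * super_cluster << 1, and the result is always clipped to the object size.
 */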
6352
6353 __private_extern__ kern_return_t
6354 vm_object_super_upl_request(
6355 vm_object_t object,
6356 vm_object_offset_t offset,
6357 upl_size_t size,
6358 upl_size_t super_cluster,
6359 upl_t *upl,
6360 upl_page_info_t *user_page_list,
6361 unsigned int *page_list_count,
6362 upl_control_flags_t cntrl_flags,
6363 vm_tag_t tag)
6364 {
6365 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
6366 return KERN_FAILURE;
6367 }
6368
6369 assert(object->paging_in_progress);
6370 offset = offset - object->paging_offset;
6371
6372 if (super_cluster > size) {
6373 vm_object_offset_t base_offset;
6374 upl_size_t super_size;
6375 vm_object_size_t super_size_64;
6376
6377 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
6378 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
6379 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
6380 super_size = (upl_size_t) super_size_64;
6381 assert(super_size == super_size_64);
6382
6383 if (offset > (base_offset + super_size)) {
6384 panic("vm_object_super_upl_request: Missed target pageout"
6385 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
6386 offset, base_offset, super_size, super_cluster,
6387 size, object->paging_offset);
6388 }
6389 /*
6390 * apparently there is a case where the vm requests a
6391 * page to be written out whose offset is beyond the
6392 * object size
6393 */
6394 if ((offset + size) > (base_offset + super_size)) {
6395 super_size_64 = (offset + size) - base_offset;
6396 super_size = (upl_size_t) super_size_64;
6397 assert(super_size == super_size_64);
6398 }
6399
6400 offset = base_offset;
6401 size = super_size;
6402 }
6403 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
6404 }
6405
6406 int cs_executable_create_upl = 0;
6407 extern int proc_selfpid(void);
6408 extern char *proc_name_address(void *p);
6409
6410 kern_return_t
6411 vm_map_create_upl(
6412 vm_map_t map,
6413 vm_map_address_t offset,
6414 upl_size_t *upl_size,
6415 upl_t *upl,
6416 upl_page_info_array_t page_list,
6417 unsigned int *count,
6418 upl_control_flags_t *flags,
6419 vm_tag_t tag)
6420 {
6421 vm_map_entry_t entry;
6422 upl_control_flags_t caller_flags;
6423 int force_data_sync;
6424 int sync_cow_data;
6425 vm_object_t local_object;
6426 vm_map_offset_t local_offset;
6427 vm_map_offset_t local_start;
6428 kern_return_t ret;
6429 vm_map_address_t original_offset;
6430 vm_map_size_t original_size, adjusted_size;
6431 vm_map_offset_t local_entry_start;
6432 vm_object_offset_t local_entry_offset;
6433 vm_object_offset_t offset_in_mapped_page;
6434 boolean_t release_map = FALSE;
6435
6436 start_with_map:
6437
6438 original_offset = offset;
6439 original_size = *upl_size;
6440 adjusted_size = original_size;
6441
6442 caller_flags = *flags;
6443
6444 if (caller_flags & ~UPL_VALID_FLAGS) {
6445 /*
6446 * For forward compatibility's sake,
6447 * reject any unknown flag.
6448 */
6449 ret = KERN_INVALID_VALUE;
6450 goto done;
6451 }
6452 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
6453 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
6454
6455 if (upl == NULL) {
6456 ret = KERN_INVALID_ARGUMENT;
6457 goto done;
6458 }
6459
6460 REDISCOVER_ENTRY:
6461 vm_map_lock_read(map);
6462
6463 if (!vm_map_lookup_entry(map, offset, &entry)) {
6464 vm_map_unlock_read(map);
6465 ret = KERN_FAILURE;
6466 goto done;
6467 }
6468
6469 local_entry_start = entry->vme_start;
6470 local_entry_offset = VME_OFFSET(entry);
6471
6472 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
6473 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, *upl_size, *flags);
6474 }
6475
6476 if (entry->vme_end - original_offset < adjusted_size) {
6477 adjusted_size = entry->vme_end - original_offset;
6478 assert(adjusted_size > 0);
6479 *upl_size = (upl_size_t) adjusted_size;
6480 assert(*upl_size == adjusted_size);
6481 }
6482
6483 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
6484 *flags = 0;
6485
6486 if (!entry->is_sub_map &&
6487 VME_OBJECT(entry) != VM_OBJECT_NULL) {
6488 if (VME_OBJECT(entry)->private) {
6489 *flags = UPL_DEV_MEMORY;
6490 }
6491
6492 if (VME_OBJECT(entry)->phys_contiguous) {
6493 *flags |= UPL_PHYS_CONTIG;
6494 }
6495 }
6496 vm_map_unlock_read(map);
6497 ret = KERN_SUCCESS;
6498 goto done;
6499 }
6500
6501 offset_in_mapped_page = 0;
6502 if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
6503 offset = vm_map_trunc_page(original_offset, VM_MAP_PAGE_MASK(map));
6504 *upl_size = (upl_size_t)
6505 (vm_map_round_page(original_offset + adjusted_size,
6506 VM_MAP_PAGE_MASK(map))
6507 - offset);
6508
6509 offset_in_mapped_page = original_offset - offset;
6510 assert(offset_in_mapped_page < VM_MAP_PAGE_SIZE(map));
6511
6512 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)original_offset, (uint64_t)original_size, *flags, (uint64_t)offset, (uint64_t)adjusted_size, *upl_size, offset_in_mapped_page);
6513 }
6514
6515 if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
6516 !VME_OBJECT(entry)->phys_contiguous) {
6517 if (*upl_size > MAX_UPL_SIZE_BYTES) {
6518 *upl_size = MAX_UPL_SIZE_BYTES;
6519 }
6520 }
6521
6522 /*
6523 * Create an object if necessary.
6524 */
6525 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
6526 if (vm_map_lock_read_to_write(map)) {
6527 goto REDISCOVER_ENTRY;
6528 }
6529
6530 VME_OBJECT_SET(entry,
6531 vm_object_allocate((vm_size_t)
6532 vm_object_round_page((entry->vme_end - entry->vme_start))));
6533 VME_OFFSET_SET(entry, 0);
6534 assert(entry->use_pmap);
6535
6536 vm_map_lock_write_to_read(map);
6537 }
6538
6539 if (!(caller_flags & UPL_COPYOUT_FROM) &&
6540 !entry->is_sub_map &&
6541 !(entry->protection & VM_PROT_WRITE)) {
6542 vm_map_unlock_read(map);
6543 ret = KERN_PROTECTION_FAILURE;
6544 goto done;
6545 }
6546
6547 #if !XNU_TARGET_OS_OSX
6548 if (map->pmap != kernel_pmap &&
6549 (caller_flags & UPL_COPYOUT_FROM) &&
6550 (entry->protection & VM_PROT_EXECUTE) &&
6551 !(entry->protection & VM_PROT_WRITE)) {
6552 vm_offset_t kaddr;
6553 vm_size_t ksize;
6554
6555 /*
6556 * We're about to create a read-only UPL backed by
6557 * memory from an executable mapping.
6558 * Wiring the pages would result in the pages being copied
6559 * (due to the "MAP_PRIVATE" mapping) and no longer
6560 * code-signed, so no longer eligible for execution.
6561 * Instead, let's copy the data into a kernel buffer and
6562 * create the UPL from this kernel buffer.
6563 * The kernel buffer is then freed, leaving the UPL holding
6564 * the last reference on the VM object, so the memory will
6565 * be released when the UPL is committed.
6566 */
6567
6568 vm_map_unlock_read(map);
6569 entry = VM_MAP_ENTRY_NULL;
6570 /* allocate kernel buffer */
6571 ksize = round_page(*upl_size);
6572 kaddr = 0;
6573 ret = kmem_alloc(kernel_map, &kaddr, ksize,
6574 KMA_PAGEABLE | KMA_DATA, tag);
6575 if (ret == KERN_SUCCESS) {
6576 /* copyin the user data */
6577 ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
6578 }
6579 if (ret == KERN_SUCCESS) {
6580 if (ksize > *upl_size) {
6581 /* zero out the extra space in kernel buffer */
6582 memset((void *)(kaddr + *upl_size),
6583 0,
6584 ksize - *upl_size);
6585 }
6586 /* create the UPL from the kernel buffer */
6587 vm_object_offset_t offset_in_object;
6588 vm_object_offset_t offset_in_object_page;
6589
6590 offset_in_object = offset - local_entry_start + local_entry_offset;
6591 offset_in_object_page = offset_in_object - vm_object_trunc_page(offset_in_object);
6592 assert(offset_in_object_page < PAGE_SIZE);
6593 assert(offset_in_object_page + offset_in_mapped_page < PAGE_SIZE);
6594 *upl_size -= offset_in_object_page + offset_in_mapped_page;
6595 ret = vm_map_create_upl(kernel_map,
6596 (vm_map_address_t)(kaddr + offset_in_object_page + offset_in_mapped_page),
6597 upl_size, upl, page_list, count, flags, tag);
6598 }
6599 if (kaddr != 0) {
6600 /* free the kernel buffer */
6601 kmem_free(kernel_map, kaddr, ksize);
6602 kaddr = 0;
6603 ksize = 0;
6604 }
6605 #if DEVELOPMENT || DEBUG
6606 DTRACE_VM4(create_upl_from_executable,
6607 vm_map_t, map,
6608 vm_map_address_t, offset,
6609 upl_size_t, *upl_size,
6610 kern_return_t, ret);
6611 #endif /* DEVELOPMENT || DEBUG */
6612 goto done;
6613 }
6614 #endif /* !XNU_TARGET_OS_OSX */
6615
6616 local_object = VME_OBJECT(entry);
6617 assert(local_object != VM_OBJECT_NULL);
6618
6619 if (!entry->is_sub_map &&
6620 !entry->needs_copy &&
6621 *upl_size != 0 &&
6622 local_object->vo_size > *upl_size && /* partial UPL */
6623 entry->wired_count == 0 && /* No COW for entries that are wired */
6624 (map->pmap != kernel_pmap) && /* alias checks */
6625 (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
6626 ||
6627 ( /* case 2 */
6628 local_object->internal &&
6629 (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
6630 local_object->ref_count > 1))) {
6631 vm_prot_t prot;
6632
6633 /*
6634 * Case 1:
6635 * Set up the targeted range for copy-on-write to avoid
6636 * applying true_share/copy_delay to the entire object.
6637 *
6638 * Case 2:
6639 * This map entry covers only part of an internal
6640 * object. There could be other map entries covering
6641 * other areas of this object and some of these map
6642 * entries could be marked as "needs_copy", which
6643 * assumes that the object is COPY_SYMMETRIC.
6644 * To avoid marking this object as COPY_DELAY and
6645 * "true_share", let's shadow it and mark the new
6646 * (smaller) object as "true_share" and COPY_DELAY.
6647 */
6648
6649 if (vm_map_lock_read_to_write(map)) {
6650 goto REDISCOVER_ENTRY;
6651 }
6652 vm_map_lock_assert_exclusive(map);
6653 assert(VME_OBJECT(entry) == local_object);
6654
6655 vm_map_clip_start(map,
6656 entry,
6657 vm_map_trunc_page(offset,
6658 VM_MAP_PAGE_MASK(map)));
6659 vm_map_clip_end(map,
6660 entry,
6661 vm_map_round_page(offset + *upl_size,
6662 VM_MAP_PAGE_MASK(map)));
6663 if ((entry->vme_end - offset) < *upl_size) {
6664 *upl_size = (upl_size_t) (entry->vme_end - offset);
6665 assert(*upl_size == entry->vme_end - offset);
6666 }
6667
6668 prot = entry->protection & ~VM_PROT_WRITE;
6669 if (override_nx(map, VME_ALIAS(entry)) && prot) {
6670 prot |= VM_PROT_EXECUTE;
6671 }
6672 vm_object_pmap_protect(local_object,
6673 VME_OFFSET(entry),
6674 entry->vme_end - entry->vme_start,
6675 ((entry->is_shared ||
6676 map->mapped_in_other_pmaps)
6677 ? PMAP_NULL
6678 : map->pmap),
6679 VM_MAP_PAGE_SIZE(map),
6680 entry->vme_start,
6681 prot);
6682
6683 assert(entry->wired_count == 0);
6684
6685 /*
6686 * Lock the VM object and re-check its status: if it's mapped
6687 * in another address space, we could still be racing with
6688 * another thread holding that other VM map exclusively.
6689 */
6690 vm_object_lock(local_object);
6691 if (local_object->true_share) {
6692 /* object is already in proper state: no COW needed */
6693 assert(local_object->copy_strategy !=
6694 MEMORY_OBJECT_COPY_SYMMETRIC);
6695 } else {
6696 /* not true_share: ask for copy-on-write below */
6697 assert(local_object->copy_strategy ==
6698 MEMORY_OBJECT_COPY_SYMMETRIC);
6699 entry->needs_copy = TRUE;
6700 }
6701 vm_object_unlock(local_object);
6702
6703 vm_map_lock_write_to_read(map);
6704 }
6705
6706 if (entry->needs_copy) {
6707 /*
6708 * Honor copy-on-write for COPY_SYMMETRIC
6709 * strategy.
6710 */
6711 vm_map_t local_map;
6712 vm_object_t object;
6713 vm_object_offset_t new_offset;
6714 vm_prot_t prot;
6715 boolean_t wired;
6716 vm_map_version_t version;
6717 vm_map_t real_map;
6718 vm_prot_t fault_type;
6719
6720 local_map = map;
6721
6722 if (caller_flags & UPL_COPYOUT_FROM) {
6723 fault_type = VM_PROT_READ | VM_PROT_COPY;
6724 vm_counters.create_upl_extra_cow++;
6725 vm_counters.create_upl_extra_cow_pages +=
6726 (entry->vme_end - entry->vme_start) / PAGE_SIZE;
6727 } else {
6728 fault_type = VM_PROT_WRITE;
6729 }
6730 if (vm_map_lookup_locked(&local_map,
6731 offset, fault_type,
6732 OBJECT_LOCK_EXCLUSIVE,
6733 &version, &object,
6734 &new_offset, &prot, &wired,
6735 NULL,
6736 &real_map, NULL) != KERN_SUCCESS) {
6737 if (fault_type == VM_PROT_WRITE) {
6738 vm_counters.create_upl_lookup_failure_write++;
6739 } else {
6740 vm_counters.create_upl_lookup_failure_copy++;
6741 }
6742 vm_map_unlock_read(local_map);
6743 ret = KERN_FAILURE;
6744 goto done;
6745 }
6746 if (real_map != local_map) {
6747 vm_map_unlock(real_map);
6748 }
6749 vm_map_unlock_read(local_map);
6750
6751 vm_object_unlock(object);
6752
6753 goto REDISCOVER_ENTRY;
6754 }
6755
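/*
 * Submap case below: take a reference on the submap, translate the offset
 * into the submap's address space, and restart the lookup from
 * start_with_map. release_map guarantees that each intermediate map
 * reference is dropped on the next hop or at the done: label.
 */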
6756 if (entry->is_sub_map) {
6757 vm_map_t submap;
6758
6759 submap = VME_SUBMAP(entry);
6760 local_start = entry->vme_start;
6761 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6762
6763 vm_map_reference(submap);
6764 vm_map_unlock_read(map);
6765
6766 DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map, (uint64_t)offset, (uint64_t)original_offset, *upl_size, (uint64_t)adjusted_size, (uint64_t)original_size, offset_in_mapped_page, submap);
6767 offset += offset_in_mapped_page;
6768 *upl_size -= offset_in_mapped_page;
6769
6770 if (release_map) {
6771 vm_map_deallocate(map);
6772 }
6773 map = submap;
6774 release_map = TRUE;
6775 offset = local_offset + (offset - local_start);
6776 goto start_with_map;
6777 }
6778
6779 if (sync_cow_data &&
6780 (VME_OBJECT(entry)->shadow ||
6781 VME_OBJECT(entry)->copy)) {
6782 local_object = VME_OBJECT(entry);
6783 local_start = entry->vme_start;
6784 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6785
6786 vm_object_reference(local_object);
6787 vm_map_unlock_read(map);
6788
6789 if (local_object->shadow && local_object->copy) {
6790 vm_object_lock_request(local_object->shadow,
6791 ((vm_object_offset_t)
6792 ((offset - local_start) +
6793 local_offset) +
6794 local_object->vo_shadow_offset),
6795 *upl_size, FALSE,
6796 MEMORY_OBJECT_DATA_SYNC,
6797 VM_PROT_NO_CHANGE);
6798 }
6799 sync_cow_data = FALSE;
6800 vm_object_deallocate(local_object);
6801
6802 goto REDISCOVER_ENTRY;
6803 }
6804 if (force_data_sync) {
6805 local_object = VME_OBJECT(entry);
6806 local_start = entry->vme_start;
6807 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6808
6809 vm_object_reference(local_object);
6810 vm_map_unlock_read(map);
6811
6812 vm_object_lock_request(local_object,
6813 ((vm_object_offset_t)
6814 ((offset - local_start) +
6815 local_offset)),
6816 (vm_object_size_t)*upl_size,
6817 FALSE,
6818 MEMORY_OBJECT_DATA_SYNC,
6819 VM_PROT_NO_CHANGE);
6820
6821 force_data_sync = FALSE;
6822 vm_object_deallocate(local_object);
6823
6824 goto REDISCOVER_ENTRY;
6825 }
6826 if (VME_OBJECT(entry)->private) {
6827 *flags = UPL_DEV_MEMORY;
6828 } else {
6829 *flags = 0;
6830 }
6831
6832 if (VME_OBJECT(entry)->phys_contiguous) {
6833 *flags |= UPL_PHYS_CONTIG;
6834 }
6835
6836 local_object = VME_OBJECT(entry);
6837 local_offset = (vm_map_offset_t)VME_OFFSET(entry);
6838 local_start = entry->vme_start;
6839
6840 /*
6841 * Wiring will copy the pages to the shadow object.
6842 * The shadow object will not be code-signed so
6843 * attempting to execute code from these copied pages
6844 * would trigger a code-signing violation.
6845 */
6846 if (entry->protection & VM_PROT_EXECUTE) {
6847 #if MACH_ASSERT
6848 printf("pid %d[%s] create_upl out of executable range from "
6849 "0x%llx to 0x%llx: side effects may include "
6850 "code-signing violations later on\n",
6851 proc_selfpid(),
6852 (current_task()->bsd_info
6853 ? proc_name_address(current_task()->bsd_info)
6854 : "?"),
6855 (uint64_t) entry->vme_start,
6856 (uint64_t) entry->vme_end);
6857 #endif /* MACH_ASSERT */
6858 DTRACE_VM2(cs_executable_create_upl,
6859 uint64_t, (uint64_t)entry->vme_start,
6860 uint64_t, (uint64_t)entry->vme_end);
6861 cs_executable_create_upl++;
6862 }
6863
6864 vm_object_lock(local_object);
6865
6866 /*
6867 * Ensure that this object is "true_share" and "copy_delay" now,
6868 * while we're still holding the VM map lock. After we unlock the map,
6869 * anything could happen to that mapping, including some copy-on-write
6870 * activity. We need to make sure that the IOPL will point at the
6871 * same memory as the mapping.
6872 */
6873 if (local_object->true_share) {
6874 assert(local_object->copy_strategy !=
6875 MEMORY_OBJECT_COPY_SYMMETRIC);
6876 } else if (local_object != kernel_object &&
6877 local_object != compressor_object &&
6878 !local_object->phys_contiguous) {
6879 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6880 if (!local_object->true_share &&
6881 vm_object_tracking_btlog) {
6882 btlog_record(vm_object_tracking_btlog, local_object,
6883 VM_OBJECT_TRACKING_OP_TRUESHARE,
6884 btref_get(__builtin_frame_address(0), 0));
6885 }
6886 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6887 local_object->true_share = TRUE;
6888 if (local_object->copy_strategy ==
6889 MEMORY_OBJECT_COPY_SYMMETRIC) {
6890 local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6891 }
6892 }
6893
6894 vm_object_reference_locked(local_object);
6895 vm_object_unlock(local_object);
6896
6897 vm_map_unlock_read(map);
6898
6899 offset += offset_in_mapped_page;
6900 assert(*upl_size > offset_in_mapped_page);
6901 *upl_size -= offset_in_mapped_page;
6902
6903 ret = vm_object_iopl_request(local_object,
6904 ((vm_object_offset_t)
6905 ((offset - local_start) + local_offset)),
6906 *upl_size,
6907 upl,
6908 page_list,
6909 count,
6910 caller_flags,
6911 tag);
6912 vm_object_deallocate(local_object);
6913
6914 done:
6915 if (release_map) {
6916 vm_map_deallocate(map);
6917 }
6918
6919 return ret;
6920 }
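/*
 * Hypothetical usage sketch (names and flag choices are illustrative, not
 * taken from a real caller): create a UPL over part of a user mapping and
 * release it again. On success, *flags may come back with UPL_DEV_MEMORY
 * and/or UPL_PHYS_CONTIG describing the underlying object.
 *
 *   upl_size_t usize = PAGE_SIZE;
 *   upl_t my_upl = NULL;
 *   unsigned int pcount = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
 *   upl_control_flags_t uflags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL |
 *       UPL_SET_LITE;
 *   kern_return_t kr = vm_map_create_upl(map, user_addr, &usize, &my_upl,
 *       NULL, &pcount, &uflags, VM_KERN_MEMORY_NONE);
 *   if (kr == KERN_SUCCESS) {
 *           upl_deallocate(my_upl);
 *   }
 */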
6921
6922 /*
6923 * Internal routine to enter a UPL into a VM map.
6924 *
6925 * JMM - This should just be doable through the standard
6926 * vm_map_enter() API.
6927 */
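/*
 * Illustrative caller-side sketch (editorial addition; "kaddr" is a
 * hypothetical name, not from this file). A kernel-internal caller
 * pairs the enter with a later vm_map_remove_upl():
 *
 *	vm_map_offset_t kaddr;
 *	kern_return_t kr;
 *
 *	kr = vm_map_enter_upl(kernel_map, upl, &kaddr);
 *	if (kr == KERN_SUCCESS) {
 *		... access the pages through kaddr ...
 *		(void) vm_map_remove_upl(kernel_map, upl);
 *	}
 */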
6928 kern_return_t
6929 vm_map_enter_upl_range(
6930 vm_map_t map,
6931 upl_t upl,
6932 vm_object_offset_t offset_to_map,
6933 upl_size_t size_to_map,
6934 vm_prot_t prot_to_map,
6935 vm_map_offset_t *dst_addr)
6936 {
6937 vm_map_size_t size;
6938 vm_object_offset_t offset;
6939 vm_map_offset_t addr;
6940 vm_page_t m;
6941 kern_return_t kr;
6942 int isVectorUPL = 0, curr_upl = 0;
6943 upl_t vector_upl = NULL;
6944 vm_offset_t vector_upl_dst_addr = 0;
6945 vm_map_t vector_upl_submap = NULL;
6946 upl_offset_t subupl_offset = 0;
6947 upl_size_t subupl_size = 0;
6948
6949 if (upl == UPL_NULL) {
6950 return KERN_INVALID_ARGUMENT;
6951 }
6952
6953 DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx (uploff: 0x%llx) size 0x%x (uplsz: 0x%x) \n", map, upl, upl->flags, upl->map_object, offset_to_map, upl->u_offset, size_to_map, upl->u_size);
6954 assert(map == kernel_map);
6955
6956 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6957 int mapped = 0, valid_upls = 0;
6958 vector_upl = upl;
6959
6960 upl_lock(vector_upl);
6961 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6962 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
6963 if (upl == NULL) {
6964 continue;
6965 }
6966 valid_upls++;
6967 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6968 mapped++;
6969 }
6970 }
6971
6972 if (mapped) {
6973 if (mapped != valid_upls) {
6974 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped", mapped, valid_upls);
6975 } else {
6976 upl_unlock(vector_upl);
6977 return KERN_FAILURE;
6978 }
6979 }
6980
6981 if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
6982 panic("TODO4K: vector UPL not implemented");
6983 }
6984
6985 vector_upl_submap = kmem_suballoc(map, &vector_upl_dst_addr,
6986 vector_upl->u_size, VM_MAP_CREATE_DEFAULT,
6987 VM_FLAGS_ANYWHERE, KMS_NOFAIL | KMS_DATA,
6988 VM_KERN_MEMORY_NONE).kmr_submap;
6989 map = vector_upl_submap;
6990 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
6991 curr_upl = 0;
6992 } else {
6993 upl_lock(upl);
6994 }
6995
6996 process_upl_to_enter:
6997 if (isVectorUPL) {
6998 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
6999 *dst_addr = vector_upl_dst_addr;
7000 upl_unlock(vector_upl);
7001 return KERN_SUCCESS;
7002 }
7003 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7004 if (upl == NULL) {
7005 goto process_upl_to_enter;
7006 }
7007
7008 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
7009 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
7010 } else {
7011 /*
7012 * check to see if already mapped
7013 */
7014 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
7015 upl_unlock(upl);
7016 return KERN_FAILURE;
7017 }
7018 }
7019
7020 if ((!(upl->flags & UPL_SHADOWED)) &&
7021 ((upl->flags & UPL_HAS_BUSY) ||
7022 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
7023 vm_object_t object;
7024 vm_page_t alias_page;
7025 vm_object_offset_t new_offset;
7026 unsigned int pg_num;
7027 wpl_array_t lite_list;
7028
7029 size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7030 if (upl->flags & UPL_INTERNAL) {
7031 lite_list = (wpl_array_t)
7032 ((((uintptr_t)upl) + sizeof(struct upl))
7033 + ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
7034 } else {
7035 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
7036 }
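/*
 * Layout sketch inferred from the arithmetic above (illustrative,
 * not normative): an internal UPL is a single allocation of
 *
 *	[ struct upl | upl_page_info_t[size / PAGE_SIZE] | lite bitmap ]
 *
 * while an external UPL omits the page-info array, so the "lite
 * list" bitmap starts right after the header. The bitmap carries
 * one bit per page and is tested below as
 * lite_list[pg_num >> 5] & (1U << (pg_num & 31)).
 */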
7037 object = upl->map_object;
7038 upl->map_object = vm_object_allocate(vm_object_round_page(size));
7039
7040 vm_object_lock(upl->map_object);
7041
7042 upl->map_object->shadow = object;
7043 upl->map_object->pageout = TRUE;
7044 upl->map_object->can_persist = FALSE;
7045 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
7046 upl->map_object->vo_shadow_offset = upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset;
7047 assertf(page_aligned(upl->map_object->vo_shadow_offset),
7048 "object %p shadow_offset 0x%llx",
7049 upl->map_object,
7050 (uint64_t)upl->map_object->vo_shadow_offset);
7051 upl->map_object->wimg_bits = object->wimg_bits;
7052 offset = upl->map_object->vo_shadow_offset;
7053 new_offset = 0;
7054
7055 upl->flags |= UPL_SHADOWED;
7056
7057 while (size) {
7058 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
7059 assert(pg_num == new_offset / PAGE_SIZE);
7060
7061 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
7062 alias_page = vm_page_grab_fictitious(TRUE);
7063
7064 vm_object_lock(object);
7065
7066 m = vm_page_lookup(object, offset);
7067 if (m == VM_PAGE_NULL) {
7068 panic("vm_upl_map: page missing");
7069 }
7070
7071 /*
7072 * Convert the fictitious page to a private
7073 * shadow of the real page.
7074 */
7075 assert(alias_page->vmp_fictitious);
7076 alias_page->vmp_fictitious = FALSE;
7077 alias_page->vmp_private = TRUE;
7078 alias_page->vmp_free_when_done = TRUE;
7079 /*
7080 * since m is a page in the upl it must
7081 * already be wired or BUSY, so it's
7082 * safe to assign the underlying physical
7083 * page to the alias
7084 */
7085 VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
7086
7087 vm_object_unlock(object);
7088
7089 vm_page_lockspin_queues();
7090 vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
7091 vm_page_unlock_queues();
7092
7093 vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
7094
7095 assert(!alias_page->vmp_wanted);
7096 alias_page->vmp_busy = FALSE;
7097 alias_page->vmp_absent = FALSE;
7098 }
7099 size -= PAGE_SIZE;
7100 offset += PAGE_SIZE_64;
7101 new_offset += PAGE_SIZE_64;
7102 }
7103 vm_object_unlock(upl->map_object);
7104 }
7105 if (upl->flags & UPL_SHADOWED) {
7106 if (isVectorUPL) {
7107 offset = 0;
7108 } else {
7109 offset = offset_to_map;
7110 }
7111 } else {
7112 offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
7113 if (!isVectorUPL) {
7114 offset += offset_to_map;
7115 }
7116 }
7117
7118 if (isVectorUPL) {
7119 size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7120 } else {
7121 size = MIN(upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)), size_to_map);
7122 }
7123
7124 vm_object_reference(upl->map_object);
7125
7126 if (!isVectorUPL) {
7127 *dst_addr = 0;
7128 /*
7129 * NEED A UPL_MAP ALIAS
7130 */
7131 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7132 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
7133 upl->map_object, offset, FALSE,
7134 prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7135
7136 if (kr != KERN_SUCCESS) {
7137 vm_object_deallocate(upl->map_object);
7138 upl_unlock(upl);
7139 return kr;
7140 }
7141 } else {
7142 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
7143 VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
7144 upl->map_object, offset, FALSE,
7145 prot_to_map, VM_PROT_ALL, VM_INHERIT_DEFAULT);
7146 if (kr) {
7147 panic("vm_map_enter failed for a Vector UPL");
7148 }
7149 }
7150 upl->u_mapped_size = (upl_size_t) size; /* When we allow multiple submappings of the UPL */
7151 /* this will have to be an increment rather than */
7152 /* an assignment. */
7153 vm_object_lock(upl->map_object);
7154
7155 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
7156 m = vm_page_lookup(upl->map_object, offset);
7157
7158 if (m) {
7159 m->vmp_pmapped = TRUE;
7160
7161 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
7162 * but only in kernel space. If this was on a user map,
7163 * we'd have to set the wpmapped bit. */
7164 /* m->vmp_wpmapped = TRUE; */
7165 assert(map->pmap == kernel_pmap);
7166
7167 PMAP_ENTER(map->pmap, addr, m, prot_to_map, VM_PROT_NONE, 0, TRUE, kr);
7168
7169 assert(kr == KERN_SUCCESS);
7170 #if KASAN
7171 kasan_notify_address(addr, PAGE_SIZE_64);
7172 #endif
7173 }
7174 offset += PAGE_SIZE_64;
7175 }
7176 vm_object_unlock(upl->map_object);
7177
7178 /*
7179 * hold a reference for the mapping
7180 */
7181 upl->ref_count++;
7182 upl->flags |= UPL_PAGE_LIST_MAPPED;
7183 upl->kaddr = (vm_offset_t) *dst_addr;
7184 assert(upl->kaddr == *dst_addr);
7185
7186 if (isVectorUPL) {
7187 goto process_upl_to_enter;
7188 }
7189
7190 if (!isVectorUPL) {
7191 vm_map_offset_t addr_adjustment;
7192
7193 addr_adjustment = (vm_map_offset_t)(upl->u_offset - upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)));
7194 if (addr_adjustment) {
7195 assert(VM_MAP_PAGE_MASK(map) != PAGE_MASK);
7196 DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr, (uint64_t)addr_adjustment, (uint64_t)(*dst_addr + addr_adjustment));
7197 *dst_addr += addr_adjustment;
7198 }
7199 }
7200
7201 upl_unlock(upl);
7202
7203 return KERN_SUCCESS;
7204 }
7205
7206 kern_return_t
7207 vm_map_enter_upl(
7208 vm_map_t map,
7209 upl_t upl,
7210 vm_map_offset_t *dst_addr)
7211 {
7212 upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7213 return vm_map_enter_upl_range(map, upl, 0, upl_size, VM_PROT_DEFAULT, dst_addr);
7214 }
7215
7216 /*
7217 * Internal routine to remove a UPL mapping from a VM map.
7218 *
7219 * XXX - This should just be doable through a standard
7220 * vm_map_remove() operation. Otherwise, implicit clean-up
7221 * of the target map won't be able to correctly remove
7222 * these (and release the reference on the UPL). Having
7223 * to do this means we can't map these into user-space
7224 * maps yet.
7225 */
7226 kern_return_t
7227 vm_map_remove_upl_range(
7228 vm_map_t map,
7229 upl_t upl,
7230 __unused vm_object_offset_t offset_to_unmap,
7231 __unused upl_size_t size_to_unmap)
7232 {
7233 vm_address_t addr;
7234 upl_size_t size;
7235 int isVectorUPL = 0, curr_upl = 0;
7236 upl_t vector_upl = NULL;
7237
7238 if (upl == UPL_NULL) {
7239 return KERN_INVALID_ARGUMENT;
7240 }
7241
7242 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7243 int unmapped = 0, valid_upls = 0;
7244 vector_upl = upl;
7245 upl_lock(vector_upl);
7246 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
7247 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
7248 if (upl == NULL) {
7249 continue;
7250 }
7251 valid_upls++;
7252 if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
7253 unmapped++;
7254 }
7255 }
7256
7257 if (unmapped) {
7258 if (unmapped != valid_upls) {
7259 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped", unmapped, valid_upls);
7260 } else {
7261 upl_unlock(vector_upl);
7262 return KERN_FAILURE;
7263 }
7264 }
7265 curr_upl = 0;
7266 } else {
7267 upl_lock(upl);
7268 }
7269
7270 process_upl_to_remove:
7271 if (isVectorUPL) {
7272 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
7273 vm_map_t v_upl_submap;
7274 vm_offset_t v_upl_submap_dst_addr;
7275 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
7276
7277 vm_map_remove(map, v_upl_submap_dst_addr,
7278 v_upl_submap_dst_addr + vector_upl->u_size);
7279 vm_map_deallocate(v_upl_submap);
7280 upl_unlock(vector_upl);
7281 return KERN_SUCCESS;
7282 }
7283
7284 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
7285 if (upl == NULL) {
7286 goto process_upl_to_remove;
7287 }
7288 }
7289
7290 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
7291 addr = upl->kaddr;
7292 size = upl->u_mapped_size;
7293
7294 assert(upl->ref_count > 1);
7295 upl->ref_count--; /* removing mapping ref */
7296
7297 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
7298 upl->kaddr = (vm_offset_t) 0;
7299 upl->u_mapped_size = 0;
7300
7301 if (isVectorUPL) {
7302 /*
7303 * If it's a Vectored UPL, we'll be removing the entire
7304 * submap anyway, so no need to remove individual UPL
7305 * element mappings from within the submap
7306 */
7307 goto process_upl_to_remove;
7308 }
7309
7310 upl_unlock(upl);
7311
7312 vm_map_remove(map,
7313 vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
7314 vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)));
7315 return KERN_SUCCESS;
7316 }
7317 upl_unlock(upl);
7318
7319 return KERN_FAILURE;
7320 }
7321
7322 kern_return_t
7323 vm_map_remove_upl(
7324 vm_map_t map,
7325 upl_t upl)
7326 {
7327 upl_size_t upl_size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
7328 return vm_map_remove_upl_range(map, upl, 0, upl_size);
7329 }
7330
7331 kern_return_t
7332 upl_commit_range(
7333 upl_t upl,
7334 upl_offset_t offset,
7335 upl_size_t size,
7336 int flags,
7337 upl_page_info_t *page_list,
7338 mach_msg_type_number_t count,
7339 boolean_t *empty)
7340 {
7341 upl_size_t xfer_size, subupl_size;
7342 vm_object_t shadow_object;
7343 vm_object_t object;
7344 vm_object_t m_object;
7345 vm_object_offset_t target_offset;
7346 upl_offset_t subupl_offset = offset;
7347 int entry;
7348 wpl_array_t lite_list;
7349 int occupied;
7350 int clear_refmod = 0;
7351 int pgpgout_count = 0;
7352 struct vm_page_delayed_work dw_array;
7353 struct vm_page_delayed_work *dwp, *dwp_start;
7354 bool dwp_finish_ctx = TRUE;
7355 int dw_count;
7356 int dw_limit;
7357 int isVectorUPL = 0;
7358 upl_t vector_upl = NULL;
7359 boolean_t should_be_throttled = FALSE;
7360
7361 vm_page_t nxt_page = VM_PAGE_NULL;
7362 int fast_path_possible = 0;
7363 int fast_path_full_commit = 0;
7364 int throttle_page = 0;
7365 int unwired_count = 0;
7366 int local_queue_count = 0;
7367 vm_page_t first_local, last_local;
7368 vm_object_offset_t obj_start, obj_end, obj_offset;
7369 kern_return_t kr = KERN_SUCCESS;
7370
7371 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
7372
7373 dwp_start = dwp = NULL;
7374
7375 subupl_size = size;
7376 *empty = FALSE;
7377
7378 if (upl == UPL_NULL) {
7379 return KERN_INVALID_ARGUMENT;
7380 }
7381
7382 dw_count = 0;
7383 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
7384 dwp_start = vm_page_delayed_work_get_ctx();
7385 if (dwp_start == NULL) {
7386 dwp_start = &dw_array;
7387 dw_limit = 1;
7388 dwp_finish_ctx = FALSE;
7389 }
7390
7391 dwp = dwp_start;
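/*
 * Batching pattern used by the commit loop below (editorial sketch):
 * work is accumulated per page and flushed whenever the batch fills:
 *
 *	dwp->dw_mask |= DW_...;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 *	if (dw_count >= dw_limit) {
 *		vm_page_do_delayed_work(shadow_object,
 *		    VM_KERN_MEMORY_NONE, dwp_start, dw_count);
 *		dwp = dwp_start;
 *		dw_count = 0;
 *	}
 */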
7392
7393 if (count == 0) {
7394 page_list = NULL;
7395 }
7396
7397 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7398 vector_upl = upl;
7399 upl_lock(vector_upl);
7400 } else {
7401 upl_lock(upl);
7402 }
7403
7404 process_upl_to_commit:
7405
7406 if (isVectorUPL) {
7407 size = subupl_size;
7408 offset = subupl_offset;
7409 if (size == 0) {
7410 upl_unlock(vector_upl);
7411 kr = KERN_SUCCESS;
7412 goto done;
7413 }
7414 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7415 if (upl == NULL) {
7416 upl_unlock(vector_upl);
7417 kr = KERN_FAILURE;
7418 goto done;
7419 }
7420 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
7421 subupl_size -= size;
7422 subupl_offset += size;
7423 }
7424
7425 #if UPL_DEBUG
7426 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7427 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
7428
7429 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7430 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7431
7432 upl->upl_commit_index++;
7433 }
7434 #endif
7435 if (upl->flags & UPL_DEVICE_MEMORY) {
7436 xfer_size = 0;
7437 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
7438 xfer_size = size;
7439 } else {
7440 if (!isVectorUPL) {
7441 upl_unlock(upl);
7442 } else {
7443 upl_unlock(vector_upl);
7444 }
7445 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
7446 kr = KERN_FAILURE;
7447 goto done;
7448 }
7449 if (upl->flags & UPL_SET_DIRTY) {
7450 flags |= UPL_COMMIT_SET_DIRTY;
7451 }
7452 if (upl->flags & UPL_CLEAR_DIRTY) {
7453 flags |= UPL_COMMIT_CLEAR_DIRTY;
7454 }
7455
7456 if (upl->flags & UPL_INTERNAL) {
7457 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
7458 + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
7459 } else {
7460 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
7461 }
7462
7463 object = upl->map_object;
7464
7465 if (upl->flags & UPL_SHADOWED) {
7466 vm_object_lock(object);
7467 shadow_object = object->shadow;
7468 } else {
7469 shadow_object = object;
7470 }
7471 entry = offset / PAGE_SIZE;
7472 target_offset = (vm_object_offset_t)offset;
7473
7474 if (upl->flags & UPL_KERNEL_OBJECT) {
7475 vm_object_lock_shared(shadow_object);
7476 } else {
7477 vm_object_lock(shadow_object);
7478 }
7479
7480 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
7481
7482 if (upl->flags & UPL_ACCESS_BLOCKED) {
7483 assert(shadow_object->blocked_access);
7484 shadow_object->blocked_access = FALSE;
7485 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7486 }
7487
7488 if (shadow_object->code_signed) {
7489 /*
7490 * CODE SIGNING:
7491 * If the object is code-signed, do not let this UPL tell
7492 * us if the pages are valid or not. Let the pages be
7493 * validated by VM the normal way (when they get mapped or
7494 * copied).
7495 */
7496 flags &= ~UPL_COMMIT_CS_VALIDATED;
7497 }
7498 if (!page_list) {
7499 /*
7500 * No page list to get the code-signing info from !?
7501 */
7502 flags &= ~UPL_COMMIT_CS_VALIDATED;
7503 }
7504 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
7505 should_be_throttled = TRUE;
7506 }
7507
7508 if ((upl->flags & UPL_IO_WIRE) &&
7509 !(flags & UPL_COMMIT_FREE_ABSENT) &&
7510 !isVectorUPL &&
7511 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
7512 shadow_object->purgable != VM_PURGABLE_EMPTY) {
7513 if (!vm_page_queue_empty(&shadow_object->memq)) {
7514 if (size == shadow_object->vo_size) {
7515 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
7516 fast_path_full_commit = 1;
7517 }
7518 fast_path_possible = 1;
7519
7520 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
7521 (shadow_object->purgable == VM_PURGABLE_DENY ||
7522 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
7523 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
7524 throttle_page = 1;
7525 }
7526 }
7527 }
7528 first_local = VM_PAGE_NULL;
7529 last_local = VM_PAGE_NULL;
7530
7531 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
7532 obj_end = obj_start + xfer_size;
7533 obj_start = vm_object_trunc_page(obj_start);
7534 obj_end = vm_object_round_page(obj_end);
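/*
 * The truncation/rounding above widens a sub-page request to whole
 * VM object pages; e.g. (illustrative, 4K object pages) a commit
 * covering [0x200, 0xE00) is processed as the single covering page
 * [0x0, 0x1000).
 */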
7535 for (obj_offset = obj_start;
7536 obj_offset < obj_end;
7537 obj_offset += PAGE_SIZE) {
7538 vm_page_t t, m;
7539
7540 dwp->dw_mask = 0;
7541 clear_refmod = 0;
7542
7543 m = VM_PAGE_NULL;
7544
7545 if (upl->flags & UPL_LITE) {
7546 unsigned int pg_num;
7547
7548 if (nxt_page != VM_PAGE_NULL) {
7549 m = nxt_page;
7550 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
7551 target_offset = m->vmp_offset;
7552 }
7553 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7554 assert(pg_num == target_offset / PAGE_SIZE);
7555
7556 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
7557 lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
7558
7559 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7560 m = vm_page_lookup(shadow_object, obj_offset);
7561 }
7562 } else {
7563 m = NULL;
7564 }
7565 }
7566 if (upl->flags & UPL_SHADOWED) {
7567 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7568 t->vmp_free_when_done = FALSE;
7569
7570 VM_PAGE_FREE(t);
7571
7572 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7573 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7574 }
7575 }
7576 }
7577 if (m == VM_PAGE_NULL) {
7578 goto commit_next_page;
7579 }
7580
7581 m_object = VM_PAGE_OBJECT(m);
7582
7583 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7584 assert(m->vmp_busy);
7585
7586 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7587 goto commit_next_page;
7588 }
7589
7590 if (flags & UPL_COMMIT_CS_VALIDATED) {
7591 /*
7592 * CODE SIGNING:
7593 * Set the code signing bits according to
7594 * what the UPL says they should be.
7595 */
7596 m->vmp_cs_validated |= page_list[entry].cs_validated;
7597 m->vmp_cs_tainted |= page_list[entry].cs_tainted;
7598 m->vmp_cs_nx |= page_list[entry].cs_nx;
7599 }
7600 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
7601 m->vmp_written_by_kernel = TRUE;
7602 }
7603
7604 if (upl->flags & UPL_IO_WIRE) {
7605 if (page_list) {
7606 page_list[entry].phys_addr = 0;
7607 }
7608
7609 if (flags & UPL_COMMIT_SET_DIRTY) {
7610 SET_PAGE_DIRTY(m, FALSE);
7611 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7612 m->vmp_dirty = FALSE;
7613
7614 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7615 m->vmp_cs_validated &&
7616 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7617 /*
7618 * CODE SIGNING:
7619 * This page is no longer dirty
7620 * but could have been modified,
7621 * so it will need to be
7622 * re-validated.
7623 */
7624 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7625
7626 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7627
7628 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7629 }
7630 clear_refmod |= VM_MEM_MODIFIED;
7631 }
7632 if (upl->flags & UPL_ACCESS_BLOCKED) {
7633 /*
7634 * We blocked access to the pages in this UPL.
7635 * Clear the "busy" bit and wake up any waiter
7636 * for this page.
7637 */
7638 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7639 }
7640 if (fast_path_possible) {
7641 assert(m_object->purgable != VM_PURGABLE_EMPTY);
7642 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
7643 if (m->vmp_absent) {
7644 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
7645 assert(m->vmp_wire_count == 0);
7646 assert(m->vmp_busy);
7647
7648 m->vmp_absent = FALSE;
7649 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7650 } else {
7651 if (m->vmp_wire_count == 0) {
7652 panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
7653 }
7654 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
7655
7656 /*
7657 * XXX FBDP need to update some other
7658 * counters here (purgeable_wired_count)
7659 * (ledgers), ...
7660 */
7661 assert(m->vmp_wire_count > 0);
7662 m->vmp_wire_count--;
7663
7664 if (m->vmp_wire_count == 0) {
7665 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
7666 unwired_count++;
7667 }
7668 }
7669 if (m->vmp_wire_count == 0) {
7670 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7671
7672 if (last_local == VM_PAGE_NULL) {
7673 assert(first_local == VM_PAGE_NULL);
7674
7675 last_local = m;
7676 first_local = m;
7677 } else {
7678 assert(first_local != VM_PAGE_NULL);
7679
7680 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7681 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
7682 first_local = m;
7683 }
7684 local_queue_count++;
7685
7686 if (throttle_page) {
7687 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
7688 } else {
7689 if (flags & UPL_COMMIT_INACTIVATE) {
7690 if (shadow_object->internal) {
7691 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
7692 } else {
7693 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
7694 }
7695 } else {
7696 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
7697 }
7698 }
7699 }
7700 } else {
7701 if (flags & UPL_COMMIT_INACTIVATE) {
7702 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7703 clear_refmod |= VM_MEM_REFERENCED;
7704 }
7705 if (m->vmp_absent) {
7706 if (flags & UPL_COMMIT_FREE_ABSENT) {
7707 dwp->dw_mask |= DW_vm_page_free;
7708 } else {
7709 m->vmp_absent = FALSE;
7710 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7711
7712 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
7713 dwp->dw_mask |= DW_vm_page_activate;
7714 }
7715 }
7716 } else {
7717 dwp->dw_mask |= DW_vm_page_unwire;
7718 }
7719 }
7720 goto commit_next_page;
7721 }
7722 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7723
7724 if (page_list) {
7725 page_list[entry].phys_addr = 0;
7726 }
7727
7728 /*
7729 * make sure to clear the hardware
7730 * modify or reference bits before
7731 * releasing the BUSY bit on this page
7732 * otherwise we risk losing a legitimate
7733 * change of state
7734 */
7735 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7736 m->vmp_dirty = FALSE;
7737
7738 clear_refmod |= VM_MEM_MODIFIED;
7739 }
7740 if (m->vmp_laundry) {
7741 dwp->dw_mask |= DW_vm_pageout_throttle_up;
7742 }
7743
7744 if (VM_PAGE_WIRED(m)) {
7745 m->vmp_free_when_done = FALSE;
7746 }
7747
7748 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7749 m->vmp_cs_validated &&
7750 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
7751 /*
7752 * CODE SIGNING:
7753 * This page is no longer dirty
7754 * but could have been modified,
7755 * so it will need to be
7756 * re-validated.
7757 */
7758 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
7759
7760 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7761
7762 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7763 }
7764 if (m->vmp_overwriting) {
7765 /*
7766 * the (COPY_OUT_FROM == FALSE) request_page_list case
7767 */
7768 if (m->vmp_busy) {
7769 #if CONFIG_PHANTOM_CACHE
7770 if (m->vmp_absent && !m_object->internal) {
7771 dwp->dw_mask |= DW_vm_phantom_cache_update;
7772 }
7773 #endif
7774 m->vmp_absent = FALSE;
7775
7776 dwp->dw_mask |= DW_clear_busy;
7777 } else {
7778 /*
7779 * alternate (COPY_OUT_FROM == FALSE) page_list case
7780 * Occurs when the original page was wired
7781 * at the time of the list request
7782 */
7783 assert(VM_PAGE_WIRED(m));
7784
7785 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
7786 }
7787 m->vmp_overwriting = FALSE;
7788 }
7789 m->vmp_cleaning = FALSE;
7790
7791 if (m->vmp_free_when_done) {
7792 /*
7793 * With the clean queue enabled, UPL_PAGEOUT should
7794 * no longer set the pageout bit. Its pages now go
7795 * to the clean queue.
7796 *
7797 * We don't use the cleaned Q anymore and so this
7798 * assert isn't correct. The code for the clean Q
7799 * still exists and might be used in the future. If we
7800 * go back to the cleaned Q, we will re-enable this
7801 * assert.
7802 *
7803 * assert(!(upl->flags & UPL_PAGEOUT));
7804 */
7805 assert(!m_object->internal);
7806
7807 m->vmp_free_when_done = FALSE;
7808
7809 if ((flags & UPL_COMMIT_SET_DIRTY) ||
7810 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
7811 /*
7812 * page was re-dirtied after we started
7813 * the pageout... reactivate it since
7814 * we don't know whether the on-disk
7815 * copy matches what is now in memory
7816 */
7817 SET_PAGE_DIRTY(m, FALSE);
7818
7819 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
7820
7821 if (upl->flags & UPL_PAGEOUT) {
7822 counter_inc(&vm_statistics_reactivations);
7823 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
7824 }
7825 } else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
7826 /*
7827 * Someone else might still be handling this
7828 * page (vm_fault() for example), so let's not
7829 * free it or "un-busy" it!
7830 * Put that page in the "speculative" queue
7831 * for now (since we would otherwise have freed
7832 * it) and let whoever is keeping the page
7833 * "busy" move it if needed when they're done
7834 * with it.
7835 */
7836 dwp->dw_mask |= DW_vm_page_speculate;
7837 } else {
7838 /*
7839 * page has been successfully cleaned
7840 * go ahead and free it for other use
7841 */
7842 if (m_object->internal) {
7843 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
7844 } else {
7845 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
7846 }
7847 m->vmp_dirty = FALSE;
7848 if (!(upl->flags & UPL_HAS_BUSY)) {
7849 assert(!m->vmp_busy);
7850 }
7851 m->vmp_busy = TRUE;
7852
7853 dwp->dw_mask |= DW_vm_page_free;
7854 }
7855 goto commit_next_page;
7856 }
7857 /*
7858 * It is part of the semantics of COPYOUT_FROM
7859 * UPLs that a commit implies a cache sync
7860 * between the vm page and the backing store;
7861 * this can be used to strip the precious bit
7862 * as well as to clean the page
7863 */
7864 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
7865 m->vmp_precious = FALSE;
7866 }
7867
7868 if (flags & UPL_COMMIT_SET_DIRTY) {
7869 SET_PAGE_DIRTY(m, FALSE);
7870 } else {
7871 m->vmp_dirty = FALSE;
7872 }
7873
7874 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7875 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
7876 pgpgout_count++;
7877
7878 counter_inc(&vm_statistics_pageouts);
7879 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
7880
7881 dwp->dw_mask |= DW_enqueue_cleaned;
7882 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
7883 /*
7884 * page coming back in from being 'frozen'...
7885 * it was dirty before it was frozen, so keep it dirty
7886 * so that vm_page_activate will notice that it really belongs
7887 * on the throttle queue and put it there
7888 */
7889 SET_PAGE_DIRTY(m, FALSE);
7890 dwp->dw_mask |= DW_vm_page_activate;
7891 } else {
7892 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
7893 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7894 clear_refmod |= VM_MEM_REFERENCED;
7895 } else if (!VM_PAGE_PAGEABLE(m)) {
7896 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
7897 dwp->dw_mask |= DW_vm_page_speculate;
7898 } else if (m->vmp_reference) {
7899 dwp->dw_mask |= DW_vm_page_activate;
7900 } else {
7901 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7902 clear_refmod |= VM_MEM_REFERENCED;
7903 }
7904 }
7905 }
7906 if (upl->flags & UPL_ACCESS_BLOCKED) {
7907 /*
7908 * We blocked access to the pages in this UPL.
7909 * Clear the "busy" bit on this page before we
7910 * wake up any waiter.
7911 */
7912 dwp->dw_mask |= DW_clear_busy;
7913 }
7914 /*
7915 * Wakeup any thread waiting for the page to be un-cleaning.
7916 */
7917 dwp->dw_mask |= DW_PAGE_WAKEUP;
7918
7919 commit_next_page:
7920 if (clear_refmod) {
7921 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
7922 }
7923
7924 target_offset += PAGE_SIZE_64;
7925 xfer_size -= PAGE_SIZE;
7926 entry++;
7927
7928 if (dwp->dw_mask) {
7929 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
7930 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
7931
7932 if (dw_count >= dw_limit) {
7933 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
7934
7935 dwp = dwp_start;
7936 dw_count = 0;
7937 }
7938 } else {
7939 if (dwp->dw_mask & DW_clear_busy) {
7940 m->vmp_busy = FALSE;
7941 }
7942
7943 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7944 PAGE_WAKEUP(m);
7945 }
7946 }
7947 }
7948 }
7949 if (dw_count) {
7950 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
7951 dwp = dwp_start;
7952 dw_count = 0;
7953 }
7954
7955 if (fast_path_possible) {
7956 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
7957 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
7958
7959 if (local_queue_count || unwired_count) {
7960 if (local_queue_count) {
7961 vm_page_t first_target;
7962 vm_page_queue_head_t *target_queue;
7963
7964 if (throttle_page) {
7965 target_queue = &vm_page_queue_throttled;
7966 } else {
7967 if (flags & UPL_COMMIT_INACTIVATE) {
7968 if (shadow_object->internal) {
7969 target_queue = &vm_page_queue_anonymous;
7970 } else {
7971 target_queue = &vm_page_queue_inactive;
7972 }
7973 } else {
7974 target_queue = &vm_page_queue_active;
7975 }
7976 }
7977 /*
7978 * Transfer the entire local queue to the appropriate LRU page queue.
7979 */
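/*
 * Head-insertion splice performed below (editorial diagram):
 *
 *	before:	target_queue -> first_target -> ...
 *		local chain:	first_local -> ... -> last_local
 *	after:	target_queue -> first_local -> ... -> last_local
 *			     -> first_target -> ...
 */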
7980 vm_page_lockspin_queues();
7981
7982 first_target = (vm_page_t) vm_page_queue_first(target_queue);
7983
7984 if (vm_page_queue_empty(target_queue)) {
7985 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7986 } else {
7987 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7988 }
7989
7990 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7991 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
7992 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
7993
7994 /*
7995 * Adjust the global page counts.
7996 */
7997 if (throttle_page) {
7998 vm_page_throttled_count += local_queue_count;
7999 } else {
8000 if (flags & UPL_COMMIT_INACTIVATE) {
8001 if (shadow_object->internal) {
8002 vm_page_anonymous_count += local_queue_count;
8003 }
8004 vm_page_inactive_count += local_queue_count;
8005
8006 token_new_pagecount += local_queue_count;
8007 } else {
8008 vm_page_active_count += local_queue_count;
8009 }
8010
8011 if (shadow_object->internal) {
8012 vm_page_pageable_internal_count += local_queue_count;
8013 } else {
8014 vm_page_pageable_external_count += local_queue_count;
8015 }
8016 }
8017 } else {
8018 vm_page_lockspin_queues();
8019 }
8020 if (unwired_count) {
8021 vm_page_wire_count -= unwired_count;
8022 VM_CHECK_MEMORYSTATUS;
8023 }
8024 vm_page_unlock_queues();
8025
8026 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
8027 }
8028 }
8029 occupied = 1;
8030
8031 if (upl->flags & UPL_DEVICE_MEMORY) {
8032 occupied = 0;
8033 } else if (upl->flags & UPL_LITE) {
8034 int pg_num;
8035 int i;
8036
8037 occupied = 0;
8038
8039 if (!fast_path_full_commit) {
8040 pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
8041 pg_num = (pg_num + 31) >> 5;
8042
8043 for (i = 0; i < pg_num; i++) {
8044 if (lite_list[i] != 0) {
8045 occupied = 1;
8046 break;
8047 }
8048 }
8049 }
8050 } else {
8051 if (vm_page_queue_empty(&upl->map_object->memq)) {
8052 occupied = 0;
8053 }
8054 }
8055 if (occupied == 0) {
8056 /*
8057 * If this UPL element belongs to a Vector UPL and is
8058 * empty, then this is the right function to deallocate
8059 * it, so go ahead and set the *empty variable. The
8060 * UPL_COMMIT_NOTIFY_EMPTY flag should, from the caller's
8061 * point of view, be considered relevant for the Vector UPL
8062 * and not the internal UPLs.
8063 */
8064 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8065 *empty = TRUE;
8066 }
8067
8068 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8069 /*
8070 * this is not a paging object
8071 * so we need to drop the paging reference
8072 * that was taken when we created the UPL
8073 * against this object
8074 */
8075 vm_object_activity_end(shadow_object);
8076 vm_object_collapse(shadow_object, 0, TRUE);
8077 } else {
8078 /*
8079 * we donated the paging reference to
8080 * the map object... vm_pageout_object_terminate
8081 * will drop this reference
8082 */
8083 }
8084 }
8085 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
8086 vm_object_unlock(shadow_object);
8087 if (object != shadow_object) {
8088 vm_object_unlock(object);
8089 }
8090
8091 if (!isVectorUPL) {
8092 upl_unlock(upl);
8093 } else {
8094 /*
8095 * If we completed our operations on a UPL that is
8096 * part of a Vectored UPL and if empty is TRUE, then
8097 * we should go ahead and deallocate this UPL element.
8098 * Then we check if this was the last of the UPL elements
8099 * within that Vectored UPL. If so, set empty to TRUE
8100 * so that in ubc_upl_commit_range or ubc_upl_commit, we
8101 * can go ahead and deallocate the Vector UPL too.
8102 */
8103 if (*empty == TRUE) {
8104 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
8105 upl_deallocate(upl);
8106 }
8107 goto process_upl_to_commit;
8108 }
8109 if (pgpgout_count) {
8110 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
8111 }
8112
8113 kr = KERN_SUCCESS;
8114 done:
8115 if (dwp_start && dwp_finish_ctx) {
8116 vm_page_delayed_work_finish_ctx(dwp_start);
8117 dwp_start = dwp = NULL;
8118 }
8119
8120 return kr;
8121 }
8122
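/*
 * Illustrative caller-side sketch (editorial addition; "io_size",
 * "page_list" and "page_cnt" are hypothetical names): an I/O
 * completion path commits the UPL and deallocates it once it
 * reports empty:
 *
 *	boolean_t empty;
 *
 *	if (upl_commit_range(upl, 0, io_size, UPL_COMMIT_CLEAR_DIRTY,
 *	        page_list, page_cnt, &empty) == KERN_SUCCESS && empty) {
 *		upl_deallocate(upl);
 *	}
 */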
8123 kern_return_t
8124 upl_abort_range(
8125 upl_t upl,
8126 upl_offset_t offset,
8127 upl_size_t size,
8128 int error,
8129 boolean_t *empty)
8130 {
8131 upl_page_info_t *user_page_list = NULL;
8132 upl_size_t xfer_size, subupl_size;
8133 vm_object_t shadow_object;
8134 vm_object_t object;
8135 vm_object_offset_t target_offset;
8136 upl_offset_t subupl_offset = offset;
8137 int entry;
8138 wpl_array_t lite_list;
8139 int occupied;
8140 struct vm_page_delayed_work dw_array;
8141 struct vm_page_delayed_work *dwp, *dwp_start;
8142 bool dwp_finish_ctx = TRUE;
8143 int dw_count;
8144 int dw_limit;
8145 int isVectorUPL = 0;
8146 upl_t vector_upl = NULL;
8147 vm_object_offset_t obj_start, obj_end, obj_offset;
8148 kern_return_t kr = KERN_SUCCESS;
8149
8150 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
8151
8152 dwp_start = dwp = NULL;
8153
8154 subupl_size = size;
8155 *empty = FALSE;
8156
8157 if (upl == UPL_NULL) {
8158 return KERN_INVALID_ARGUMENT;
8159 }
8160
8161 if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
8162 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
8163 }
8164
8165 dw_count = 0;
8166 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
8167 dwp_start = vm_page_delayed_work_get_ctx();
8168 if (dwp_start == NULL) {
8169 dwp_start = &dw_array;
8170 dw_limit = 1;
8171 dwp_finish_ctx = FALSE;
8172 }
8173
8174 dwp = dwp_start;
8175
8176 if ((isVectorUPL = vector_upl_is_valid(upl))) {
8177 vector_upl = upl;
8178 upl_lock(vector_upl);
8179 } else {
8180 upl_lock(upl);
8181 }
8182
8183 process_upl_to_abort:
8184 if (isVectorUPL) {
8185 size = subupl_size;
8186 offset = subupl_offset;
8187 if (size == 0) {
8188 upl_unlock(vector_upl);
8189 kr = KERN_SUCCESS;
8190 goto done;
8191 }
8192 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
8193 if (upl == NULL) {
8194 upl_unlock(vector_upl);
8195 kr = KERN_FAILURE;
8196 goto done;
8197 }
8198 subupl_size -= size;
8199 subupl_offset += size;
8200 }
8201
8202 *empty = FALSE;
8203
8204 #if UPL_DEBUG
8205 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
8206 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
8207
8208 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
8209 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
8210 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
8211
8212 upl->upl_commit_index++;
8213 }
8214 #endif
8215 if (upl->flags & UPL_DEVICE_MEMORY) {
8216 xfer_size = 0;
8217 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
8218 xfer_size = size;
8219 } else {
8220 if (!isVectorUPL) {
8221 upl_unlock(upl);
8222 } else {
8223 upl_unlock(vector_upl);
8224 }
8225 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
8226 kr = KERN_FAILURE;
8227 goto done;
8228 }
8229 if (upl->flags & UPL_INTERNAL) {
8230 lite_list = (wpl_array_t)
8231 ((((uintptr_t)upl) + sizeof(struct upl))
8232 + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
8233
8234 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
8235 } else {
8236 lite_list = (wpl_array_t)
8237 (((uintptr_t)upl) + sizeof(struct upl));
8238 }
8239 object = upl->map_object;
8240
8241 if (upl->flags & UPL_SHADOWED) {
8242 vm_object_lock(object);
8243 shadow_object = object->shadow;
8244 } else {
8245 shadow_object = object;
8246 }
8247
8248 entry = offset / PAGE_SIZE;
8249 target_offset = (vm_object_offset_t)offset;
8250
8251 if (upl->flags & UPL_KERNEL_OBJECT) {
8252 vm_object_lock_shared(shadow_object);
8253 } else {
8254 vm_object_lock(shadow_object);
8255 }
8256
8257 if (upl->flags & UPL_ACCESS_BLOCKED) {
8258 assert(shadow_object->blocked_access);
8259 shadow_object->blocked_access = FALSE;
8260 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
8261 }
8262
8263 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
8264 panic("upl_abort_range: kernel_object being DUMPED");
8265 }
8266
8267 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
8268 obj_end = obj_start + xfer_size;
8269 obj_start = vm_object_trunc_page(obj_start);
8270 obj_end = vm_object_round_page(obj_end);
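/* Same whole-page widening as in upl_commit_range() above. */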
8271 for (obj_offset = obj_start;
8272 obj_offset < obj_end;
8273 obj_offset += PAGE_SIZE) {
8274 vm_page_t t, m;
8275 unsigned int pg_num;
8276 boolean_t needed;
8277
8278 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
8279 assert(pg_num == target_offset / PAGE_SIZE);
8280
8281 needed = FALSE;
8282
8283 if (user_page_list) {
8284 needed = user_page_list[pg_num].needed;
8285 }
8286
8287 dwp->dw_mask = 0;
8288 m = VM_PAGE_NULL;
8289
8290 if (upl->flags & UPL_LITE) {
8291 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
8292 lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
8293
8294 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
8295 m = vm_page_lookup(shadow_object, obj_offset);
8296 }
8297 }
8298 }
8299 if (upl->flags & UPL_SHADOWED) {
8300 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
8301 t->vmp_free_when_done = FALSE;
8302
8303 VM_PAGE_FREE(t);
8304
8305 if (m == VM_PAGE_NULL) {
8306 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
8307 }
8308 }
8309 }
8310 if ((upl->flags & UPL_KERNEL_OBJECT)) {
8311 goto abort_next_page;
8312 }
8313
8314 if (m != VM_PAGE_NULL) {
8315 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8316
8317 if (m->vmp_absent) {
8318 boolean_t must_free = TRUE;
8319
8320 /*
8321 * COPYOUT = FALSE case
8322 * check for error conditions which must
8323 * be passed back to the page's customer
8324 */
8325 if (error & UPL_ABORT_RESTART) {
8326 m->vmp_restart = TRUE;
8327 m->vmp_absent = FALSE;
8328 m->vmp_unusual = TRUE;
8329 must_free = FALSE;
8330 } else if (error & UPL_ABORT_UNAVAILABLE) {
8331 m->vmp_restart = FALSE;
8332 m->vmp_unusual = TRUE;
8333 must_free = FALSE;
8334 } else if (error & UPL_ABORT_ERROR) {
8335 m->vmp_restart = FALSE;
8336 m->vmp_absent = FALSE;
8337 m->vmp_error = TRUE;
8338 m->vmp_unusual = TRUE;
8339 must_free = FALSE;
8340 }
8341 if (m->vmp_clustered && needed == FALSE) {
8342 /*
8343 * This page was a part of a speculative
8344 * read-ahead initiated by the kernel
8345 * itself. No one is expecting this
8346 * page and no one will clean up its
8347 * error state if it ever becomes valid
8348 * in the future.
8349 * We have to free it here.
8350 */
8351 must_free = TRUE;
8352 }
8353 m->vmp_cleaning = FALSE;
8354
8355 if (m->vmp_overwriting && !m->vmp_busy) {
8356 /*
8357 * this shouldn't happen since
8358 * this is an 'absent' page, but
8359 * it doesn't hurt to check for
8360 * the 'alternate' method of
8361 * stabilizing the page...
8362 * we will mark 'busy' to be cleared
8363 * in the following code which will
8364 * take care of the primary stabilization
8365 * method (i.e. setting 'busy' to TRUE)
8366 */
8367 dwp->dw_mask |= DW_vm_page_unwire;
8368 }
8369 m->vmp_overwriting = FALSE;
8370
8371 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
8372
8373 if (must_free == TRUE) {
8374 dwp->dw_mask |= DW_vm_page_free;
8375 } else {
8376 dwp->dw_mask |= DW_vm_page_activate;
8377 }
8378 } else {
8379 /*
8380 * Handle the trusted pager throttle.
8381 */
8382 if (m->vmp_laundry) {
8383 dwp->dw_mask |= DW_vm_pageout_throttle_up;
8384 }
8385
8386 if (upl->flags & UPL_ACCESS_BLOCKED) {
8387 /*
8388 * We blocked access to the pages in this UPL.
8389 * Clear the "busy" bit and wake up any waiter
8390 * for this page.
8391 */
8392 dwp->dw_mask |= DW_clear_busy;
8393 }
8394 if (m->vmp_overwriting) {
8395 if (m->vmp_busy) {
8396 dwp->dw_mask |= DW_clear_busy;
8397 } else {
8398 /*
8399 * deal with the 'alternate' method
8400 * of stabilizing the page...
8401 * we will either free the page
8402 * or mark 'busy' to be cleared
8403 * in the following code which will
8404 * take care of the primary stabilization
8405 * method (i.e. setting 'busy' to TRUE)
8406 */
8407 dwp->dw_mask |= DW_vm_page_unwire;
8408 }
8409 m->vmp_overwriting = FALSE;
8410 }
8411 m->vmp_free_when_done = FALSE;
8412 m->vmp_cleaning = FALSE;
8413
8414 if (error & UPL_ABORT_DUMP_PAGES) {
8415 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8416
8417 dwp->dw_mask |= DW_vm_page_free;
8418 } else {
8419 if (!(dwp->dw_mask & DW_vm_page_unwire)) {
8420 if (error & UPL_ABORT_REFERENCE) {
8421 /*
8422 * we've been told to explicitly
8423 * reference this page... for
8424 * file I/O, this is done by
8425 * implementing an LRU on the inactive q
8426 */
8427 dwp->dw_mask |= DW_vm_page_lru;
8428 } else if (!VM_PAGE_PAGEABLE(m)) {
8429 dwp->dw_mask |= DW_vm_page_deactivate_internal;
8430 }
8431 }
8432 dwp->dw_mask |= DW_PAGE_WAKEUP;
8433 }
8434 }
8435 }
8436 abort_next_page:
8437 target_offset += PAGE_SIZE_64;
8438 xfer_size -= PAGE_SIZE;
8439 entry++;
8440
8441 if (dwp->dw_mask) {
8442 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
8443 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
8444
8445 if (dw_count >= dw_limit) {
8446 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8447
8448 dwp = dwp_start;
8449 dw_count = 0;
8450 }
8451 } else {
8452 if (dwp->dw_mask & DW_clear_busy) {
8453 m->vmp_busy = FALSE;
8454 }
8455
8456 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
8457 PAGE_WAKEUP(m);
8458 }
8459 }
8460 }
8461 }
8462 if (dw_count) {
8463 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
8464 dwp = dwp_start;
8465 dw_count = 0;
8466 }
8467
8468 occupied = 1;
8469
8470 if (upl->flags & UPL_DEVICE_MEMORY) {
8471 occupied = 0;
8472 } else if (upl->flags & UPL_LITE) {
8473 int pg_num;
8474 int i;
8475
8476 pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
8477 pg_num = (pg_num + 31) >> 5;
8478 occupied = 0;
8479
8480 for (i = 0; i < pg_num; i++) {
8481 if (lite_list[i] != 0) {
8482 occupied = 1;
8483 break;
8484 }
8485 }
8486 } else {
8487 if (vm_page_queue_empty(&upl->map_object->memq)) {
8488 occupied = 0;
8489 }
8490 }
8491 if (occupied == 0) {
8492 /*
8493 * If this UPL element belongs to a Vector UPL and is
8494 * empty, then this is the right function to deallocate
8495 * it, so go ahead and set the *empty variable. The
8496 * UPL_COMMIT_NOTIFY_EMPTY flag should, from the caller's
8497 * point of view, be considered relevant for the Vector UPL
8498 * and not the internal UPLs.
8499 */
8500 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8501 *empty = TRUE;
8502 }
8503
8504 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8505 /*
8506 * this is not a paging object
8507 * so we need to drop the paging reference
8508 * that was taken when we created the UPL
8509 * against this object
8510 */
8511 vm_object_activity_end(shadow_object);
8512 vm_object_collapse(shadow_object, 0, TRUE);
8513 } else {
8514 /*
8515 * we donated the paging reference to
8516 * the map object... vm_pageout_object_terminate
8517 * will drop this reference
8518 */
8519 }
8520 }
8521 vm_object_unlock(shadow_object);
8522 if (object != shadow_object) {
8523 vm_object_unlock(object);
8524 }
8525
8526 if (!isVectorUPL) {
8527 upl_unlock(upl);
8528 } else {
8529 /*
8530 * If we completed our operations on a UPL that is
8531 * part of a Vectored UPL and if empty is TRUE, then
8532 * we should go ahead and deallocate this UPL element.
8533 * Then we check if this was the last of the UPL elements
8534 * within that Vectored UPL. If so, set empty to TRUE
8535 * so that in ubc_upl_abort_range or ubc_upl_abort, we
8536 * can go ahead and deallocate the Vector UPL too.
8537 */
8538 if (*empty == TRUE) {
8539 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
8540 upl_deallocate(upl);
8541 }
8542 goto process_upl_to_abort;
8543 }
8544
8545 kr = KERN_SUCCESS;
8546
8547 done:
8548 if (dwp_start && dwp_finish_ctx) {
8549 vm_page_delayed_work_finish_ctx(dwp_start);
8550 dwp_start = dwp = NULL;
8551 }
8552
8553 return kr;
8554 }
8555
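/*
 * Illustrative caller-side sketch (editorial addition; "io_size" is
 * a hypothetical name): on I/O failure a pager aborts the range
 * rather than committing it, then frees the UPL once it reports
 * empty:
 *
 *	boolean_t empty;
 *
 *	(void) upl_abort_range(upl, 0, io_size, UPL_ABORT_ERROR, &empty);
 *	if (empty) {
 *		upl_deallocate(upl);
 *	}
 */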
8556
8557 kern_return_t
8558 upl_abort(
8559 upl_t upl,
8560 int error)
8561 {
8562 boolean_t empty;
8563
8564 if (upl == UPL_NULL) {
8565 return KERN_INVALID_ARGUMENT;
8566 }
8567
8568 return upl_abort_range(upl, 0, upl->u_size, error, &empty);
8569 }
8570
8571
8572 /* an option on commit should be wire */
8573 kern_return_t
8574 upl_commit(
8575 upl_t upl,
8576 upl_page_info_t *page_list,
8577 mach_msg_type_number_t count)
8578 {
8579 boolean_t empty;
8580
8581 if (upl == UPL_NULL) {
8582 return KERN_INVALID_ARGUMENT;
8583 }
8584
8585 return upl_commit_range(upl, 0, upl->u_size, 0,
8586 page_list, count, &empty);
8587 }
8588
8589
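/*
 * (Summary comment added for clarity.) iopl_valid_data() is called
 * once a device has filled the absent pages of an I/O-wired UPL:
 * each busy+absent page is marked valid and dirty, wired, and any
 * waiters are woken via PAGE_WAKEUP_DONE().
 */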
8590 void
8591 iopl_valid_data(
8592 upl_t upl,
8593 vm_tag_t tag)
8594 {
8595 vm_object_t object;
8596 vm_offset_t offset;
8597 vm_page_t m, nxt_page = VM_PAGE_NULL;
8598 upl_size_t size;
8599 int wired_count = 0;
8600
8601 if (upl == NULL) {
8602 panic("iopl_valid_data: NULL upl");
8603 }
8604 if (vector_upl_is_valid(upl)) {
8605 panic("iopl_valid_data: vector upl");
8606 }
8607 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
8608 panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
8609 }
8610
8611 object = upl->map_object;
8612
8613 if (object == kernel_object || object == compressor_object) {
8614 panic("iopl_valid_data: object == kernel or compressor");
8615 }
8616
8617 if (object->purgable == VM_PURGABLE_VOLATILE ||
8618 object->purgable == VM_PURGABLE_EMPTY) {
8619 panic("iopl_valid_data: object %p purgable %d",
8620 object, object->purgable);
8621 }
8622
8623 size = upl_adjusted_size(upl, PAGE_MASK);
8624
8625 vm_object_lock(object);
8626 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
8627
8628 bool whole_object;
8629
8630 if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
8631 nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
8632 whole_object = true;
8633 } else {
8634 offset = (vm_offset_t)(upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset);
8635 whole_object = false;
8636 }
8637
8638 while (size) {
8639 if (whole_object) {
8640 if (nxt_page != VM_PAGE_NULL) {
8641 m = nxt_page;
8642 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
8643 }
8644 } else {
8645 m = vm_page_lookup(object, offset);
8646 offset += PAGE_SIZE;
8647
8648 if (m == VM_PAGE_NULL) {
8649 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
8650 }
8651 }
8652 if (m->vmp_busy) {
8653 if (!m->vmp_absent) {
8654 panic("iopl_valid_data: busy page w/o absent");
8655 }
8656
8657 if (m->vmp_pageq.next || m->vmp_pageq.prev) {
8658 panic("iopl_valid_data: busy+absent page on page queue");
8659 }
8660 if (m->vmp_reusable) {
8661 panic("iopl_valid_data: %p is reusable", m);
8662 }
8663
8664 m->vmp_absent = FALSE;
8665 m->vmp_dirty = TRUE;
8666 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
8667 assert(m->vmp_wire_count == 0);
8668 m->vmp_wire_count++;
8669 assert(m->vmp_wire_count);
8670 if (m->vmp_wire_count == 1) {
8671 m->vmp_q_state = VM_PAGE_IS_WIRED;
8672 wired_count++;
8673 } else {
8674 panic("iopl_valid_data: %p already wired", m);
8675 }
8676
8677 PAGE_WAKEUP_DONE(m);
8678 }
8679 size -= PAGE_SIZE;
8680 }
8681 if (wired_count) {
8682 VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
8683 assert(object->resident_page_count >= object->wired_page_count);
8684
8685 /* no need to adjust purgeable accounting for this object: */
8686 assert(object->purgable != VM_PURGABLE_VOLATILE);
8687 assert(object->purgable != VM_PURGABLE_EMPTY);
8688
8689 vm_page_lockspin_queues();
8690 vm_page_wire_count += wired_count;
8691 vm_page_unlock_queues();
8692 }
8693 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
8694 vm_object_unlock(object);
8695 }
8696
8697
8698 void
8699 vm_object_set_pmap_cache_attr(
8700 vm_object_t object,
8701 upl_page_info_array_t user_page_list,
8702 unsigned int num_pages,
8703 boolean_t batch_pmap_op)
8704 {
8705 unsigned int cache_attr = 0;
8706
8707 cache_attr = object->wimg_bits & VM_WIMG_MASK;
8708 assert(user_page_list);
8709 if (cache_attr != VM_WIMG_USE_DEFAULT) {
8710 PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
8711 }
8712 }
8713
8714
8715 boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
8716 kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
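/*
 * (Summary comment added for clarity.) These are the two fast paths
 * of vm_object_iopl_request(): _wire_full() wires an object whose
 * pages are all already resident, bailing out if any page is busy,
 * absent, or otherwise unstable; _wire_empty() populates a fully
 * non-resident object with freshly grabbed (and optionally
 * zero-filled) pages, wiring the non-absent ones as it inserts them.
 */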
8717
8718
8719
8720 boolean_t
8721 vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8722 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
8723 {
8724 vm_page_t dst_page;
8725 unsigned int entry;
8726 int page_count;
8727 int delayed_unlock = 0;
8728 boolean_t retval = TRUE;
8729 ppnum_t phys_page;
8730
8731 vm_object_lock_assert_exclusive(object);
8732 assert(object->purgable != VM_PURGABLE_VOLATILE);
8733 assert(object->purgable != VM_PURGABLE_EMPTY);
8734 assert(object->pager == NULL);
8735 assert(object->copy == NULL);
8736 assert(object->shadow == NULL);
8737
8738 page_count = object->resident_page_count;
8739 dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
8740
8741 vm_page_lock_queues();
8742
8743 while (page_count--) {
8744 if (dst_page->vmp_busy ||
8745 dst_page->vmp_fictitious ||
8746 dst_page->vmp_absent ||
8747 dst_page->vmp_error ||
8748 dst_page->vmp_cleaning ||
8749 dst_page->vmp_restart ||
8750 dst_page->vmp_laundry) {
8751 retval = FALSE;
8752 goto done;
8753 }
8754 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
8755 retval = FALSE;
8756 goto done;
8757 }
8758 dst_page->vmp_reference = TRUE;
8759
8760 vm_page_wire(dst_page, tag, FALSE);
8761
8762 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8763 SET_PAGE_DIRTY(dst_page, FALSE);
8764 }
8765 entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
8766 assert(entry >= 0 && entry < object->resident_page_count);
8767 lite_list[entry >> 5] |= 1U << (entry & 31);
8768
8769 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8770
8771 if (phys_page > upl->highest_page) {
8772 upl->highest_page = phys_page;
8773 }
8774
8775 if (user_page_list) {
8776 user_page_list[entry].phys_addr = phys_page;
8777 user_page_list[entry].absent = dst_page->vmp_absent;
8778 user_page_list[entry].dirty = dst_page->vmp_dirty;
8779 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
8780 user_page_list[entry].precious = dst_page->vmp_precious;
8781 user_page_list[entry].device = FALSE;
8782 user_page_list[entry].speculative = FALSE;
8783 user_page_list[entry].cs_validated = FALSE;
8784 user_page_list[entry].cs_tainted = FALSE;
8785 user_page_list[entry].cs_nx = FALSE;
8786 user_page_list[entry].needed = FALSE;
8787 user_page_list[entry].mark = FALSE;
8788 }
8789 if (delayed_unlock++ > 256) {
8790 delayed_unlock = 0;
8791 lck_mtx_yield(&vm_page_queue_lock);
8792
8793 VM_CHECK_MEMORYSTATUS;
8794 }
8795 dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
8796 }
8797 done:
8798 vm_page_unlock_queues();
8799
8800 VM_CHECK_MEMORYSTATUS;
8801
8802 return retval;
8803 }
8804
8805
8806 kern_return_t
8807 vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8808 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset,
8809 int page_count, int* page_grab_count)
8810 {
8811 vm_page_t dst_page;
8812 boolean_t no_zero_fill = FALSE;
8813 int interruptible;
8814 int pages_wired = 0;
8815 int pages_inserted = 0;
8816 int entry = 0;
8817 uint64_t delayed_ledger_update = 0;
8818 kern_return_t ret = KERN_SUCCESS;
8819 int grab_options;
8820 ppnum_t phys_page;
8821
8822 vm_object_lock_assert_exclusive(object);
8823 assert(object->purgable != VM_PURGABLE_VOLATILE);
8824 assert(object->purgable != VM_PURGABLE_EMPTY);
8825 assert(object->pager == NULL);
8826 assert(object->copy == NULL);
8827 assert(object->shadow == NULL);
8828
8829 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
8830 interruptible = THREAD_ABORTSAFE;
8831 } else {
8832 interruptible = THREAD_UNINT;
8833 }
8834
8835 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
8836 no_zero_fill = TRUE;
8837 }
8838
8839 grab_options = 0;
8840 #if CONFIG_SECLUDED_MEMORY
8841 if (object->can_grab_secluded) {
8842 grab_options |= VM_PAGE_GRAB_SECLUDED;
8843 }
8844 #endif /* CONFIG_SECLUDED_MEMORY */
8845
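/*
 * The object has no resident pages, so grab a fresh page for each
 * slot, sleeping (interruptibly, if the caller asked for that)
 * whenever the free list runs dry; vm_upl_wait_for_pages tracks
 * how many pages this request is still waiting on.
 */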
8846 while (page_count--) {
8847 while ((dst_page = vm_page_grab_options(grab_options))
8848 == VM_PAGE_NULL) {
8849 OSAddAtomic(page_count, &vm_upl_wait_for_pages);
8850
8851 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
8852
8853 if (vm_page_wait(interruptible) == FALSE) {
8854 /*
8855 * interrupted case
8856 */
8857 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8858
8859 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
8860
8861 ret = MACH_SEND_INTERRUPTED;
8862 goto done;
8863 }
8864 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8865
8866 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
8867 }
8868 if (no_zero_fill == FALSE) {
8869 vm_page_zero_fill(dst_page);
8870 } else {
8871 dst_page->vmp_absent = TRUE;
8872 }
8873
8874 dst_page->vmp_reference = TRUE;
8875
8876 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8877 SET_PAGE_DIRTY(dst_page, FALSE);
8878 }
8879 if (dst_page->vmp_absent == FALSE) {
8880 assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
8881 assert(dst_page->vmp_wire_count == 0);
8882 dst_page->vmp_wire_count++;
8883 dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
8884 assert(dst_page->vmp_wire_count);
8885 pages_wired++;
8886 PAGE_WAKEUP_DONE(dst_page);
8887 }
8888 pages_inserted++;
8889
8890 vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
8891
8892 lite_list[entry >> 5] |= 1U << (entry & 31);
8893
8894 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8895
8896 if (phys_page > upl->highest_page) {
8897 upl->highest_page = phys_page;
8898 }
8899
8900 if (user_page_list) {
8901 user_page_list[entry].phys_addr = phys_page;
8902 user_page_list[entry].absent = dst_page->vmp_absent;
8903 user_page_list[entry].dirty = dst_page->vmp_dirty;
8904 user_page_list[entry].free_when_done = FALSE;
8905 user_page_list[entry].precious = FALSE;
8906 user_page_list[entry].device = FALSE;
8907 user_page_list[entry].speculative = FALSE;
8908 user_page_list[entry].cs_validated = FALSE;
8909 user_page_list[entry].cs_tainted = FALSE;
8910 user_page_list[entry].cs_nx = FALSE;
8911 user_page_list[entry].needed = FALSE;
8912 user_page_list[entry].mark = FALSE;
8913 }
8914 entry++;
8915 *dst_offset += PAGE_SIZE_64;
8916 }
8917 done:
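/*
 * Fold the per-page bookkeeping into a few batched updates: one
 * queues-lock acquisition for the global wire count and single
 * atomic adds for the internal/external page counts.
 */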
8918 if (pages_wired) {
8919 vm_page_lockspin_queues();
8920 vm_page_wire_count += pages_wired;
8921 vm_page_unlock_queues();
8922 }
8923 if (pages_inserted) {
8924 if (object->internal) {
8925 OSAddAtomic(pages_inserted, &vm_page_internal_count);
8926 } else {
8927 OSAddAtomic(pages_inserted, &vm_page_external_count);
8928 }
8929 }
8930 if (delayed_ledger_update) {
8931 task_t owner;
8932 int ledger_idx_volatile;
8933 int ledger_idx_nonvolatile;
8934 int ledger_idx_volatile_compressed;
8935 int ledger_idx_nonvolatile_compressed;
8936 boolean_t do_footprint;
8937
8938 owner = VM_OBJECT_OWNER(object);
8939 assert(owner);
8940
8941 vm_object_ledger_tag_ledgers(object,
8942 &ledger_idx_volatile,
8943 &ledger_idx_nonvolatile,
8944 &ledger_idx_volatile_compressed,
8945 &ledger_idx_nonvolatile_compressed,
8946 &do_footprint);
8947
8948 /* more non-volatile bytes */
8949 ledger_credit(owner->ledger,
8950 ledger_idx_nonvolatile,
8951 delayed_ledger_update);
8952 if (do_footprint) {
8953 /* more footprint */
8954 ledger_credit(owner->ledger,
8955 task_ledgers.phys_footprint,
8956 delayed_ledger_update);
8957 }
8958 }
8959
8960 assert(page_grab_count);
8961 *page_grab_count = pages_inserted;
8962
8963 return ret;
8964 }
8965
8966
8967
8968 kern_return_t
8969 vm_object_iopl_request(
8970 vm_object_t object,
8971 vm_object_offset_t offset,
8972 upl_size_t size,
8973 upl_t *upl_ptr,
8974 upl_page_info_array_t user_page_list,
8975 unsigned int *page_list_count,
8976 upl_control_flags_t cntrl_flags,
8977 vm_tag_t tag)
8978 {
8979 vm_page_t dst_page;
8980 vm_object_offset_t dst_offset;
8981 upl_size_t xfer_size;
8982 upl_t upl = NULL;
8983 unsigned int entry;
8984 wpl_array_t lite_list = NULL;
8985 int no_zero_fill = FALSE;
8986 unsigned int size_in_pages;
8987 int page_grab_count = 0;
8988 u_int32_t psize;
8989 kern_return_t ret;
8990 vm_prot_t prot;
8991 struct vm_object_fault_info fault_info = {};
8992 struct vm_page_delayed_work dw_array;
8993 struct vm_page_delayed_work *dwp, *dwp_start;
8994 bool dwp_finish_ctx = TRUE;
8995 int dw_count;
8996 int dw_limit;
8997 int dw_index;
8998 boolean_t caller_lookup;
8999 int io_tracking_flag = 0;
9000 int interruptible;
9001 ppnum_t phys_page;
9002
9003 boolean_t set_cache_attr_needed = FALSE;
9004 boolean_t free_wired_pages = FALSE;
9005 boolean_t fast_path_empty_req = FALSE;
9006 boolean_t fast_path_full_req = FALSE;
9007
9008 #if DEVELOPMENT || DEBUG
9009 task_t task = current_task();
9010 #endif /* DEVELOPMENT || DEBUG */
9011
9012 dwp_start = dwp = NULL;
9013
9014 vm_object_offset_t original_offset = offset;
9015 upl_size_t original_size = size;
9016
9017 // DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags);
9018
9019 size = (upl_size_t)(vm_object_round_page(offset + size) - vm_object_trunc_page(offset));
9020 offset = vm_object_trunc_page(offset);
9021 if (size != original_size || offset != original_offset) {
9022 DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags, object, original_offset, original_size, offset, size);
9023 }
9024
9025 if (cntrl_flags & ~UPL_VALID_FLAGS) {
9026 /*
9027 * For forward compatibility's sake,
9028 * reject any unknown flag.
9029 */
9030 return KERN_INVALID_VALUE;
9031 }
9032 if (vm_lopage_needed == FALSE) {
9033 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
9034 }
9035
9036 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
9037 if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
9038 return KERN_INVALID_VALUE;
9039 }
9040
9041 if (object->phys_contiguous) {
9042 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
9043 return KERN_INVALID_ADDRESS;
9044 }
9045
9046 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
9047 return KERN_INVALID_ADDRESS;
9048 }
9049 }
9050 }
9051 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
9052 no_zero_fill = TRUE;
9053 }
9054
9055 if (cntrl_flags & UPL_COPYOUT_FROM) {
9056 prot = VM_PROT_READ;
9057 } else {
9058 prot = VM_PROT_READ | VM_PROT_WRITE;
9059 }
9060
9061 if ((!object->internal) && (object->paging_offset != 0)) {
9062 panic("vm_object_iopl_request: external object with non-zero paging offset");
9063 }
9064
9065
9066 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
9067
9068 #if CONFIG_IOSCHED || UPL_DEBUG
9069 if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) {
9070 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
9071 }
9072 #endif
9073
9074 #if CONFIG_IOSCHED
9075 if (object->io_tracking) {
9076 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
9077 if (object != kernel_object) {
9078 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
9079 }
9080 }
9081 #endif
9082
9083 if (object->phys_contiguous) {
9084 psize = PAGE_SIZE;
9085 } else {
9086 psize = size;
9087
9088 dw_count = 0;
9089 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
9090 dwp_start = vm_page_delayed_work_get_ctx();
9091 if (dwp_start == NULL) {
9092 dwp_start = &dw_array;
9093 dw_limit = 1;
9094 dwp_finish_ctx = FALSE;
9095 }
9096
9097 dwp = dwp_start;
9098 }
9099
9100 if (cntrl_flags & UPL_SET_INTERNAL) {
9101 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9102
9103 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9104 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
9105 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
9106 if (size == 0) {
9107 user_page_list = NULL;
9108 lite_list = NULL;
9109 }
9110 } else {
9111 upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
9112
9113 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
9114 if (size == 0) {
9115 lite_list = NULL;
9116 }
9117 }
9118 if (user_page_list) {
9119 user_page_list[0].device = FALSE;
9120 }
9121 *upl_ptr = upl;
9122
9123 if (cntrl_flags & UPL_NOZEROFILLIO) {
9124 DTRACE_VM4(upl_nozerofillio,
9125 vm_object_t, object,
9126 vm_object_offset_t, offset,
9127 upl_size_t, size,
9128 upl_t, upl);
9129 }
9130
9131 upl->map_object = object;
9132 upl->u_offset = original_offset;
9133 upl->u_size = original_size;
9134
9135 size_in_pages = size / PAGE_SIZE;
9136
9137 if (object == kernel_object &&
9138 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
9139 upl->flags |= UPL_KERNEL_OBJECT;
9140 #if UPL_DEBUG
9141 vm_object_lock(object);
9142 #else
9143 vm_object_lock_shared(object);
9144 #endif
9145 } else {
9146 vm_object_lock(object);
9147 vm_object_activity_begin(object);
9148 }
9149 /*
9150 * paging in progress also protects the paging_offset
9151 */
9152 upl->u_offset = original_offset + object->paging_offset;
9153
9154 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9155 /*
9156 * The user requested that access to the pages in this UPL
9157 * be blocked until the UPL is committed or aborted.
9158 */
9159 upl->flags |= UPL_ACCESS_BLOCKED;
9160 }
9161
9162 #if CONFIG_IOSCHED || UPL_DEBUG
9163 if ((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9164 vm_object_activity_begin(object);
9165 queue_enter(&object->uplq, upl, upl_t, uplq);
9166 }
9167 #endif
9168
9169 if (object->phys_contiguous) {
9170 if (upl->flags & UPL_ACCESS_BLOCKED) {
9171 assert(!object->blocked_access);
9172 object->blocked_access = TRUE;
9173 }
9174
9175 vm_object_unlock(object);
9176
9177 /*
9178 * don't need any shadow mappings for this one
9179 * since it is already I/O memory
9180 */
9181 upl->flags |= UPL_DEVICE_MEMORY;
9182
9183 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
9184
9185 if (user_page_list) {
9186 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
9187 user_page_list[0].device = TRUE;
9188 }
9189 if (page_list_count != NULL) {
9190 if (upl->flags & UPL_INTERNAL) {
9191 *page_list_count = 0;
9192 } else {
9193 *page_list_count = 1;
9194 }
9195 }
9196
9197 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9198 #if DEVELOPMENT || DEBUG
9199 if (task != NULL) {
9200 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9201 }
9202 #endif /* DEVELOPMENT || DEBUG */
9203 return KERN_SUCCESS;
9204 }
9205 if (object != kernel_object && object != compressor_object) {
9206 /*
9207 * Protect user space from future COW operations
9208 */
9209 #if VM_OBJECT_TRACKING_OP_TRUESHARE
9210 if (!object->true_share &&
9211 vm_object_tracking_btlog) {
9212 btlog_record(vm_object_tracking_btlog, object,
9213 VM_OBJECT_TRACKING_OP_TRUESHARE,
9214 btref_get(__builtin_frame_address(0), 0));
9215 }
9216 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
9217
9218 vm_object_lock_assert_exclusive(object);
9219 object->true_share = TRUE;
9220
9221 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
9222 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
9223 }
9224 }
9225
9226 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
9227 object->copy != VM_OBJECT_NULL) {
9228 /*
9229 * Honor copy-on-write obligations
9230 *
9231 * The caller is gathering these pages and
9232 * might modify their contents. We need to
9233 * make sure that the copy object has its own
9234 * private copies of these pages before we let
9235 * the caller modify them.
9236 *
9237 * NOTE: someone else could map the original object
9238 * after we've done this copy-on-write here, and they
9239 * could then see an inconsistent picture of the memory
9240 * while it's being modified via the UPL. To prevent this,
9241 * we would have to block access to these pages until the
9242 * UPL is released. We could use the UPL_BLOCK_ACCESS
9243 * code path for that...
9244 */
9245 vm_object_update(object,
9246 offset,
9247 size,
9248 NULL,
9249 NULL,
9250 FALSE, /* should_return */
9251 MEMORY_OBJECT_COPY_SYNC,
9252 VM_PROT_NO_CHANGE);
9253 VM_PAGEOUT_DEBUG(iopl_cow, 1);
9254 VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
9255 }
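/*
 * Two fast paths are possible when the request covers a whole,
 * pager-less, shadow-less, non-volatile object: a "full" request
 * (every page already resident) and an "empty" request (no pages
 * resident at all). Each gets a streamlined wiring routine.
 */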
9256 if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
9257 object->purgable != VM_PURGABLE_VOLATILE &&
9258 object->purgable != VM_PURGABLE_EMPTY &&
9259 object->copy == NULL &&
9260 size == object->vo_size &&
9261 offset == 0 &&
9262 object->shadow == NULL &&
9263 object->pager == NULL) {
9264 if (object->resident_page_count == size_in_pages) {
9265 assert(object != compressor_object);
9266 assert(object != kernel_object);
9267 fast_path_full_req = TRUE;
9268 } else if (object->resident_page_count == 0) {
9269 assert(object != compressor_object);
9270 assert(object != kernel_object);
9271 fast_path_empty_req = TRUE;
9272 set_cache_attr_needed = TRUE;
9273 }
9274 }
9275
9276 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
9277 interruptible = THREAD_ABORTSAFE;
9278 } else {
9279 interruptible = THREAD_UNINT;
9280 }
9281
9282 entry = 0;
9283
9284 xfer_size = size;
9285 dst_offset = offset;
9286
9287 if (fast_path_full_req) {
9288 if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) {
9289 goto finish;
9290 }
9291 /*
9292 * we couldn't complete the processing of this request on the fast path
9293 * so fall through to the slow path and finish up
9294 */
9295 } else if (fast_path_empty_req) {
9296 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9297 ret = KERN_MEMORY_ERROR;
9298 goto return_err;
9299 }
9300 ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
9301
9302 if (ret) {
9303 free_wired_pages = TRUE;
9304 goto return_err;
9305 }
9306 goto finish;
9307 }
9308
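/*
 * Slow path: describe the range to the fault handler, then walk
 * it page by page below, faulting in whatever isn't resident.
 */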
9309 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
9310 fault_info.lo_offset = offset;
9311 fault_info.hi_offset = offset + xfer_size;
9312 fault_info.mark_zf_absent = TRUE;
9313 fault_info.interruptible = interruptible;
9314 fault_info.batch_pmap_op = TRUE;
9315
9316 while (xfer_size) {
9317 vm_fault_return_t result;
9318
9319 dwp->dw_mask = 0;
9320
9321 if (fast_path_full_req) {
9322 /*
9323 * if we get here, it means that we ran into a page
9324 * state we couldn't handle in the fast path and
9325 * bailed out to the slow path... since the order
9326 * we look at pages is different between the 2 paths,
9327 * the following check is needed to determine whether
9328 * this page was already processed in the fast path
9329 */
9330 if (lite_list[entry >> 5] & (1 << (entry & 31))) {
9331 goto skip_page;
9332 }
9333 }
9334 dst_page = vm_page_lookup(object, dst_offset);
9335
9336 if (dst_page == VM_PAGE_NULL ||
9337 dst_page->vmp_busy ||
9338 dst_page->vmp_error ||
9339 dst_page->vmp_restart ||
9340 dst_page->vmp_absent ||
9341 dst_page->vmp_fictitious) {
9342 if (object == kernel_object) {
9343 panic("vm_object_iopl_request: missing/bad page in kernel object");
9344 }
9345 if (object == compressor_object) {
9346 panic("vm_object_iopl_request: missing/bad page in compressor object");
9347 }
9348
9349 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
9350 ret = KERN_MEMORY_ERROR;
9351 goto return_err;
9352 }
9353 set_cache_attr_needed = TRUE;
9354
9355 /*
9356 * We just looked up the page and the result remains valid
9357 * until the object lock is released, so pass it to
9358 * vm_fault_page() (as "dst_page"), to avoid having to
9359 * look it up again there.
9360 */
9361 caller_lookup = TRUE;
9362
9363 do {
9364 vm_page_t top_page;
9365 kern_return_t error_code;
9366
9367 fault_info.cluster_size = xfer_size;
9368
9369 vm_object_paging_begin(object);
9370
9371 result = vm_fault_page(object, dst_offset,
9372 prot | VM_PROT_WRITE, FALSE,
9373 caller_lookup,
9374 &prot, &dst_page, &top_page,
9375 (int *)0,
9376 &error_code, no_zero_fill,
9377 &fault_info);
9378
9379 /* our lookup is no longer valid at this point */
9380 caller_lookup = FALSE;
9381
9382 switch (result) {
9383 case VM_FAULT_SUCCESS:
9384 page_grab_count++;
9385
9386 if (!dst_page->vmp_absent) {
9387 PAGE_WAKEUP_DONE(dst_page);
9388 } else {
9389 /*
9390 * we only get back an absent page if we
9391 * requested that it not be zero-filled
9392 * because we are about to fill it via I/O
9393 *
9394 * absent pages should be left BUSY
9395 * to prevent them from being faulted
9396 * into an address space before we've
9397 * had a chance to complete the I/O on
9398 * them since they may contain info that
9399 * shouldn't be seen by the faulting task
9400 */
9401 }
9402 /*
9403 * Release paging references and
9404 * top-level placeholder page, if any.
9405 */
9406 if (top_page != VM_PAGE_NULL) {
9407 vm_object_t local_object;
9408
9409 local_object = VM_PAGE_OBJECT(top_page);
9410
9411 /*
9412 * comparing 2 packed pointers
9413 */
9414 if (top_page->vmp_object != dst_page->vmp_object) {
9415 vm_object_lock(local_object);
9416 VM_PAGE_FREE(top_page);
9417 vm_object_paging_end(local_object);
9418 vm_object_unlock(local_object);
9419 } else {
9420 VM_PAGE_FREE(top_page);
9421 vm_object_paging_end(local_object);
9422 }
9423 }
9424 vm_object_paging_end(object);
9425 break;
9426
9427 case VM_FAULT_RETRY:
9428 vm_object_lock(object);
9429 break;
9430
9431 case VM_FAULT_MEMORY_SHORTAGE:
9432 OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
9433
9434 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
9435
9436 if (vm_page_wait(interruptible)) {
9437 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9438
9439 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
9440 vm_object_lock(object);
9441
9442 break;
9443 }
9444 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9445
9446 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
9447
9448 OS_FALLTHROUGH;
9449
9450 case VM_FAULT_INTERRUPTED:
9451 error_code = MACH_SEND_INTERRUPTED;
9452 OS_FALLTHROUGH;
9453 case VM_FAULT_MEMORY_ERROR:
9454 memory_error:
9455 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
9456
9457 vm_object_lock(object);
9458 goto return_err;
9459
9460 case VM_FAULT_SUCCESS_NO_VM_PAGE:
9461 /* success but no page: fail */
9462 vm_object_paging_end(object);
9463 vm_object_unlock(object);
9464 goto memory_error;
9465
9466 default:
9467 panic("vm_object_iopl_request: unexpected error"
9468 " 0x%x from vm_fault_page()\n", result);
9469 }
9470 } while (result != VM_FAULT_SUCCESS);
9471 }
9472 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9473
9474 if (upl->flags & UPL_KERNEL_OBJECT) {
9475 goto record_phys_addr;
9476 }
9477
9478 if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9479 dst_page->vmp_busy = TRUE;
9480 goto record_phys_addr;
9481 }
9482
9483 if (dst_page->vmp_cleaning) {
9484 /*
9485 * Someone else is cleaning this page in place.
9486 * In theory, we should be able to proceed and use this
9487 * page, but they'll probably end up clearing the "busy"
9488 * bit in upl_commit_range() even though they didn't set
9489 * it, which would clear our "busy" bit and open us up
9490 * to race conditions.
9491 * We'd better wait for the cleaning to complete and
9492 * then try again.
9493 */
9494 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
9495 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
9496 continue;
9497 }
9498 if (dst_page->vmp_laundry) {
9499 vm_pageout_steal_laundry(dst_page, FALSE);
9500 }
9501
9502 if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
9503 phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
9504 vm_page_t low_page;
9505 int refmod;
9506
9507 /*
9508 * support devices that can't DMA above 32 bits
9509 * by substituting pages from a pool of low address
9510 * memory for any pages we find above the 4G mark...
9511 * we can't substitute if the page is already wired because
9512 * we don't know whether that physical address has been
9513 * handed out to some other 64 bit capable DMA device to use
9514 */
9515 if (VM_PAGE_WIRED(dst_page)) {
9516 ret = KERN_PROTECTION_FAILURE;
9517 goto return_err;
9518 }
9519 low_page = vm_page_grablo();
9520
9521 if (low_page == VM_PAGE_NULL) {
9522 ret = KERN_RESOURCE_SHORTAGE;
9523 goto return_err;
9524 }
9525 /*
9526 * from here until the vm_page_replace completes
9527 * we mustn't drop the object lock... we don't
9528 * want anyone refaulting this page in and using
9529 * it after we disconnect it... we want the fault
9530 * to find the new page being substituted.
9531 */
9532 if (dst_page->vmp_pmapped) {
9533 refmod = pmap_disconnect(phys_page);
9534 } else {
9535 refmod = 0;
9536 }
9537
9538 if (!dst_page->vmp_absent) {
9539 vm_page_copy(dst_page, low_page);
9540 }
9541
9542 low_page->vmp_reference = dst_page->vmp_reference;
9543 low_page->vmp_dirty = dst_page->vmp_dirty;
9544 low_page->vmp_absent = dst_page->vmp_absent;
9545
9546 if (refmod & VM_MEM_REFERENCED) {
9547 low_page->vmp_reference = TRUE;
9548 }
9549 if (refmod & VM_MEM_MODIFIED) {
9550 SET_PAGE_DIRTY(low_page, FALSE);
9551 }
9552
9553 vm_page_replace(low_page, object, dst_offset);
9554
9555 dst_page = low_page;
9556 /*
9557 * vm_page_grablo returned the page marked
9558 * BUSY... we don't need a PAGE_WAKEUP_DONE
9559 * here, because we've never dropped the object lock
9560 */
9561 if (!dst_page->vmp_absent) {
9562 dst_page->vmp_busy = FALSE;
9563 }
9564
9565 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9566 }
9567 if (!dst_page->vmp_busy) {
9568 dwp->dw_mask |= DW_vm_page_wire;
9569 }
9570
9571 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9572 /*
9573 * Mark the page "busy" to block any future page fault
9574 * on this page in addition to wiring it.
9575 * We'll also remove the mapping
9576 * of all these pages before leaving this routine.
9577 */
9578 assert(!dst_page->vmp_fictitious);
9579 dst_page->vmp_busy = TRUE;
9580 }
9581 /*
9582 * expect the page to be used
9583 * page queues lock must be held to set 'reference'
9584 */
9585 dwp->dw_mask |= DW_set_reference;
9586
9587 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
9588 SET_PAGE_DIRTY(dst_page, TRUE);
9589 /*
9590 * Page belonging to a code-signed object is about to
9591 * be written. Mark it tainted and disconnect it from
9592 * all pmaps so processes have to fault it back in and
9593 * deal with the tainted bit.
9594 */
9595 if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
9596 dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
9597 vm_page_iopl_tainted++;
9598 if (dst_page->vmp_pmapped) {
9599 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
9600 if (refmod & VM_MEM_REFERENCED) {
9601 dst_page->vmp_reference = TRUE;
9602 }
9603 }
9604 }
9605 }
9606 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
9607 pmap_sync_page_attributes_phys(phys_page);
9608 dst_page->vmp_written_by_kernel = FALSE;
9609 }
9610
9611 record_phys_addr:
9612 if (dst_page->vmp_busy) {
9613 upl->flags |= UPL_HAS_BUSY;
9614 }
9615
9616 lite_list[entry >> 5] |= 1U << (entry & 31);
9617
9618 if (phys_page > upl->highest_page) {
9619 upl->highest_page = phys_page;
9620 }
9621
9622 if (user_page_list) {
9623 user_page_list[entry].phys_addr = phys_page;
9624 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
9625 user_page_list[entry].absent = dst_page->vmp_absent;
9626 user_page_list[entry].dirty = dst_page->vmp_dirty;
9627 user_page_list[entry].precious = dst_page->vmp_precious;
9628 user_page_list[entry].device = FALSE;
9629 user_page_list[entry].needed = FALSE;
9630 if (dst_page->vmp_clustered == TRUE) {
9631 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
9632 } else {
9633 user_page_list[entry].speculative = FALSE;
9634 }
9635 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
9636 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
9637 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
9638 user_page_list[entry].mark = FALSE;
9639 }
9640 if (object != kernel_object && object != compressor_object) {
9641 /*
9642 * someone is explicitly grabbing this page...
9643 * update clustered and speculative state
9644 *
9645 */
9646 if (dst_page->vmp_clustered) {
9647 VM_PAGE_CONSUME_CLUSTERED(dst_page);
9648 }
9649 }
9650 skip_page:
9651 entry++;
9652 dst_offset += PAGE_SIZE_64;
9653 xfer_size -= PAGE_SIZE;
9654
9655 if (dwp->dw_mask) {
9656 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
9657
9658 if (dw_count >= dw_limit) {
9659 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9660
9661 dwp = dwp_start;
9662 dw_count = 0;
9663 }
9664 }
9665 }
9666 assert(entry == size_in_pages);
9667
9668 if (dw_count) {
9669 vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
9670 dwp = dwp_start;
9671 dw_count = 0;
9672 }
9673 finish:
9674 if (user_page_list && set_cache_attr_needed == TRUE) {
9675 vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
9676 }
9677
9678 if (page_list_count != NULL) {
9679 if (upl->flags & UPL_INTERNAL) {
9680 *page_list_count = 0;
9681 } else if (*page_list_count > size_in_pages) {
9682 *page_list_count = size_in_pages;
9683 }
9684 }
9685 vm_object_unlock(object);
9686
9687 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9688 /*
9689 * We've marked all the pages "busy" so that future
9690 * page faults will block.
9691 * Now remove the mapping for these pages, so that they
9692 * can't be accessed without causing a page fault.
9693 */
9694 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
9695 PMAP_NULL,
9696 PAGE_SIZE,
9697 0, VM_PROT_NONE);
9698 assert(!object->blocked_access);
9699 object->blocked_access = TRUE;
9700 }
9701
9702 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9703 #if DEVELOPMENT || DEBUG
9704 if (task != NULL) {
9705 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9706 }
9707 #endif /* DEVELOPMENT || DEBUG */
9708
9709 if (dwp_start && dwp_finish_ctx) {
9710 vm_page_delayed_work_finish_ctx(dwp_start);
9711 dwp_start = dwp = NULL;
9712 }
9713
9714 return KERN_SUCCESS;
9715
9716 return_err:
9717 dw_index = 0;
9718
9719 for (; offset < dst_offset; offset += PAGE_SIZE) {
9720 boolean_t need_unwire;
9721
9722 dst_page = vm_page_lookup(object, offset);
9723
9724 if (dst_page == VM_PAGE_NULL) {
9725 panic("vm_object_iopl_request: Wired page missing.");
9726 }
9727
9728 /*
9729 * if we've already processed this page in an earlier
9730 * dw_do_work, we need to undo the wiring... we will
9731 * leave the dirty and reference bits on if they
9732 * were set, since we don't have a good way of knowing
9733 * what the previous state was and we won't get here
9734 * under any normal circumstances... we will always
9735 * clear BUSY and wakeup any waiters via vm_page_free
9736 * or PAGE_WAKEUP_DONE
9737 */
9738 need_unwire = TRUE;
9739
9740 if (dw_count) {
9741 if ((dwp_start)[dw_index].dw_m == dst_page) {
9742 /*
9743 * still in the deferred work list
9744 * which means we haven't yet called
9745 * vm_page_wire on this page
9746 */
9747 need_unwire = FALSE;
9748
9749 dw_index++;
9750 dw_count--;
9751 }
9752 }
9753 vm_page_lock_queues();
9754
9755 if (dst_page->vmp_absent || free_wired_pages == TRUE) {
9756 vm_page_free(dst_page);
9757
9758 need_unwire = FALSE;
9759 } else {
9760 if (need_unwire == TRUE) {
9761 vm_page_unwire(dst_page, TRUE);
9762 }
9763
9764 PAGE_WAKEUP_DONE(dst_page);
9765 }
9766 vm_page_unlock_queues();
9767
9768 if (need_unwire == TRUE) {
9769 counter_inc(&vm_statistics_reactivations);
9770 }
9771 }
9772 #if UPL_DEBUG
9773 upl->upl_state = 2;
9774 #endif
9775 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
9776 vm_object_activity_end(object);
9777 vm_object_collapse(object, 0, TRUE);
9778 }
9779 vm_object_unlock(object);
9780 upl_destroy(upl);
9781
9782 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
9783 #if DEVELOPMENT || DEBUG
9784 if (task != NULL) {
9785 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9786 }
9787 #endif /* DEVELOPMENT || DEBUG */
9788
9789 if (dwp_start && dwp_finish_ctx) {
9790 vm_page_delayed_work_finish_ctx(dwp_start);
9791 dwp_start = dwp = NULL;
9792 }
9793 return ret;
9794 }
9795
9796 kern_return_t
9797 upl_transpose(
9798 upl_t upl1,
9799 upl_t upl2)
9800 {
9801 kern_return_t retval;
9802 boolean_t upls_locked;
9803 vm_object_t object1, object2;
9804
9805 /* LD: Should mapped UPLs be eligible for a transpose? */
9806 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
9807 return KERN_INVALID_ARGUMENT;
9808 }
9809
9810 upls_locked = FALSE;
9811
9812 /*
9813 * Since we need to lock both UPLs at the same time,
9814 * avoid deadlocks by always taking locks in the same order.
9815 */
9816 if (upl1 < upl2) {
9817 upl_lock(upl1);
9818 upl_lock(upl2);
9819 } else {
9820 upl_lock(upl2);
9821 upl_lock(upl1);
9822 }
9823 upls_locked = TRUE; /* the UPLs will need to be unlocked */
9824
9825 object1 = upl1->map_object;
9826 object2 = upl2->map_object;
9827
9828 if (upl1->u_offset != 0 || upl2->u_offset != 0 ||
9829 upl1->u_size != upl2->u_size) {
9830 /*
9831 * We deal only with full objects, not subsets.
9832 * That's because we exchange the entire backing store info
9833 * for the objects: pager, resident pages, etc... We can't do
9834 * only part of it.
9835 */
9836 retval = KERN_INVALID_VALUE;
9837 goto done;
9838 }
9839
9840 /*
9841 * Transpose the VM objects' backing store.
9842 */
9843 retval = vm_object_transpose(object1, object2,
9844 upl_adjusted_size(upl1, PAGE_MASK));
9845
9846 if (retval == KERN_SUCCESS) {
9847 /*
9848 * Make each UPL point to the correct VM object, i.e. the
9849 * object holding the pages that the UPL refers to...
9850 */
9851 #if CONFIG_IOSCHED || UPL_DEBUG
9852 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9853 vm_object_lock(object1);
9854 vm_object_lock(object2);
9855 }
9856 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9857 queue_remove(&object1->uplq, upl1, upl_t, uplq);
9858 }
9859 if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9860 queue_remove(&object2->uplq, upl2, upl_t, uplq);
9861 }
9862 #endif
9863 upl1->map_object = object2;
9864 upl2->map_object = object1;
9865
9866 #if CONFIG_IOSCHED || UPL_DEBUG
9867 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9868 queue_enter(&object2->uplq, upl1, upl_t, uplq);
9869 }
9870 if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
9871 queue_enter(&object1->uplq, upl2, upl_t, uplq);
9872 }
9873 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9874 vm_object_unlock(object2);
9875 vm_object_unlock(object1);
9876 }
9877 #endif
9878 }
9879
9880 done:
9881 /*
9882 * Cleanup.
9883 */
9884 if (upls_locked) {
9885 upl_unlock(upl1);
9886 upl_unlock(upl2);
9887 upls_locked = FALSE;
9888 }
9889
9890 return retval;
9891 }
9892
9893 void
9894 upl_range_needed(
9895 upl_t upl,
9896 int index,
9897 int count)
9898 {
9899 upl_page_info_t *user_page_list;
9900 int size_in_pages;
9901
9902 if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
9903 return;
9904 }
9905
9906 size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
9907
9908 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9909
9910 while (count-- && index < size_in_pages) {
9911 user_page_list[index++].needed = TRUE;
9912 }
9913 }
9914
9915
9916 /*
9917 * Reserve of virtual addresses in the kernel address space.
9918 * We need to map the physical pages in the kernel, so that we
9919 * can call the code-signing or slide routines with a kernel
9920 * virtual address. We keep this pool of pre-allocated kernel
9921 * virtual addresses so that we don't have to scan the kernel's
9922 * virtual address space each time we need to work with
9923 * a physical page.
9924 */
9925 SIMPLE_LOCK_DECLARE(vm_paging_lock, 0);
9926 #define VM_PAGING_NUM_PAGES 64
9927 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_paging_base_address = 0;
9928 bool vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
9929 int vm_paging_max_index = 0;
9930 int vm_paging_page_waiter = 0;
9931 int vm_paging_page_waiter_total = 0;
9932
9933 unsigned long vm_paging_no_kernel_page = 0;
9934 unsigned long vm_paging_objects_mapped = 0;
9935 unsigned long vm_paging_pages_mapped = 0;
9936 unsigned long vm_paging_objects_mapped_slow = 0;
9937 unsigned long vm_paging_pages_mapped_slow = 0;
9938
9939 __startup_func
9940 static void
9941 vm_paging_map_init(void)
9942 {
9943 kmem_alloc(kernel_map, &vm_paging_base_address,
9944 ptoa(VM_PAGING_NUM_PAGES),
9945 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_PAGEABLE,
9946 VM_KERN_MEMORY_NONE);
9947 }
9948 STARTUP(ZALLOC, STARTUP_RANK_LAST, vm_paging_map_init);
9949
9950 /*
9951 * vm_paging_map_object:
9952 * Maps part of a VM object's pages in the kernel
9953 * virtual address space, using the pre-allocated
9954 * kernel virtual addresses, if possible.
9955 * Context:
9956 * The VM object is locked. This lock will get
9957 * dropped and re-acquired though, so the caller
9958 * must make sure the VM object is kept alive
9959 * (by holding a VM map that has a reference
9960 * on it, for example, or taking an extra reference).
9961 * The page should also be kept busy to prevent
9962 * it from being reclaimed.
9963 */
9964 kern_return_t
9965 vm_paging_map_object(
9966 vm_page_t page,
9967 vm_object_t object,
9968 vm_object_offset_t offset,
9969 vm_prot_t protection,
9970 boolean_t can_unlock_object,
9971 vm_map_size_t *size, /* IN/OUT */
9972 vm_map_offset_t *address, /* OUT */
9973 boolean_t *need_unmap) /* OUT */
9974 {
9975 kern_return_t kr;
9976 vm_map_offset_t page_map_offset;
9977 vm_map_size_t map_size;
9978 vm_object_offset_t object_offset;
9979 int i;
9980
9981 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
9982 /* use permanent 1-to-1 kernel mapping of physical memory ? */
9983 *address = (vm_map_offset_t)
9984 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
9985 *need_unmap = FALSE;
9986 return KERN_SUCCESS;
9987
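/*
 * The pool-based path below is retained but not currently
 * reached: the single-page case always returns above via the
 * physical aperture mapping.
 */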
9988 assert(page->vmp_busy);
9989 /*
9990 * Use one of the pre-allocated kernel virtual addresses
9991 * and just enter the VM page in the kernel address space
9992 * at that virtual address.
9993 */
9994 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9995
9996 /*
9997 * Try and find an available kernel virtual address
9998 * from our pre-allocated pool.
9999 */
10000 page_map_offset = 0;
10001 for (;;) {
10002 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
10003 if (vm_paging_page_inuse[i] == FALSE) {
10004 page_map_offset =
10005 vm_paging_base_address +
10006 (i * PAGE_SIZE);
10007 break;
10008 }
10009 }
10010 if (page_map_offset != 0) {
10011 /* found a space to map our page ! */
10012 break;
10013 }
10014
10015 if (can_unlock_object) {
10016 /*
10017 * If we can afford to unlock the VM object,
10018 * let's take the slow path now...
10019 */
10020 break;
10021 }
10022 /*
10023 * We can't afford to unlock the VM object, so
10024 * let's wait for a space to become available...
10025 */
10026 vm_paging_page_waiter_total++;
10027 vm_paging_page_waiter++;
10028 kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
10029 if (kr == THREAD_WAITING) {
10030 simple_unlock(&vm_paging_lock);
10031 kr = thread_block(THREAD_CONTINUE_NULL);
10032 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10033 }
10034 vm_paging_page_waiter--;
10035 /* ... and try again */
10036 }
10037
10038 if (page_map_offset != 0) {
10039 /*
10040 * We found a kernel virtual address;
10041 * map the physical page to that virtual address.
10042 */
10043 if (i > vm_paging_max_index) {
10044 vm_paging_max_index = i;
10045 }
10046 vm_paging_page_inuse[i] = TRUE;
10047 simple_unlock(&vm_paging_lock);
10048
10049 page->vmp_pmapped = TRUE;
10050
10051 /*
10052 * Keep the VM object locked over the PMAP_ENTER
10053 * and the actual use of the page by the kernel,
10054 * or this pmap mapping might get undone by a
10055 * vm_object_pmap_protect() call...
10056 */
10057 PMAP_ENTER(kernel_pmap,
10058 page_map_offset,
10059 page,
10060 protection,
10061 VM_PROT_NONE,
10062 0,
10063 TRUE,
10064 kr);
10065 assert(kr == KERN_SUCCESS);
10066 vm_paging_objects_mapped++;
10067 vm_paging_pages_mapped++;
10068 *address = page_map_offset;
10069 *need_unmap = TRUE;
10070
10071 #if KASAN
10072 kasan_notify_address(page_map_offset, PAGE_SIZE);
10073 #endif
10074
10075 /* all done and mapped, ready to use ! */
10076 return KERN_SUCCESS;
10077 }
10078
10079 /*
10080 * We ran out of pre-allocated kernel virtual
10081 * addresses. Just map the page in the kernel
10082 * the slow and regular way.
10083 */
10084 vm_paging_no_kernel_page++;
10085 simple_unlock(&vm_paging_lock);
10086 }
10087
10088 if (!can_unlock_object) {
10089 *address = 0;
10090 *size = 0;
10091 *need_unmap = FALSE;
10092 return KERN_NOT_SUPPORTED;
10093 }
10094
10095 object_offset = vm_object_trunc_page(offset);
10096 map_size = vm_map_round_page(*size,
10097 VM_MAP_PAGE_MASK(kernel_map));
10098
10099 /*
10100 * Try and map the required range of the object
10101 * in the kernel_map
10102 */
10103
10104 vm_object_reference_locked(object); /* for the map entry */
10105 vm_object_unlock(object);
10106
10107 kr = vm_map_enter(kernel_map,
10108 address,
10109 map_size,
10110 0,
10111 VM_FLAGS_ANYWHERE,
10112 VM_MAP_KERNEL_FLAGS_NONE,
10113 VM_KERN_MEMORY_NONE,
10114 object,
10115 object_offset,
10116 FALSE,
10117 protection,
10118 VM_PROT_ALL,
10119 VM_INHERIT_NONE);
10120 if (kr != KERN_SUCCESS) {
10121 *address = 0;
10122 *size = 0;
10123 *need_unmap = FALSE;
10124 vm_object_deallocate(object); /* for the map entry */
10125 vm_object_lock(object);
10126 return kr;
10127 }
10128
10129 *size = map_size;
10130
10131 /*
10132 * Enter the mapped pages in the page table now.
10133 */
10134 vm_object_lock(object);
10135 /*
10136 * VM object must be kept locked from before PMAP_ENTER()
10137 * until after the kernel is done accessing the page(s).
10138 * Otherwise, the pmap mappings in the kernel could be
10139 * undone by a call to vm_object_pmap_protect().
10140 */
10141
10142 for (page_map_offset = 0;
10143 map_size != 0;
10144 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
10145 page = vm_page_lookup(object, offset + page_map_offset);
10146 if (page == VM_PAGE_NULL) {
10147 printf("vm_paging_map_object: no page !?");
10148 vm_object_unlock(object);
10149 vm_map_remove(kernel_map, *address, *size);
10150 *address = 0;
10151 *size = 0;
10152 *need_unmap = FALSE;
10153 vm_object_lock(object);
10154 return KERN_MEMORY_ERROR;
10155 }
10156 page->vmp_pmapped = TRUE;
10157
10158 PMAP_ENTER(kernel_pmap,
10159 *address + page_map_offset,
10160 page,
10161 protection,
10162 VM_PROT_NONE,
10163 0,
10164 TRUE,
10165 kr);
10166 assert(kr == KERN_SUCCESS);
10167 #if KASAN
10168 kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
10169 #endif
10170 }
10171
10172 vm_paging_objects_mapped_slow++;
10173 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
10174
10175 *need_unmap = TRUE;
10176
10177 return KERN_SUCCESS;
10178 }
10179
10180 /*
10181 * vm_paging_unmap_object:
10182 * Unmaps part of a VM object's pages from the kernel
10183 * virtual address space.
10184 * Context:
10185 * The VM object is locked. This lock will get
10186 * dropped and re-acquired though.
10187 */
10188 void
10189 vm_paging_unmap_object(
10190 vm_object_t object,
10191 vm_map_offset_t start,
10192 vm_map_offset_t end)
10193 {
10194 int i;
10195
10196 if ((vm_paging_base_address == 0) ||
10197 (start < vm_paging_base_address) ||
10198 (end > (vm_paging_base_address
10199 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
10200 /*
10201 * We didn't use our pre-allocated pool of
10202 * kernel virtual address. Deallocate the
10203 * virtual memory.
10204 */
10205 if (object != VM_OBJECT_NULL) {
10206 vm_object_unlock(object);
10207 }
10208 vm_map_remove(kernel_map, start, end);
10209 if (object != VM_OBJECT_NULL) {
10210 vm_object_lock(object);
10211 }
10212 } else {
10213 /*
10214 * We used a kernel virtual address from our
10215 * pre-allocated pool. Put it back in the pool
10216 * for next time.
10217 */
10218 assert(end - start == PAGE_SIZE);
10219 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
10220 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
10221
10222 /* undo the pmap mapping */
10223 pmap_remove(kernel_pmap, start, end);
10224
10225 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
10226 vm_paging_page_inuse[i] = FALSE;
10227 if (vm_paging_page_waiter) {
10228 thread_wakeup(&vm_paging_page_waiter);
10229 }
10230 simple_unlock(&vm_paging_lock);
10231 }
10232 }
10233
10234
10235 /*
10236 * page->vmp_object must be locked
10237 */
10238 void
10239 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
10240 {
10241 if (!queues_locked) {
10242 vm_page_lockspin_queues();
10243 }
10244
10245 page->vmp_free_when_done = FALSE;
10246 /*
10247 * need to drop the laundry count...
10248 * we may also need to remove it
10249 * from the I/O paging queue...
10250 * vm_pageout_throttle_up handles both cases
10251 *
10252 * the laundry and pageout_queue flags are cleared...
10253 */
10254 vm_pageout_throttle_up(page);
10255
10256 if (!queues_locked) {
10257 vm_page_unlock_queues();
10258 }
10259 }
10260
10261 upl_t
10262 vector_upl_create(vm_offset_t upl_offset)
10263 {
10264 int i = 0;
10265 upl_t upl;
10266 vector_upl_t vector_upl = kalloc_type(struct _vector_upl, Z_WAITOK);
10267
10268 upl = upl_create(0, UPL_VECTOR, 0);
10269 upl->vector_upl = vector_upl;
10270 upl->u_offset = upl_offset;
10271 vector_upl->size = 0;
10272 vector_upl->offset = upl_offset;
10273 vector_upl->invalid_upls = 0;
10274 vector_upl->num_upls = 0;
10275 vector_upl->pagelist = NULL;
10276
10277 for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
10278 vector_upl->upl_iostates[i].size = 0;
10279 vector_upl->upl_iostates[i].offset = 0;
10280 }
10281 return upl;
10282 }
10283
10284 void
10285 vector_upl_deallocate(upl_t upl)
10286 {
10287 if (upl) {
10288 vector_upl_t vector_upl = upl->vector_upl;
10289 if (vector_upl) {
10290 if (vector_upl->invalid_upls != vector_upl->num_upls) {
10291 panic("Deallocating non-empty Vectored UPL");
10292 }
10293 kfree_data(vector_upl->pagelist, sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));
10294 vector_upl->invalid_upls = 0;
10295 vector_upl->num_upls = 0;
10296 vector_upl->pagelist = NULL;
10297 vector_upl->size = 0;
10298 vector_upl->offset = 0;
10299 kfree_type(struct _vector_upl, vector_upl);
10300 vector_upl = (vector_upl_t)0xfeedfeed;
10301 } else {
10302 panic("vector_upl_deallocate was passed a non-vectored upl");
10303 }
10304 } else {
10305 panic("vector_upl_deallocate was passed a NULL upl");
10306 }
10307 }
10308
10309 boolean_t
10310 vector_upl_is_valid(upl_t upl)
10311 {
10312 if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
10313 vector_upl_t vector_upl = upl->vector_upl;
10314 if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
10315 return FALSE;
10316 } else {
10317 return TRUE;
10318 }
10319 }
10320 return FALSE;
10321 }
10322
10323 boolean_t
10324 vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
10325 {
10326 if (vector_upl_is_valid(upl)) {
10327 vector_upl_t vector_upl = upl->vector_upl;
10328
10329 if (vector_upl) {
10330 if (subupl) {
10331 if (io_size) {
10332 if (io_size < PAGE_SIZE) {
10333 io_size = PAGE_SIZE;
10334 }
10335 subupl->vector_upl = (void*)vector_upl;
10336 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
10337 vector_upl->size += io_size;
10338 upl->u_size += io_size;
10339 } else {
10340 uint32_t i = 0, invalid_upls = 0;
10341 for (i = 0; i < vector_upl->num_upls; i++) {
10342 if (vector_upl->upl_elems[i] == subupl) {
10343 break;
10344 }
10345 }
10346 if (i == vector_upl->num_upls) {
10347 panic("Trying to remove sub-upl when none exists");
10348 }
10349
10350 vector_upl->upl_elems[i] = NULL;
10351 invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls,
10352 relaxed);
10353 if (invalid_upls == vector_upl->num_upls) {
10354 return TRUE;
10355 } else {
10356 return FALSE;
10357 }
10358 }
10359 } else {
10360 panic("vector_upl_set_subupl was passed a NULL upl element");
10361 }
10362 } else {
10363 panic("vector_upl_set_subupl was passed a non-vectored upl");
10364 }
10365 } else {
10366 panic("vector_upl_set_subupl was passed a NULL upl");
10367 }
10368
10369 return FALSE;
10370 }
10371
10372 void
10373 vector_upl_set_pagelist(upl_t upl)
10374 {
10375 if (vector_upl_is_valid(upl)) {
10376 uint32_t i = 0;
10377 vector_upl_t vector_upl = upl->vector_upl;
10378
10379 if (vector_upl) {
10380 vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
10381
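/*
 * Concatenate every sub-UPL's page list into one contiguous
 * array covering the whole vector UPL, and track the highest
 * physical page seen across all sub-UPLs.
 */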
10382 vector_upl->pagelist = kalloc_data(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE), Z_WAITOK);
10383
10384 for (i = 0; i < vector_upl->num_upls; i++) {
10385 cur_upl_pagelist_size = sizeof(struct upl_page_info) * upl_adjusted_size(vector_upl->upl_elems[i], PAGE_MASK) / PAGE_SIZE;
10386 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
10387 pagelist_size += cur_upl_pagelist_size;
10388 if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) {
10389 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
10390 }
10391 }
10392 assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
10393 } else {
10394 panic("vector_upl_set_pagelist was passed a non-vectored upl");
10395 }
10396 } else {
10397 panic("vector_upl_set_pagelist was passed a NULL upl");
10398 }
10399 }
10400
10401 upl_t
10402 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
10403 {
10404 if (vector_upl_is_valid(upl)) {
10405 vector_upl_t vector_upl = upl->vector_upl;
10406 if (vector_upl) {
10407 if (index < vector_upl->num_upls) {
10408 return vector_upl->upl_elems[index];
10409 }
10410 } else {
10411 panic("vector_upl_subupl_byindex was passed a non-vectored upl");
10412 }
10413 }
10414 return NULL;
10415 }
10416
10417 upl_t
10418 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
10419 {
10420 if (vector_upl_is_valid(upl)) {
10421 uint32_t i = 0;
10422 vector_upl_t vector_upl = upl->vector_upl;
10423
10424 if (vector_upl) {
10425 upl_t subupl = NULL;
10426 vector_upl_iostates_t subupl_state;
10427
10428 for (i = 0; i < vector_upl->num_upls; i++) {
10429 subupl = vector_upl->upl_elems[i];
10430 subupl_state = vector_upl->upl_iostates[i];
10431 if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
10432 /* We could have been passed an offset/size pair that belongs
10433 * to an UPL element that has already been committed/aborted.
10434 * If so, return NULL.
10435 */
10436 if (subupl == NULL) {
10437 return NULL;
10438 }
10439 if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
10440 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
10441 if (*upl_size > subupl_state.size) {
10442 *upl_size = subupl_state.size;
10443 }
10444 }
10445 if (*upl_offset >= subupl_state.offset) {
10446 *upl_offset -= subupl_state.offset;
10447 } else if (i) {
10448 panic("Vector UPL offset miscalculation");
10449 }
10450 return subupl;
10451 }
10452 }
10453 } else {
10454 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL");
10455 }
10456 }
10457 return NULL;
10458 }
10459
10460 void
10461 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
10462 {
10463 *v_upl_submap = NULL;
10464
10465 if (vector_upl_is_valid(upl)) {
10466 vector_upl_t vector_upl = upl->vector_upl;
10467 if (vector_upl) {
10468 *v_upl_submap = vector_upl->submap;
10469 *submap_dst_addr = vector_upl->submap_dst_addr;
10470 } else {
10471 panic("vector_upl_get_submap was passed a non-vectored UPL");
10472 }
10473 } else {
10474 panic("vector_upl_get_submap was passed a null UPL");
10475 }
10476 }
10477
10478 void
10479 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
10480 {
10481 if (vector_upl_is_valid(upl)) {
10482 vector_upl_t vector_upl = upl->vector_upl;
10483 if (vector_upl) {
10484 vector_upl->submap = submap;
10485 vector_upl->submap_dst_addr = submap_dst_addr;
10486 } else {
10487 panic("vector_upl_get_submap was passed a non-vectored UPL");
10488 }
10489 } else {
10490 panic("vector_upl_get_submap was passed a NULL UPL");
10491 }
10492 }
10493
10494 void
10495 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
10496 {
10497 if (vector_upl_is_valid(upl)) {
10498 uint32_t i = 0;
10499 vector_upl_t vector_upl = upl->vector_upl;
10500
10501 if (vector_upl) {
10502 for (i = 0; i < vector_upl->num_upls; i++) {
10503 if (vector_upl->upl_elems[i] == subupl) {
10504 break;
10505 }
10506 }
10507
10508 if (i == vector_upl->num_upls) {
10509 panic("setting sub-upl iostate when none exists");
10510 }
10511
10512 vector_upl->upl_iostates[i].offset = offset;
10513 if (size < PAGE_SIZE) {
10514 size = PAGE_SIZE;
10515 }
10516 vector_upl->upl_iostates[i].size = size;
10517 } else {
10518 panic("vector_upl_set_iostate was passed a non-vectored UPL");
10519 }
10520 } else {
10521 panic("vector_upl_set_iostate was passed a NULL UPL");
10522 }
10523 }
10524
10525 void
10526 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
10527 {
10528 if (vector_upl_is_valid(upl)) {
10529 uint32_t i = 0;
10530 vector_upl_t vector_upl = upl->vector_upl;
10531
10532 if (vector_upl) {
10533 for (i = 0; i < vector_upl->num_upls; i++) {
10534 if (vector_upl->upl_elems[i] == subupl) {
10535 break;
10536 }
10537 }
10538
10539 if (i == vector_upl->num_upls) {
10540 panic("getting sub-upl iostate when none exists");
10541 }
10542
10543 *offset = vector_upl->upl_iostates[i].offset;
10544 *size = vector_upl->upl_iostates[i].size;
10545 } else {
10546 panic("vector_upl_get_iostate was passed a non-vectored UPL");
10547 }
10548 } else {
10549 panic("vector_upl_get_iostate was passed a NULL UPL");
10550 }
10551 }
10552
10553 void
10554 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
10555 {
10556 if (vector_upl_is_valid(upl)) {
10557 vector_upl_t vector_upl = upl->vector_upl;
10558 if (vector_upl) {
10559 if (index < vector_upl->num_upls) {
10560 *offset = vector_upl->upl_iostates[index].offset;
10561 *size = vector_upl->upl_iostates[index].size;
10562 } else {
10563 *offset = *size = 0;
10564 }
10565 } else {
10566 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL");
10567 }
10568 } else {
10569 panic("vector_upl_get_iostate_byindex was passed a NULL UPL");
10570 }
10571 }
10572
10573 upl_page_info_t *
10574 upl_get_internal_vectorupl_pagelist(upl_t upl)
10575 {
10576 return ((vector_upl_t)(upl->vector_upl))->pagelist;
10577 }
10578
10579 void *
10580 upl_get_internal_vectorupl(upl_t upl)
10581 {
10582 return upl->vector_upl;
10583 }
10584
10585 vm_size_t
10586 upl_get_internal_pagelist_offset(void)
10587 {
10588 return sizeof(struct upl);
10589 }
10590
10591 void
10592 upl_clear_dirty(
10593 upl_t upl,
10594 boolean_t value)
10595 {
10596 if (value) {
10597 upl->flags |= UPL_CLEAR_DIRTY;
10598 } else {
10599 upl->flags &= ~UPL_CLEAR_DIRTY;
10600 }
10601 }
10602
10603 void
10604 upl_set_referenced(
10605 upl_t upl,
10606 boolean_t value)
10607 {
10608 upl_lock(upl);
10609 if (value) {
10610 upl->ext_ref_count++;
10611 } else {
10612 if (!upl->ext_ref_count) {
10613 panic("upl_set_referenced not %p", upl);
10614 }
10615 upl->ext_ref_count--;
10616 }
10617 upl_unlock(upl);
10618 }
10619
10620 #if CONFIG_IOSCHED
10621 void
10622 upl_set_blkno(
10623 upl_t upl,
10624 vm_offset_t upl_offset,
10625 int io_size,
10626 int64_t blkno)
10627 {
10628 int i, j;
10629 if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
10630 return;
10631 }
10632
10633 assert(upl->upl_reprio_info != 0);
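/*
 * Tag each page covered by this I/O with the block number and
 * I/O size, so the I/O scheduler can expedite or reprioritize it.
 */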
10634 for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
10635 UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
10636 }
10637 }
10638 #endif
10639
10640 inline void
10641 memoryshot(unsigned int event, unsigned int control)
10642 {
10643 if (vm_debug_events) {
10644 KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
10645 vm_page_active_count, vm_page_inactive_count,
10646 vm_page_free_count, vm_page_speculative_count,
10647 vm_page_throttled_count);
10648 } else {
10649 (void) event;
10650 (void) control;
10651 }
10652 }
10653
10654 #ifdef MACH_BSD
10655
10656 boolean_t
10657 upl_device_page(upl_page_info_t *upl)
10658 {
10659 return UPL_DEVICE_PAGE(upl);
10660 }
10661 boolean_t
10662 upl_page_present(upl_page_info_t *upl, int index)
10663 {
10664 return UPL_PAGE_PRESENT(upl, index);
10665 }
10666 boolean_t
10667 upl_speculative_page(upl_page_info_t *upl, int index)
10668 {
10669 return UPL_SPECULATIVE_PAGE(upl, index);
10670 }
10671 boolean_t
10672 upl_dirty_page(upl_page_info_t *upl, int index)
10673 {
10674 return UPL_DIRTY_PAGE(upl, index);
10675 }
10676 boolean_t
10677 upl_valid_page(upl_page_info_t *upl, int index)
10678 {
10679 return UPL_VALID_PAGE(upl, index);
10680 }
10681 ppnum_t
10682 upl_phys_page(upl_page_info_t *upl, int index)
10683 {
10684 return UPL_PHYS_PAGE(upl, index);
10685 }
10686
10687 void
10688 upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
10689 {
10690 upl[index].mark = v;
10691 }
10692
10693 boolean_t
10694 upl_page_get_mark(upl_page_info_t *upl, int index)
10695 {
10696 return upl[index].mark;
10697 }
10698
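/*
 * Debug helper: walk the inactive, throttled, anonymous and active
 * queues and report how many pages on each are dirty, queued for
 * pageout (free_when_done) or precious.
 */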
10699 void
10700 vm_countdirtypages(void)
10701 {
10702 vm_page_t m;
10703 int dpages;
10704 int pgopages;
10705 int precpages;
10706
10707
10708 dpages = 0;
10709 pgopages = 0;
10710 precpages = 0;
10711
10712 vm_page_lock_queues();
10713 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
10714 do {
10715 if (m == (vm_page_t)0) {
10716 break;
10717 }
10718
10719 if (m->vmp_dirty) {
10720 dpages++;
10721 }
10722 if (m->vmp_free_when_done) {
10723 pgopages++;
10724 }
10725 if (m->vmp_precious) {
10726 precpages++;
10727 }
10728
10729 assert(VM_PAGE_OBJECT(m) != kernel_object);
10730 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10731 if (m == (vm_page_t)0) {
10732 break;
10733 }
10734 } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
10735 vm_page_unlock_queues();
10736
10737 vm_page_lock_queues();
10738 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
10739 do {
10740 if (m == (vm_page_t)0) {
10741 break;
10742 }
10743
10744 dpages++;
10745 assert(m->vmp_dirty);
10746 assert(!m->vmp_free_when_done);
10747 assert(VM_PAGE_OBJECT(m) != kernel_object);
10748 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10749 if (m == (vm_page_t)0) {
10750 break;
10751 }
10752 } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
10753 vm_page_unlock_queues();
10754
10755 vm_page_lock_queues();
10756 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
10757 do {
10758 if (m == (vm_page_t)0) {
10759 break;
10760 }
10761
10762 if (m->vmp_dirty) {
10763 dpages++;
10764 }
10765 if (m->vmp_free_when_done) {
10766 pgopages++;
10767 }
10768 if (m->vmp_precious) {
10769 precpages++;
10770 }
10771
10772 assert(VM_PAGE_OBJECT(m) != kernel_object);
10773 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10774 if (m == (vm_page_t)0) {
10775 break;
10776 }
10777 } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
10778 vm_page_unlock_queues();
10779
10780 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
10781
10782 dpages = 0;
10783 pgopages = 0;
10784 precpages = 0;
10785
10786 vm_page_lock_queues();
10787 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
10788
10789 do {
10790 if (m == (vm_page_t)0) {
10791 break;
10792 }
10793 if (m->vmp_dirty) {
10794 dpages++;
10795 }
10796 if (m->vmp_free_when_done) {
10797 pgopages++;
10798 }
10799 if (m->vmp_precious) {
10800 precpages++;
10801 }
10802
10803 assert(VM_PAGE_OBJECT(m) != kernel_object);
10804 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10805 if (m == (vm_page_t)0) {
10806 break;
10807 }
10808 } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
10809 vm_page_unlock_queues();
10810
10811 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
10812 }
10813 #endif /* MACH_BSD */

#if CONFIG_IOSCHED
int
upl_get_cached_tier(upl_t upl)
{
	assert(upl);
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		return upl->upl_priority;
	}
	return -1;
}
#endif /* CONFIG_IOSCHED */

void
upl_callout_iodone(upl_t upl)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		void (*iodone_func)(void *, int) = upl_ctx->io_done;

		assert(upl_ctx->io_done);

		(*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
	}
}

void
upl_set_iodone(upl_t upl, void *upl_iodone)
{
	upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
}

void
upl_set_iodone_error(upl_t upl, int error)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		upl_ctx->io_error = error;
	}
}
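
/*
 * Illustrative pairing of the three routines above (my_iodone() and
 * my_state are hypothetical caller-side names):
 *
 *	struct upl_io_completion ctx = {
 *		.io_context = my_state,
 *		.io_done    = my_iodone,
 *		.io_error   = 0,
 *	};
 *	upl_set_iodone(upl, &ctx);
 *	... issue the I/O ...
 *	if (failed)
 *		upl_set_iodone_error(upl, EIO);
 *	upl_callout_iodone(upl);	 calls my_iodone(my_state, io_error)
 *	upl_set_iodone(upl, NULL);	 clear before ctx goes out of scope
 */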

ppnum_t
upl_get_highest_page(
	upl_t upl)
{
	return upl->highest_page;
}

upl_size_t
upl_get_size(
	upl_t upl)
{
	return upl_adjusted_size(upl, PAGE_MASK);
}

upl_size_t
upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t pgmask)
{
	vm_object_offset_t start_offset, end_offset;

	start_offset = trunc_page_mask_64(upl->u_offset, pgmask);
	end_offset = round_page_mask_64(upl->u_offset + upl->u_size, pgmask);

	return (upl_size_t)(end_offset - start_offset);
}

vm_object_offset_t
upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t pgmask)
{
	return trunc_page_mask_64(upl->u_offset, pgmask);
}

vm_object_offset_t
upl_get_data_offset(
	upl_t upl)
{
	return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK);
}
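
/*
 * Worked example: with PAGE_MASK == 0xFFF, a UPL created with
 * u_offset == 0x1800 and u_size == 0x1000 yields
 *
 *	upl_adjusted_offset() == trunc_page_mask_64(0x1800, 0xFFF) == 0x1000
 *	upl_adjusted_size()   == round_page_mask_64(0x2800, 0xFFF) - 0x1000
 *	                      == 0x3000 - 0x1000 == 0x2000 (two pages)
 *	upl_get_data_offset() == 0x1800 - 0x1000 == 0x800
 *
 * i.e. the caller's data starts 0x800 bytes into the first page of the
 * page-aligned span described by the UPL.
 */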

upl_t
upl_associated_upl(upl_t upl)
{
	return upl->associated_upl;
}

void
upl_set_associated_upl(upl_t upl, upl_t associated_upl)
{
	upl->associated_upl = associated_upl;
}

/*
 * Internal (anonymous) objects have no backing vnode; only a UPL whose
 * map object is pager-backed can be mapped back to one.
 */
struct vnode *
upl_lookup_vnode(upl_t upl)
{
	if (!upl->map_object->internal) {
		return vnode_pager_lookup_vnode(upl->map_object->pager);
	} else {
		return NULL;
	}
}

#if UPL_DEBUG
kern_return_t
upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int
upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if (al) {
		*al = upl->ubc_alias1;
	}
	if (al2) {
		*al2 = upl->ubc_alias2;
	}
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */

#if VM_PRESSURE_EVENTS
/*
 * Upward trajectory.
 */
extern boolean_t vm_compressor_low_on_space(void);

boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
			/* No frozen processes to kill */
			if (memorystatus_frozen_count == 0) {
				/* Not enough suspended processes available. */
				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
					return TRUE;
				}
			}
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
			return TRUE;
		}
		return FALSE;
	} else {
		return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}

/*
 * Downward trajectory.
 */
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}
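
/*
 * Note on the thresholds: the upward and downward transitions are
 * deliberately asymmetric so the pressure level does not oscillate.
 * With the compressor active, NORMAL -> WARNING fires when available
 * non-compressed memory drops below the compact threshold, but
 * WARNING -> NORMAL requires climbing back above 12/10 of it, a 20%
 * hysteresis band; likewise WARNING -> CRITICAL trips below 12/10 of
 * the swap-unthrottle threshold while CRITICAL -> WARNING needs 14/10.
 * Without the compressor, the downward transitions require available
 * pages to exceed the corresponding threshold by 15%.
 */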
#endif /* VM_PRESSURE_EVENTS */