xref: /xnu-8020.101.4/osfmk/vm/memory_object.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/memory_object.c
60  *	Author:	Michael Wayne Young
61  *
62  *	External memory management interface control functions.
63  */
64 
65 /*
66  *	Interface dependencies:
67  */
68 
69 #include <mach/std_types.h>     /* For pointer_t */
70 #include <mach/mach_types.h>
71 
72 #include <mach/mig.h>
73 #include <mach/kern_return.h>
74 #include <mach/memory_object.h>
75 #include <mach/memory_object_default.h>
76 #include <mach/memory_object_control_server.h>
77 #include <mach/host_priv_server.h>
78 #include <mach/boolean.h>
79 #include <mach/vm_prot.h>
80 #include <mach/message.h>
81 
82 /*
83  *	Implementation dependencies:
84  */
85 #include <string.h>             /* For memcpy() */
86 
87 #include <kern/host.h>
88 #include <kern/thread.h>        /* For current_thread() */
89 #include <kern/ipc_mig.h>
90 #include <kern/misc_protos.h>
91 
92 #include <vm/vm_object.h>
93 #include <vm/vm_fault.h>
94 #include <vm/memory_object.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/pmap.h>            /* For pmap_clear_modify */
98 #include <vm/vm_kern.h>         /* For kernel_map, vm_move */
99 #include <vm/vm_map.h>          /* For vm_map_pageable */
100 #include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
101 #include <vm/vm_shared_region.h>
102 
103 #include <vm/vm_external.h>
104 
105 #include <vm/vm_protos.h>
106 
107 memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
108 LCK_MTX_EARLY_DECLARE(memory_manager_default_lock, &vm_object_lck_grp);
109 
110 
111 /*
112  *	Routine:	memory_object_should_return_page
113  *
114  *	Description:
115  *		Determine whether the given page should be returned,
116  *		based on the page's state and on the given return policy.
117  *
118  *		We should return the page if one of the following is true:
119  *
120  *		1. Page is dirty and should_return is not RETURN_NONE.
121  *		2. Page is precious and should_return is RETURN_ALL.
122  *		3. Should_return is RETURN_ANYTHING.
123  *
124  *		As a side effect, m->vmp_dirty will be made consistent
125  *		with pmap_is_modified(m), if should_return is not
126  *		MEMORY_OBJECT_RETURN_NONE.
127  */
128 
129 #define memory_object_should_return_page(m, should_return) \
130     (should_return != MEMORY_OBJECT_RETURN_NONE && \
131      (((m)->vmp_dirty || ((m)->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
132       ((m)->vmp_precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
133       (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
134 
135 typedef int     memory_object_lock_result_t;
136 
137 #define MEMORY_OBJECT_LOCK_RESULT_DONE                  0
138 #define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK            1
139 #define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN           2
140 #define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE             3
141 
142 memory_object_lock_result_t memory_object_lock_page(
143 	vm_page_t               m,
144 	memory_object_return_t  should_return,
145 	boolean_t               should_flush,
146 	vm_prot_t               prot);
147 
148 /*
149  *	Routine:	memory_object_lock_page
150  *
151  *	Description:
152  *		Perform the appropriate lock operations on the
153  *		given page.  See the description of
154  *		"memory_object_lock_request" for the meanings
155  *		of the arguments.
156  *
157  *		Returns an indication that the operation
158  *		completed, blocked, or that the page must
159  *		be cleaned.
160  */
161 memory_object_lock_result_t
162 memory_object_lock_page(
163 	vm_page_t               m,
164 	memory_object_return_t  should_return,
165 	boolean_t               should_flush,
166 	vm_prot_t               prot)
167 {
168 	if (prot == VM_PROT_NO_CHANGE_LEGACY) {
169 		prot = VM_PROT_NO_CHANGE;
170 	}
171 
172 	if (m->vmp_busy || m->vmp_cleaning) {
173 		return MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK;
174 	}
175 
176 	if (m->vmp_laundry) {
177 		vm_pageout_steal_laundry(m, FALSE);
178 	}
179 
180 	/*
181 	 *	Don't worry about pages for which the kernel
182 	 *	does not have any data.
183 	 */
184 	if (m->vmp_absent || m->vmp_error || m->vmp_restart) {
185 		if (m->vmp_error && should_flush && !VM_PAGE_WIRED(m)) {
186 			/*
187 			 * dump the page, pager wants us to
188 			 * clean it up and there is no
189 			 * relevant data to return
190 			 */
191 			return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE;
192 		}
193 		return MEMORY_OBJECT_LOCK_RESULT_DONE;
194 	}
195 	assert(!m->vmp_fictitious);
196 
197 	if (VM_PAGE_WIRED(m)) {
198 		/*
199 		 * The page is wired... just clean or return the page if needed.
200 		 * Wired pages don't get flushed or disconnected from the pmap.
201 		 */
202 		if (memory_object_should_return_page(m, should_return)) {
203 			return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN;
204 		}
205 
206 		return MEMORY_OBJECT_LOCK_RESULT_DONE;
207 	}
208 
209 	if (should_flush) {
210 		/*
211 		 * must do the pmap_disconnect before determining the
212 		 * need to return the page... otherwise it's possible
213 		 * for the page to go from the clean to the dirty state
214 		 * after we've made our decision
215 		 */
216 		if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
217 			SET_PAGE_DIRTY(m, FALSE);
218 		}
219 	} else {
220 		/*
221 		 * If we are decreasing permission, do it now;
222 		 * let the fault handler take care of increases
223 		 * (pmap_page_protect may not increase protection).
224 		 */
225 		if (prot != VM_PROT_NO_CHANGE) {
226 			pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
227 		}
228 	}
229 	/*
230 	 *	Handle returning dirty or precious pages
231 	 */
232 	if (memory_object_should_return_page(m, should_return)) {
233 		/*
234 		 * we used to do a pmap_disconnect here in support
235 		 * of memory_object_lock_request, but that routine
236 		 * no longer requires this...  in any event, in
237 		 * our world, it would turn into a big noop since
238 		 * we don't lock the page in any way and as soon
239 		 * as we drop the object lock, the page can be
240 		 * faulted back into an address space
241 		 *
242 		 *	if (!should_flush)
243 		 *		pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
244 		 */
245 		return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN;
246 	}
247 
248 	/*
249 	 *	Handle flushing clean pages
250 	 */
251 	if (should_flush) {
252 		return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE;
253 	}
254 
255 	/*
256 	 * we used to deactivate clean pages at this point,
257 	 * but we do not believe that an msync should change
258 	 * the 'age' of a page in the cache... here is the
259 	 * original comment and code concerning this...
260 	 *
261 	 *	XXX Make clean but not flush a paging hint,
262 	 *	and deactivate the pages.  This is a hack
263 	 *	because it overloads flush/clean with
264 	 *	implementation-dependent meaning.  This only
265 	 *	happens to pages that are already clean.
266 	 *
267 	 *   if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
268 	 *	return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
269 	 */
270 
271 	return MEMORY_OBJECT_LOCK_RESULT_DONE;
272 }
273 
274 
275 
276 /*
277  *	Routine:	memory_object_lock_request [user interface]
278  *
279  *	Description:
280  *		Control use of the data associated with the given
281  *		memory object.  For each page in the given range,
282  *		perform the following operations, in order:
283  *			1)  restrict access to the page (disallow
284  *			    forms specified by "prot");
285  *			2)  return data to the manager (if "should_return"
286  *			    is RETURN_DIRTY and the page is dirty, or
287  *                          "should_return" is RETURN_ALL and the page
288  *			    is either dirty or precious); and,
289  *			3)  flush the cached copy (if "should_flush"
290  *			    is asserted).
291  *		The set of pages is defined by a starting offset
292  *		("offset") and size ("size").  Only pages with the
293  *		same page alignment as the starting offset are
294  *		considered.
295  *
296  *		A single acknowledgement is sent (to the "reply_to"
297  *		port) when these actions are complete.  If successful,
298  *		the naked send right for reply_to is consumed.
299  */
300 
301 kern_return_t
302 memory_object_lock_request(
303 	memory_object_control_t         control,
304 	memory_object_offset_t          offset,
305 	memory_object_size_t            size,
306 	memory_object_offset_t  *       resid_offset,
307 	int                     *       io_errno,
308 	memory_object_return_t          should_return,
309 	int                             flags,
310 	vm_prot_t                       prot)
311 {
312 	vm_object_t     object;
313 
314 	if (prot == VM_PROT_NO_CHANGE_LEGACY) {
315 		prot = VM_PROT_NO_CHANGE;
316 	}
317 
318 	/*
319 	 *	Check for bogus arguments.
320 	 */
321 	object = memory_object_control_to_vm_object(control);
322 	if (object == VM_OBJECT_NULL) {
323 		return KERN_INVALID_ARGUMENT;
324 	}
325 
326 	if ((prot & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)) != 0 && prot != VM_PROT_NO_CHANGE) {
327 		return KERN_INVALID_ARGUMENT;
328 	}
329 
330 	size = round_page_64(size);
331 
332 	/*
333 	 *	Lock the object, and acquire a paging reference to
334 	 *	prevent the memory_object reference from being released.
335 	 */
336 	vm_object_lock(object);
337 	vm_object_paging_begin(object);
338 
339 	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
340 		if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
341 			flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
342 			flags |= MEMORY_OBJECT_DATA_FLUSH;
343 		}
344 	}
345 	offset -= object->paging_offset;
346 
347 	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
348 		vm_object_reap_pages(object, REAP_DATA_FLUSH);
349 	} else {
350 		(void)vm_object_update(object, offset, size, resid_offset,
351 		    io_errno, should_return, flags, prot);
352 	}
353 
354 	vm_object_paging_end(object);
355 	vm_object_unlock(object);
356 
357 	return KERN_SUCCESS;
358 }
359 
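/*
 * A minimal illustrative sketch (example only; not a definitive client):
 * a kernel caller holding a memory object control reference asks for the
 * dirty pages in the first megabyte to be returned to the pager and the
 * cached copies flushed, roughly what an msync(MS_SYNC)-style path wants.
 * The 'control' reference is assumed to be supplied by the caller.
 */
#if 0   /* example only */
static kern_return_t
example_flush_and_return_first_mb(memory_object_control_t control)
{
	memory_object_offset_t  resid_offset = 0;
	int                     io_errno = 0;

	return memory_object_lock_request(control,
	           0,                           /* offset */
	           1024 * 1024,                 /* size */
	           &resid_offset,
	           &io_errno,
	           MEMORY_OBJECT_RETURN_DIRTY,  /* return dirty pages */
	           MEMORY_OBJECT_DATA_FLUSH,    /* then flush cached copies */
	           VM_PROT_NO_CHANGE);          /* leave protections alone */
}
#endif
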
360 /*
361  *	memory_object_release_name:  [interface]
362  *
363  *	Enforces the name semantic on a memory_object reference count decrement.
364  *	This routine should not be called unless the caller holds a name
365  *	reference gained through the memory_object_named_create or the
366  *	memory_object_rename call.
367  *	If the TERMINATE_IDLE flag is set, the call returns unless the
368  *	reference count is 1, i.e. the object is idle with the name as its
369  *	only remaining reference.
370  *	If the decision is made to proceed, the named flag is cleared and the
371  *	reference count is decremented.  If the RESPECT_CACHE flag is set and
372  *	the reference count has reached zero, the memory_object is checked for
373  *	cacheability; otherwise, when the reference count reaches zero it is
374  *	simply terminated.
375  */
376 
377 kern_return_t
378 memory_object_release_name(
379 	memory_object_control_t control,
380 	int                             flags)
381 {
382 	vm_object_t     object;
383 
384 	object = memory_object_control_to_vm_object(control);
385 	if (object == VM_OBJECT_NULL) {
386 		return KERN_INVALID_ARGUMENT;
387 	}
388 
389 	return vm_object_release_name(object, flags);
390 }
391 
392 
393 
394 /*
395  *	Routine:	memory_object_destroy [user interface]
396  *	Purpose:
397  *		Shut down a memory object, despite the
398  *		presence of address map (or other) references
399  *		to the vm_object.
400  */
401 kern_return_t
402 memory_object_destroy(
403 	memory_object_control_t control,
404 	kern_return_t           reason)
405 {
406 	vm_object_t             object;
407 
408 	object = memory_object_control_to_vm_object(control);
409 	if (object == VM_OBJECT_NULL) {
410 		return KERN_INVALID_ARGUMENT;
411 	}
412 
413 	return vm_object_destroy(object, reason);
414 }
415 
416 /*
417  *	Routine:	vm_object_sync
418  *
419  *	Kernel internal function to synch out pages in a given
420  *	range within an object to its memory manager.  Much the
421  *	same as memory_object_lock_request but page protection
422  *	is not changed.
423  *
424  *	If the should_flush and should_return flags are true, pages
425  *	are flushed; that is, dirty & precious pages are written to
426  *	the memory manager and then discarded.  If should_return
427  *	is false, only precious pages are returned to the memory
428  *	manager.
429  *
430  *	If should_flush is false and should_return is true, the memory
431  *	manager's copy of the pages is updated.  If should_return
432  *	is also false, only the precious pages are updated.  This
433  *	last option is of limited utility.
434  *
435  *	Returns:
436  *	FALSE		if no pages were returned to the pager
437  *	TRUE		otherwise.
438  */
439 
440 boolean_t
441 vm_object_sync(
442 	vm_object_t             object,
443 	vm_object_offset_t      offset,
444 	vm_object_size_t        size,
445 	boolean_t               should_flush,
446 	boolean_t               should_return,
447 	boolean_t               should_iosync)
448 {
449 	boolean_t       rv;
450 	int             flags;
451 
452 	/*
453 	 * Lock the object, and acquire a paging reference to
454 	 * prevent the memory_object and control ports from
455 	 * being destroyed.
456 	 */
457 	vm_object_lock(object);
458 	vm_object_paging_begin(object);
459 
460 	if (should_flush) {
461 		flags = MEMORY_OBJECT_DATA_FLUSH;
462 		/*
463 		 * This flush is from an msync(), not a truncate(), so the
464 		 * contents of the file are not affected.
465 		 * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
466 		 * that the data is not changed and that there's no need to
467 		 * push the old contents to a copy object.
468 		 */
469 		flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
470 	} else {
471 		flags = 0;
472 	}
473 
474 	if (should_iosync) {
475 		flags |= MEMORY_OBJECT_IO_SYNC;
476 	}
477 
478 	rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
479 	    (should_return) ?
480 	    MEMORY_OBJECT_RETURN_ALL :
481 	    MEMORY_OBJECT_RETURN_NONE,
482 	    flags,
483 	    VM_PROT_NO_CHANGE);
484 
485 
486 	vm_object_paging_end(object);
487 	vm_object_unlock(object);
488 	return rv;
489 }
490 
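/*
 * A minimal illustrative sketch (example only): a synchronous msync()-style
 * use of vm_object_sync() that writes dirty pages to the pager without
 * evicting them from the cache.  'object', 'offset' and 'size' are assumed
 * to come from the caller's map entry.
 */
#if 0   /* example only */
static boolean_t
example_msync_range(vm_object_t object, vm_object_offset_t offset,
    vm_object_size_t size)
{
	return vm_object_sync(object, offset, size,
	           FALSE,       /* should_flush: keep the pages resident */
	           TRUE,        /* should_return: push dirty pages to the pager */
	           TRUE);       /* should_iosync: wait for the I/O to complete */
}
#endif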
491 
492 
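/*
 * LIST_REQ_PAGEOUT_PAGES hands a run of dirty/precious pages back to the
 * pager via memory_object_data_return().  It takes a paging reference and
 * temporarily drops the object lock around the call, so the caller must be
 * prepared for the object's page list to change across the macro.
 */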
493 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync)    \
494 MACRO_BEGIN                                                             \
495                                                                         \
496 	int			upl_flags;                              \
497 	memory_object_t		pager;                                  \
498                                                                         \
499 	if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {          \
500 	        vm_object_paging_begin(object);                         \
501 	        vm_object_unlock(object);                               \
502                                                                         \
503 	        if (iosync)                                             \
504 	                upl_flags = UPL_MSYNC | UPL_IOSYNC;             \
505 	        else                                                    \
506 	                upl_flags = UPL_MSYNC;                          \
507                                                                         \
508 	        (void) memory_object_data_return(pager,                 \
509 	                po,                                             \
510 	                (memory_object_cluster_size_t)data_cnt,         \
511 	                ro,                                             \
512 	                ioerr,                                          \
513 	                FALSE,                                          \
514 	                FALSE,                                          \
515 	                upl_flags);                                     \
516                                                                         \
517 	        vm_object_lock(object);                                 \
518 	        vm_object_paging_end(object);                           \
519 	}                                                               \
520 MACRO_END
521 
522 extern struct vnode *
523 vnode_pager_lookup_vnode(memory_object_t);
524 
525 static int
526 vm_object_update_extent(
527 	vm_object_t             object,
528 	vm_object_offset_t      offset,
529 	vm_object_offset_t      offset_end,
530 	vm_object_offset_t      *offset_resid,
531 	int                     *io_errno,
532 	boolean_t               should_flush,
533 	memory_object_return_t  should_return,
534 	boolean_t               should_iosync,
535 	vm_prot_t               prot)
536 {
537 	vm_page_t       m;
538 	int             retval = 0;
539 	vm_object_offset_t      paging_offset = 0;
540 	vm_object_offset_t      next_offset = offset;
541 	memory_object_lock_result_t     page_lock_result;
542 	memory_object_cluster_size_t    data_cnt = 0;
543 	struct  vm_page_delayed_work    dw_array;
544 	struct  vm_page_delayed_work    *dwp, *dwp_start;
545 	bool            dwp_finish_ctx = TRUE;
546 	int             dw_count;
547 	int             dw_limit;
548 	int             dirty_count;
549 
550 	dwp_start = dwp = NULL;
551 	dw_count = 0;
552 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
553 	dwp_start = vm_page_delayed_work_get_ctx();
554 	if (dwp_start == NULL) {
555 		dwp_start = &dw_array;
556 		dw_limit = 1;
557 		dwp_finish_ctx = FALSE;
558 	}
559 	dwp = dwp_start;
560 
561 	dirty_count = 0;
562 
563 	for (;
564 	    offset < offset_end && object->resident_page_count;
565 	    offset += PAGE_SIZE_64) {
566 		/*
567 		 * Limit the number of pages to be cleaned at once to a contiguous
568 		 * run, or at most MAX_UPL_TRANSFER_BYTES
569 		 */
570 		if (data_cnt) {
571 			if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
572 				if (dw_count) {
573 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
574 					dwp = dwp_start;
575 					dw_count = 0;
576 				}
577 				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
578 				    paging_offset, offset_resid, io_errno, should_iosync);
579 				data_cnt = 0;
580 			}
581 		}
582 		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
583 			dwp->dw_mask = 0;
584 
585 			page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
586 
587 			if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
588 				/*
589 				 *	End of a run of dirty/precious pages.
590 				 */
591 				if (dw_count) {
592 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
593 					dwp = dwp_start;
594 					dw_count = 0;
595 				}
596 				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
597 				    paging_offset, offset_resid, io_errno, should_iosync);
598 				/*
599 				 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
600 				 * allow the state of page 'm' to change... we need to re-lookup
601 				 * the current offset
602 				 */
603 				data_cnt = 0;
604 				continue;
605 			}
606 
607 			switch (page_lock_result) {
608 			case MEMORY_OBJECT_LOCK_RESULT_DONE:
609 				break;
610 
611 			case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
612 				if (m->vmp_dirty == TRUE) {
613 					dirty_count++;
614 				}
615 				dwp->dw_mask |= DW_vm_page_free;
616 				break;
617 
618 			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
619 				PAGE_SLEEP(object, m, THREAD_UNINT);
620 				continue;
621 
622 			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
623 				if (data_cnt == 0) {
624 					paging_offset = offset;
625 				}
626 
627 				data_cnt += PAGE_SIZE;
628 				next_offset = offset + PAGE_SIZE_64;
629 
630 				/*
631 				 * wired pages shouldn't be flushed and
632 				 * since they aren't on any queue,
633 				 * no need to remove them
634 				 */
635 				if (!VM_PAGE_WIRED(m)) {
636 					if (should_flush) {
637 						/*
638 						 * add additional state for the flush
639 						 */
640 						m->vmp_free_when_done = TRUE;
641 					}
642 					/*
643 					 * we used to remove the page from the queues at this
644 					 * point, but we do not believe that an msync
645 					 * should cause the 'age' of a page to be changed
646 					 *
647 					 *    else
648 					 *	dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
649 					 */
650 				}
651 				retval = 1;
652 				break;
653 			}
654 			if (dwp->dw_mask) {
655 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
656 
657 				if (dw_count >= dw_limit) {
658 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
659 					dwp = dwp_start;
660 					dw_count = 0;
661 				}
662 			}
663 			break;
664 		}
665 	}
666 
667 	if (object->pager) {
668 		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
669 	}
670 	/*
671 	 *	We have completed the scan for applicable pages.
672 	 *	Clean any pages that have been saved.
673 	 */
674 	if (dw_count) {
675 		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
676 	}
677 
678 	if (data_cnt) {
679 		LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
680 		    paging_offset, offset_resid, io_errno, should_iosync);
681 	}
682 
683 	if (dwp_start && dwp_finish_ctx) {
684 		vm_page_delayed_work_finish_ctx(dwp_start);
685 		dwp_start = dwp = NULL;
686 	}
687 
688 	return retval;
689 }
690 
691 
692 
693 /*
694  *	Routine:	vm_object_update
695  *	Description:
696  *		Work function for m_o_lock_request(), vm_o_sync().
697  *
698  *		Called with object locked and paging ref taken.
699  */
700 kern_return_t
701 vm_object_update(
702 	vm_object_t             object,
703 	vm_object_offset_t      offset,
704 	vm_object_size_t        size,
705 	vm_object_offset_t      *resid_offset,
706 	int                     *io_errno,
707 	memory_object_return_t  should_return,
708 	int                     flags,
709 	vm_prot_t               protection)
710 {
711 	vm_object_t             copy_object = VM_OBJECT_NULL;
712 	boolean_t               data_returned = FALSE;
713 	boolean_t               update_cow;
714 	boolean_t               should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
715 	boolean_t               should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
716 	vm_fault_return_t       result;
717 	int                     num_of_extents;
718 	int                     n;
719 #define MAX_EXTENTS     8
720 #define EXTENT_SIZE     (1024 * 1024 * 256)
721 #define RESIDENT_LIMIT  (1024 * 32)
722 	struct extent {
723 		vm_object_offset_t e_base;
724 		vm_object_offset_t e_min;
725 		vm_object_offset_t e_max;
726 	} extents[MAX_EXTENTS];
727 
728 	/*
729 	 *	To avoid blocking while scanning for pages, save
730 	 *	dirty pages to be cleaned all at once.
731 	 *
732 	 *	XXXO A similar strategy could be used to limit the
733 	 *	number of times that a scan must be restarted for
734 	 *	other reasons.  Those pages that would require blocking
735 	 *	could be temporarily collected in another list, or
736 	 *	their offsets could be recorded in a small array.
737 	 */
738 
739 	/*
740 	 * XXX	NOTE: May want to consider converting this to a page list
741 	 * XXX	vm_map_copy interface.  Need to understand object
742 	 * XXX	coalescing implications before doing so.
743 	 */
744 
745 	update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
746 	    && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
747 	    !(flags & MEMORY_OBJECT_DATA_PURGE)))
748 	    || (flags & MEMORY_OBJECT_COPY_SYNC);
749 
750 	if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
751 		int collisions = 0;
752 
753 		while ((copy_object = object->copy) != VM_OBJECT_NULL) {
754 			/*
755 			 * need to do a try here since we're swimming upstream
756 			 * against the normal lock ordering... however, we need
757 			 * to hold the object stable until we gain control of the
758 			 * copy object so we have to be careful how we approach this
759 			 */
760 			if (vm_object_lock_try(copy_object)) {
761 				/*
762 				 * we 'won' the lock on the copy object...
763 				 * no need to hold the object lock any longer...
764 				 * take a real reference on the copy object because
765 				 * we're going to call vm_fault_page on it which may
766 				 * under certain conditions drop the lock and the paging
767 				 * reference we're about to take... the reference
768 				 * will keep the copy object from going away if that happens
769 				 */
770 				vm_object_unlock(object);
771 				vm_object_reference_locked(copy_object);
772 				break;
773 			}
774 			vm_object_unlock(object);
775 
776 			collisions++;
777 			mutex_pause(collisions);
778 
779 			vm_object_lock(object);
780 		}
781 	}
782 	if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
783 		vm_object_offset_t      i;
784 		vm_object_size_t        copy_size;
785 		vm_object_offset_t      copy_offset;
786 		vm_prot_t               prot;
787 		vm_page_t               page;
788 		vm_page_t               top_page;
789 		kern_return_t           error = 0;
790 		struct vm_object_fault_info fault_info = {};
791 
792 		if (copy_object != VM_OBJECT_NULL) {
793 			/*
794 			 * translate offset with respect to shadow's offset
795 			 */
796 			copy_offset = (offset >= copy_object->vo_shadow_offset) ?
797 			    (offset - copy_object->vo_shadow_offset) : 0;
798 
799 			if (copy_offset > copy_object->vo_size) {
800 				copy_offset = copy_object->vo_size;
801 			}
802 
803 			/*
804 			 * clip size with respect to shadow offset
805 			 */
806 			if (offset >= copy_object->vo_shadow_offset) {
807 				copy_size = size;
808 			} else if (size >= copy_object->vo_shadow_offset - offset) {
809 				copy_size = (size - (copy_object->vo_shadow_offset - offset));
810 			} else {
811 				copy_size = 0;
812 			}
813 
814 			if (copy_offset + copy_size > copy_object->vo_size) {
815 				if (copy_object->vo_size >= copy_offset) {
816 					copy_size = copy_object->vo_size - copy_offset;
817 				} else {
818 					copy_size = 0;
819 				}
820 			}
821 			copy_size += copy_offset;
822 		} else {
823 			copy_object = object;
824 
825 			copy_size   = offset + size;
826 			copy_offset = offset;
827 		}
828 		fault_info.interruptible = THREAD_UNINT;
829 		fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
830 		fault_info.lo_offset = copy_offset;
831 		fault_info.hi_offset = copy_size;
832 		fault_info.stealth = TRUE;
833 		assert(fault_info.cs_bypass == FALSE);
834 		assert(fault_info.pmap_cs_associated == FALSE);
835 
836 		vm_object_paging_begin(copy_object);
837 
838 		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
839 RETRY_COW_OF_LOCK_REQUEST:
840 			fault_info.cluster_size = (vm_size_t) (copy_size - i);
841 			assert(fault_info.cluster_size == copy_size - i);
842 
843 			prot =  VM_PROT_WRITE | VM_PROT_READ;
844 			page = VM_PAGE_NULL;
845 			result = vm_fault_page(copy_object, i,
846 			    VM_PROT_WRITE | VM_PROT_READ,
847 			    FALSE,
848 			    FALSE,                    /* page not looked up */
849 			    &prot,
850 			    &page,
851 			    &top_page,
852 			    (int *)0,
853 			    &error,
854 			    FALSE,
855 			    FALSE, &fault_info);
856 
857 			switch (result) {
858 			case VM_FAULT_SUCCESS:
859 				if (top_page) {
860 					vm_fault_cleanup(
861 						VM_PAGE_OBJECT(page), top_page);
862 					vm_object_lock(copy_object);
863 					vm_object_paging_begin(copy_object);
864 				}
865 				if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
866 					vm_page_lockspin_queues();
867 
868 					if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
869 						vm_page_deactivate(page);
870 					}
871 					vm_page_unlock_queues();
872 				}
873 				PAGE_WAKEUP_DONE(page);
874 				break;
875 			case VM_FAULT_RETRY:
876 				prot =  VM_PROT_WRITE | VM_PROT_READ;
877 				vm_object_lock(copy_object);
878 				vm_object_paging_begin(copy_object);
879 				goto RETRY_COW_OF_LOCK_REQUEST;
880 			case VM_FAULT_INTERRUPTED:
881 				prot =  VM_PROT_WRITE | VM_PROT_READ;
882 				vm_object_lock(copy_object);
883 				vm_object_paging_begin(copy_object);
884 				goto RETRY_COW_OF_LOCK_REQUEST;
885 			case VM_FAULT_MEMORY_SHORTAGE:
886 				VM_PAGE_WAIT();
887 				prot =  VM_PROT_WRITE | VM_PROT_READ;
888 				vm_object_lock(copy_object);
889 				vm_object_paging_begin(copy_object);
890 				goto RETRY_COW_OF_LOCK_REQUEST;
891 			case VM_FAULT_SUCCESS_NO_VM_PAGE:
892 				/* success but no VM page: fail */
893 				vm_object_paging_end(copy_object);
894 				vm_object_unlock(copy_object);
895 				OS_FALLTHROUGH;
896 			case VM_FAULT_MEMORY_ERROR:
897 				if (object != copy_object) {
898 					vm_object_deallocate(copy_object);
899 				}
900 				vm_object_lock(object);
901 				goto BYPASS_COW_COPYIN;
902 			default:
903 				panic("vm_object_update: unexpected error 0x%x"
904 				    " from vm_fault_page()\n", result);
905 			}
906 		}
907 		vm_object_paging_end(copy_object);
908 	}
909 	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
910 		if (copy_object != VM_OBJECT_NULL && copy_object != object) {
911 			vm_object_unlock(copy_object);
912 			vm_object_deallocate(copy_object);
913 			vm_object_lock(object);
914 		}
915 		return KERN_SUCCESS;
916 	}
917 	if (copy_object != VM_OBJECT_NULL && copy_object != object) {
918 		if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
919 			vm_object_lock_assert_exclusive(copy_object);
920 			copy_object->shadow_severed = TRUE;
921 			copy_object->shadowed = FALSE;
922 			copy_object->shadow = NULL;
923 			/*
924 			 * delete the ref the COW was holding on the target object
925 			 */
926 			vm_object_deallocate(object);
927 		}
928 		vm_object_unlock(copy_object);
929 		vm_object_deallocate(copy_object);
930 		vm_object_lock(object);
931 	}
932 BYPASS_COW_COPYIN:
933 
934 	/*
935 	 * when we have a really large range to check relative
936 	 * to the number of actual resident pages, we'd like
937 	 * to use the resident page list to drive our checks
938 	 * however, the object lock will get dropped while processing
939 	 * the page which means the resident queue can change which
940 	 * means we can't walk the queue as we process the pages
941 	 * we also want to do the processing in offset order to allow
942 	 * 'runs' of pages to be collected if we're being told to
943 	 * flush to disk... the resident page queue is NOT ordered.
944 	 *
945 	 * a temporary solution (until we figure out how to deal with
946 	 * large address spaces more generically) is to pre-flight
947 	 * the resident page queue (if it's small enough) and develop
948 	 * a collection of extents (that encompass actual resident pages)
949 	 * to visit.  This will at least allow us to deal with some of the
950 	 * more pathological cases in a more efficient manner.  The current
951 	 * worst case (a single resident page at the end of an extremely large
952  * range) can take minutes to complete for ranges in the terabyte
953 	 * category... since this routine is called when truncating a file,
954 	 * and we currently support files up to 16 Tbytes in size, this
955 	 * is not a theoretical problem
956 	 */
957 
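	/*
	 * Concrete example of the thresholds above (illustrative only):
	 * with EXTENT_SIZE = 256MB and MAX_EXTENTS = 8, a page at offset
	 * 0x12345000 lands in the extent whose e_base is 0x10000000,
	 * i.e. (offset & ~(EXTENT_SIZE - 1)).  The pre-flight is only
	 * attempted when the object holds fewer than RESIDENT_LIMIT (32K)
	 * resident pages and the range covers more pages than
	 * resident_page_count / (8 * MAX_EXTENTS); otherwise we fall back
	 * to a single extent spanning the requested range.
	 */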
958 	if ((object->resident_page_count < RESIDENT_LIMIT) &&
959 	    (atop_64(size) > (unsigned)(object->resident_page_count / (8 * MAX_EXTENTS)))) {
960 		vm_page_t               next;
961 		vm_object_offset_t      start;
962 		vm_object_offset_t      end;
963 		vm_object_size_t        e_mask;
964 		vm_page_t               m;
965 
966 		start = offset;
967 		end   = offset + size;
968 		num_of_extents = 0;
969 		e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
970 
971 		m = (vm_page_t) vm_page_queue_first(&object->memq);
972 
973 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
974 			next = (vm_page_t) vm_page_queue_next(&m->vmp_listq);
975 
976 			if ((m->vmp_offset >= start) && (m->vmp_offset < end)) {
977 				/*
978 				 * this is a page we're interested in
979 				 * try to fit it into a current extent
980 				 */
981 				for (n = 0; n < num_of_extents; n++) {
982 					if ((m->vmp_offset & e_mask) == extents[n].e_base) {
983 						/*
984 						 * use (PAGE_SIZE - 1) to determine the
985 						 * max offset so that we don't wrap if
986 						 * we're at the last page of the space
987 						 */
988 						if (m->vmp_offset < extents[n].e_min) {
989 							extents[n].e_min = m->vmp_offset;
990 						} else if ((m->vmp_offset + (PAGE_SIZE - 1)) > extents[n].e_max) {
991 							extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1);
992 						}
993 						break;
994 					}
995 				}
996 				if (n == num_of_extents) {
997 					/*
998 					 * didn't find a current extent that can encompass
999 					 * this page
1000 					 */
1001 					if (n < MAX_EXTENTS) {
1002 						/*
1003 						 * if we still have room,
1004 						 * create a new extent
1005 						 */
1006 						extents[n].e_base = m->vmp_offset & e_mask;
1007 						extents[n].e_min  = m->vmp_offset;
1008 						extents[n].e_max  = m->vmp_offset + (PAGE_SIZE - 1);
1009 
1010 						num_of_extents++;
1011 					} else {
1012 						/*
1013 						 * no room to create a new extent...
1014 						 * fall back to a single extent based
1015 						 * on the min and max page offsets
1016 						 * we find in the range we're interested in...
1017 						 * first, look through the extent list and
1018 						 * develop the overall min and max for the
1019 						 * pages we've looked at up to this point
1020 						 */
1021 						for (n = 1; n < num_of_extents; n++) {
1022 							if (extents[n].e_min < extents[0].e_min) {
1023 								extents[0].e_min = extents[n].e_min;
1024 							}
1025 							if (extents[n].e_max > extents[0].e_max) {
1026 								extents[0].e_max = extents[n].e_max;
1027 							}
1028 						}
1029 						/*
1030 						 * now setup to run through the remaining pages
1031 						 * to determine the overall min and max
1032 						 * offset for the specified range
1033 						 */
1034 						extents[0].e_base = 0;
1035 						e_mask = 0;
1036 						num_of_extents = 1;
1037 
1038 						/*
1039 						 * by continuing, we'll reprocess the
1040 						 * page that forced us to abandon trying
1041 						 * to develop multiple extents
1042 						 */
1043 						continue;
1044 					}
1045 				}
1046 			}
1047 			m = next;
1048 		}
1049 	} else {
1050 		extents[0].e_min = offset;
1051 		extents[0].e_max = offset + (size - 1);
1052 
1053 		num_of_extents = 1;
1054 	}
1055 	for (n = 0; n < num_of_extents; n++) {
1056 		if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
1057 		    should_flush, should_return, should_iosync, protection)) {
1058 			data_returned = TRUE;
1059 		}
1060 	}
1061 	return data_returned;
1062 }
1063 
1064 
1065 static kern_return_t
1066 vm_object_set_attributes_common(
1067 	vm_object_t     object,
1068 	boolean_t       may_cache,
1069 	memory_object_copy_strategy_t copy_strategy)
1070 {
1071 	boolean_t       object_became_ready;
1072 
1073 	if (object == VM_OBJECT_NULL) {
1074 		return KERN_INVALID_ARGUMENT;
1075 	}
1076 
1077 	/*
1078 	 *	Verify the attributes of importance
1079 	 */
1080 
1081 	switch (copy_strategy) {
1082 	case MEMORY_OBJECT_COPY_NONE:
1083 	case MEMORY_OBJECT_COPY_DELAY:
1084 		break;
1085 	default:
1086 		return KERN_INVALID_ARGUMENT;
1087 	}
1088 
1089 	if (may_cache) {
1090 		may_cache = TRUE;
1091 	}
1092 
1093 	vm_object_lock(object);
1094 
1095 	/*
1096 	 *	Copy the attributes
1097 	 */
1098 	assert(!object->internal);
1099 	object_became_ready = !object->pager_ready;
1100 	object->copy_strategy = copy_strategy;
1101 	object->can_persist = may_cache;
1102 
1103 	/*
1104 	 *	Wake up anyone waiting for the ready attribute
1105 	 *	to become asserted.
1106 	 */
1107 
1108 	if (object_became_ready) {
1109 		object->pager_ready = TRUE;
1110 		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1111 	}
1112 
1113 	vm_object_unlock(object);
1114 
1115 	return KERN_SUCCESS;
1116 }
1117 
1118 
1119 kern_return_t
1120 memory_object_synchronize_completed(
1121 	__unused    memory_object_control_t control,
1122 	__unused    memory_object_offset_t  offset,
1123 	__unused    memory_object_size_t    length)
1124 {
1125 	panic("memory_object_synchronize_completed no longer supported");
1126 	return KERN_FAILURE;
1127 }
1128 
1129 
1130 /*
1131  *	Set the memory object attribute as provided.
1132  *
1133  *	XXX This routine cannot be completed until the vm_msync, clean
1134  *	     in place, and cluster work is completed. See ifdef notyet
1135  *	     below and note that vm_object_set_attributes_common()
1136  *	     may have to be expanded.
1137  */
1138 kern_return_t
1139 memory_object_change_attributes(
1140 	memory_object_control_t         control,
1141 	memory_object_flavor_t          flavor,
1142 	memory_object_info_t            attributes,
1143 	mach_msg_type_number_t          count)
1144 {
1145 	vm_object_t                     object;
1146 	kern_return_t                   result = KERN_SUCCESS;
1147 	boolean_t                       may_cache;
1148 	boolean_t                       invalidate;
1149 	memory_object_copy_strategy_t   copy_strategy;
1150 
1151 	object = memory_object_control_to_vm_object(control);
1152 	if (object == VM_OBJECT_NULL) {
1153 		return KERN_INVALID_ARGUMENT;
1154 	}
1155 
1156 	vm_object_lock(object);
1157 
1158 	may_cache = object->can_persist;
1159 	copy_strategy = object->copy_strategy;
1160 #if notyet
1161 	invalidate = object->invalidate;
1162 #endif
1163 	vm_object_unlock(object);
1164 
1165 	switch (flavor) {
1166 	case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1167 	{
1168 		old_memory_object_behave_info_t     behave;
1169 
1170 		if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1171 			result = KERN_INVALID_ARGUMENT;
1172 			break;
1173 		}
1174 
1175 		behave = (old_memory_object_behave_info_t) attributes;
1176 
1177 		invalidate = behave->invalidate;
1178 		copy_strategy = behave->copy_strategy;
1179 
1180 		break;
1181 	}
1182 
1183 	case MEMORY_OBJECT_BEHAVIOR_INFO:
1184 	{
1185 		memory_object_behave_info_t     behave;
1186 
1187 		if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1188 			result = KERN_INVALID_ARGUMENT;
1189 			break;
1190 		}
1191 
1192 		behave = (memory_object_behave_info_t) attributes;
1193 
1194 		invalidate = behave->invalidate;
1195 		copy_strategy = behave->copy_strategy;
1196 		break;
1197 	}
1198 
1199 	case MEMORY_OBJECT_PERFORMANCE_INFO:
1200 	{
1201 		memory_object_perf_info_t       perf;
1202 
1203 		if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1204 			result = KERN_INVALID_ARGUMENT;
1205 			break;
1206 		}
1207 
1208 		perf = (memory_object_perf_info_t) attributes;
1209 
1210 		may_cache = perf->may_cache;
1211 
1212 		break;
1213 	}
1214 
1215 	case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1216 	{
1217 		old_memory_object_attr_info_t   attr;
1218 
1219 		if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1220 			result = KERN_INVALID_ARGUMENT;
1221 			break;
1222 		}
1223 
1224 		attr = (old_memory_object_attr_info_t) attributes;
1225 
1226 		may_cache = attr->may_cache;
1227 		copy_strategy = attr->copy_strategy;
1228 
1229 		break;
1230 	}
1231 
1232 	case MEMORY_OBJECT_ATTRIBUTE_INFO:
1233 	{
1234 		memory_object_attr_info_t       attr;
1235 
1236 		if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1237 			result = KERN_INVALID_ARGUMENT;
1238 			break;
1239 		}
1240 
1241 		attr = (memory_object_attr_info_t) attributes;
1242 
1243 		copy_strategy = attr->copy_strategy;
1244 		may_cache = attr->may_cache_object;
1245 
1246 		break;
1247 	}
1248 
1249 	default:
1250 		result = KERN_INVALID_ARGUMENT;
1251 		break;
1252 	}
1253 
1254 	if (result != KERN_SUCCESS) {
1255 		return result;
1256 	}
1257 
1258 	if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1259 		copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1260 	}
1261 
1262 	/*
1263 	 * XXX	may_cache may become a tri-valued variable to handle
1264 	 * XXX	uncache if not in use.
1265 	 */
1266 	return vm_object_set_attributes_common(object,
1267 	           may_cache,
1268 	           copy_strategy);
1269 }
1270 
1271 kern_return_t
1272 memory_object_get_attributes(
1273 	memory_object_control_t control,
1274 	memory_object_flavor_t  flavor,
1275 	memory_object_info_t    attributes,     /* pointer to OUT array */
1276 	mach_msg_type_number_t  *count)         /* IN/OUT */
1277 {
1278 	kern_return_t           ret = KERN_SUCCESS;
1279 	vm_object_t             object;
1280 
1281 	object = memory_object_control_to_vm_object(control);
1282 	if (object == VM_OBJECT_NULL) {
1283 		return KERN_INVALID_ARGUMENT;
1284 	}
1285 
1286 	vm_object_lock(object);
1287 
1288 	switch (flavor) {
1289 	case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1290 	{
1291 		old_memory_object_behave_info_t behave;
1292 
1293 		if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1294 			ret = KERN_INVALID_ARGUMENT;
1295 			break;
1296 		}
1297 
1298 		behave = (old_memory_object_behave_info_t) attributes;
1299 		behave->copy_strategy = object->copy_strategy;
1300 		behave->temporary = FALSE;
1301 #if notyet      /* remove when vm_msync complies and clean in place fini */
1302 		behave->invalidate = object->invalidate;
1303 #else
1304 		behave->invalidate = FALSE;
1305 #endif
1306 
1307 		*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1308 		break;
1309 	}
1310 
1311 	case MEMORY_OBJECT_BEHAVIOR_INFO:
1312 	{
1313 		memory_object_behave_info_t     behave;
1314 
1315 		if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1316 			ret = KERN_INVALID_ARGUMENT;
1317 			break;
1318 		}
1319 
1320 		behave = (memory_object_behave_info_t) attributes;
1321 		behave->copy_strategy = object->copy_strategy;
1322 		behave->temporary = FALSE;
1323 #if notyet      /* remove when vm_msync complies and clean in place fini */
1324 		behave->invalidate = object->invalidate;
1325 #else
1326 		behave->invalidate = FALSE;
1327 #endif
1328 		behave->advisory_pageout = FALSE;
1329 		behave->silent_overwrite = FALSE;
1330 		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1331 		break;
1332 	}
1333 
1334 	case MEMORY_OBJECT_PERFORMANCE_INFO:
1335 	{
1336 		memory_object_perf_info_t       perf;
1337 
1338 		if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1339 			ret = KERN_INVALID_ARGUMENT;
1340 			break;
1341 		}
1342 
1343 		perf = (memory_object_perf_info_t) attributes;
1344 		perf->cluster_size = PAGE_SIZE;
1345 		perf->may_cache = object->can_persist;
1346 
1347 		*count = MEMORY_OBJECT_PERF_INFO_COUNT;
1348 		break;
1349 	}
1350 
1351 	case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1352 	{
1353 		old_memory_object_attr_info_t       attr;
1354 
1355 		if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1356 			ret = KERN_INVALID_ARGUMENT;
1357 			break;
1358 		}
1359 
1360 		attr = (old_memory_object_attr_info_t) attributes;
1361 		attr->may_cache = object->can_persist;
1362 		attr->copy_strategy = object->copy_strategy;
1363 
1364 		*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1365 		break;
1366 	}
1367 
1368 	case MEMORY_OBJECT_ATTRIBUTE_INFO:
1369 	{
1370 		memory_object_attr_info_t       attr;
1371 
1372 		if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1373 			ret = KERN_INVALID_ARGUMENT;
1374 			break;
1375 		}
1376 
1377 		attr = (memory_object_attr_info_t) attributes;
1378 		attr->copy_strategy = object->copy_strategy;
1379 		attr->cluster_size = PAGE_SIZE;
1380 		attr->may_cache_object = object->can_persist;
1381 		attr->temporary = FALSE;
1382 
1383 		*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1384 		break;
1385 	}
1386 
1387 	default:
1388 		ret = KERN_INVALID_ARGUMENT;
1389 		break;
1390 	}
1391 
1392 	vm_object_unlock(object);
1393 
1394 	return ret;
1395 }
1396 
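/*
 * A minimal illustrative sketch (example only): querying the performance
 * attributes of a memory object through a control reference supplied by
 * the caller, and reporting whether the object may be cached.
 */
#if 0   /* example only */
static boolean_t
example_object_may_cache(memory_object_control_t control)
{
	memory_object_perf_info_data_t  perf;
	mach_msg_type_number_t          count = MEMORY_OBJECT_PERF_INFO_COUNT;

	if (memory_object_get_attributes(control,
	    MEMORY_OBJECT_PERFORMANCE_INFO,
	    (memory_object_info_t)&perf,
	    &count) != KERN_SUCCESS) {
		return FALSE;
	}
	return perf.may_cache;
}
#endif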
1397 
1398 kern_return_t
1399 memory_object_iopl_request(
1400 	ipc_port_t              port,
1401 	memory_object_offset_t  offset,
1402 	upl_size_t              *upl_size,
1403 	upl_t                   *upl_ptr,
1404 	upl_page_info_array_t   user_page_list,
1405 	unsigned int            *page_list_count,
1406 	upl_control_flags_t     *flags,
1407 	vm_tag_t                tag)
1408 {
1409 	vm_object_t             object;
1410 	kern_return_t           ret;
1411 	upl_control_flags_t     caller_flags;
1412 	vm_named_entry_t        named_entry;
1413 
1414 	caller_flags = *flags;
1415 
1416 	if (caller_flags & ~UPL_VALID_FLAGS) {
1417 		/*
1418 		 * For forward compatibility's sake,
1419 		 * reject any unknown flag.
1420 		 */
1421 		return KERN_INVALID_VALUE;
1422 	}
1423 
1424 	named_entry = mach_memory_entry_from_port(port);
1425 	if (named_entry != NULL) {
1426 		/* a few checks to make sure user is obeying rules */
1427 		if (*upl_size == 0) {
1428 			if (offset >= named_entry->size) {
1429 				return KERN_INVALID_RIGHT;
1430 			}
1431 			*upl_size = (upl_size_t)(named_entry->size - offset);
1432 			if (*upl_size != named_entry->size - offset) {
1433 				return KERN_INVALID_ARGUMENT;
1434 			}
1435 		}
1436 		if (caller_flags & UPL_COPYOUT_FROM) {
1437 			if ((named_entry->protection & VM_PROT_READ)
1438 			    != VM_PROT_READ) {
1439 				return KERN_INVALID_RIGHT;
1440 			}
1441 		} else {
1442 			if ((named_entry->protection &
1443 			    (VM_PROT_READ | VM_PROT_WRITE))
1444 			    != (VM_PROT_READ | VM_PROT_WRITE)) {
1445 				return KERN_INVALID_RIGHT;
1446 			}
1447 		}
1448 		if (named_entry->size < (offset + *upl_size)) {
1449 			return KERN_INVALID_ARGUMENT;
1450 		}
1451 
1452 		/* the caller's offset parameter is defined to be the offset */
1453 		/* from the beginning of the named entry within the object */
1454 		offset = offset + named_entry->offset;
1455 		offset += named_entry->data_offset;
1456 
1457 		if (named_entry->is_sub_map ||
1458 		    named_entry->is_copy) {
1459 			return KERN_INVALID_ARGUMENT;
1460 		}
1461 		if (!named_entry->is_object) {
1462 			return KERN_INVALID_ARGUMENT;
1463 		}
1464 
1465 		object = vm_named_entry_to_vm_object(named_entry);
1466 		vm_object_reference(object);
1467 	} else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
1468 		panic("unexpected IKOT_MEM_OBJ_CONTROL: %p", port);
1469 	} else {
1470 		return KERN_INVALID_ARGUMENT;
1471 	}
1472 	if (object == VM_OBJECT_NULL) {
1473 		return KERN_INVALID_ARGUMENT;
1474 	}
1475 
1476 	if (!object->private) {
1477 		if (object->phys_contiguous) {
1478 			*flags = UPL_PHYS_CONTIG;
1479 		} else {
1480 			*flags = 0;
1481 		}
1482 	} else {
1483 		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
1484 	}
1485 
1486 	ret = vm_object_iopl_request(object,
1487 	    offset,
1488 	    *upl_size,
1489 	    upl_ptr,
1490 	    user_page_list,
1491 	    page_list_count,
1492 	    caller_flags,
1493 	    tag);
1494 	vm_object_deallocate(object);
1495 	return ret;
1496 }
1497 
1498 /*
1499  *	Routine:	memory_object_upl_request [interface]
1500  *	Purpose:
1501  *		Cause the population of a portion of a vm_object.
1502  *		Depending on the nature of the request, the pages
1503  *		returned may contain valid data or be uninitialized.
1504  *
1505  */
1506 
1507 kern_return_t
1508 memory_object_upl_request(
1509 	memory_object_control_t control,
1510 	memory_object_offset_t  offset,
1511 	upl_size_t              size,
1512 	upl_t                   *upl_ptr,
1513 	upl_page_info_array_t   user_page_list,
1514 	unsigned int            *page_list_count,
1515 	int                     cntrl_flags,
1516 	int                     tag)
1517 {
1518 	vm_object_t             object;
1519 	vm_tag_t                vmtag = (vm_tag_t)tag;
1520 	assert(vmtag == tag);
1521 
1522 	object = memory_object_control_to_vm_object(control);
1523 	if (object == VM_OBJECT_NULL) {
1524 		return KERN_TERMINATED;
1525 	}
1526 
1527 	return vm_object_upl_request(object,
1528 	           offset,
1529 	           size,
1530 	           upl_ptr,
1531 	           user_page_list,
1532 	           page_list_count,
1533 	           (upl_control_flags_t)(unsigned int) cntrl_flags,
1534 	           vmtag);
1535 }
1536 
1537 /*
1538  *	Routine:	memory_object_super_upl_request [interface]
1539  *	Purpose:
1540  *		Cause the population of a portion of a vm_object
1541  *		in much the same way as memory_object_upl_request.
1542  *		Depending on the nature of the request, the pages
1543  *		returned may contain valid data or be uninitialized.
1544  *		However, the region may be expanded up to the super
1545  *		cluster size provided.
1546  */
1547 
1548 kern_return_t
1549 memory_object_super_upl_request(
1550 	memory_object_control_t control,
1551 	memory_object_offset_t  offset,
1552 	upl_size_t              size,
1553 	upl_size_t              super_cluster,
1554 	upl_t                   *upl,
1555 	upl_page_info_t         *user_page_list,
1556 	unsigned int            *page_list_count,
1557 	int                     cntrl_flags,
1558 	int                     tag)
1559 {
1560 	vm_object_t             object;
1561 	vm_tag_t                vmtag = (vm_tag_t)tag;
1562 	assert(vmtag == tag);
1563 
1564 	object = memory_object_control_to_vm_object(control);
1565 	if (object == VM_OBJECT_NULL) {
1566 		return KERN_INVALID_ARGUMENT;
1567 	}
1568 
1569 	return vm_object_super_upl_request(object,
1570 	           offset,
1571 	           size,
1572 	           super_cluster,
1573 	           upl,
1574 	           user_page_list,
1575 	           page_list_count,
1576 	           (upl_control_flags_t)(unsigned int) cntrl_flags,
1577 	           vmtag);
1578 }
1579 
1580 kern_return_t
1581 memory_object_cluster_size(
1582 	memory_object_control_t control,
1583 	memory_object_offset_t  *start,
1584 	vm_size_t               *length,
1585 	uint32_t                *io_streaming,
1586 	memory_object_fault_info_t mo_fault_info)
1587 {
1588 	vm_object_t             object;
1589 	vm_object_fault_info_t  fault_info;
1590 
1591 	object = memory_object_control_to_vm_object(control);
1592 
1593 	if (object == VM_OBJECT_NULL || object->paging_offset > *start) {
1594 		return KERN_INVALID_ARGUMENT;
1595 	}
1596 
1597 	*start -= object->paging_offset;
1598 
1599 	fault_info = (vm_object_fault_info_t)(uintptr_t) mo_fault_info;
1600 	vm_object_cluster_size(object,
1601 	    (vm_object_offset_t *)start,
1602 	    length,
1603 	    fault_info,
1604 	    io_streaming);
1605 
1606 	*start += object->paging_offset;
1607 
1608 	return KERN_SUCCESS;
1609 }
1610 
1611 
1612 /*
1613  *	Routine:	host_default_memory_manager [interface]
1614  *	Purpose:
1615  *		set/get the default memory manager port and default cluster
1616  *		size.
1617  *
1618  *		If successful, consumes the supplied naked send right.
1619  */
1620 kern_return_t
1621 host_default_memory_manager(
1622 	host_priv_t             host_priv,
1623 	memory_object_default_t *default_manager,
1624 	__unused memory_object_cluster_size_t cluster_size)
1625 {
1626 	memory_object_default_t current_manager;
1627 	memory_object_default_t new_manager;
1628 	memory_object_default_t returned_manager;
1629 	kern_return_t result = KERN_SUCCESS;
1630 
1631 	if (host_priv == HOST_PRIV_NULL) {
1632 		return KERN_INVALID_HOST;
1633 	}
1634 
1635 	new_manager = *default_manager;
1636 	lck_mtx_lock(&memory_manager_default_lock);
1637 	current_manager = memory_manager_default;
1638 	returned_manager = MEMORY_OBJECT_DEFAULT_NULL;
1639 
1640 	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1641 		/*
1642 		 *	Retrieve the current value.
1643 		 */
1644 		returned_manager = ipc_port_make_send(current_manager);
1645 	} else {
1646 		/*
1647 		 *	Only allow the kernel to change the value.
1648 		 */
1649 		extern task_t kernel_task;
1650 		if (current_task() != kernel_task) {
1651 			result = KERN_NO_ACCESS;
1652 			goto out;
1653 		}
1654 
1655 		/*
1656 		 *	If this is the first non-null manager, start
1657 		 *	up the internal pager support.
1658 		 */
1659 		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1660 			result = vm_pageout_internal_start();
1661 			if (result != KERN_SUCCESS) {
1662 				goto out;
1663 			}
1664 		}
1665 
1666 		/*
1667 		 *	Retrieve the current value,
1668 		 *	and replace it with the supplied value.
1669 		 *	We return the old reference to the caller
1670 		 *	but we have to take a reference on the new
1671 		 *	one.
1672 		 */
1673 		returned_manager = current_manager;
1674 		memory_manager_default = ipc_port_make_send(new_manager);
1675 
1676 		/*
1677 		 *	In case anyone's been waiting for a memory
1678 		 *	manager to be established, wake them up.
1679 		 */
1680 
1681 		thread_wakeup((event_t) &memory_manager_default);
1682 
1683 		/*
1684 		 * Now that we have a default pager for anonymous memory,
1685 		 * reactivate all the throttled pages (i.e. dirty pages with
1686 		 * no pager).
1687 		 */
1688 		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1689 			vm_page_reactivate_all_throttled();
1690 		}
1691 	}
1692 out:
1693 	lck_mtx_unlock(&memory_manager_default_lock);
1694 
1695 	*default_manager = returned_manager;
1696 	return result;
1697 }
1698 
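/*
 * Hedged, editorial sketch (not part of the original source): one way a
 * kernel-internal caller could exercise host_default_memory_manager() above.
 * Passing a real port installs it as the default pager and consumes that
 * naked send right; passing MEMORY_OBJECT_DEFAULT_NULL merely reads the
 * current setting back.  "new_pager_port" is hypothetical, and the returned
 * right is assumed to be droppable with ipc_port_release_send().
 */
static __unused kern_return_t
example_default_pager_usage(
	host_priv_t             host_priv,
	memory_object_default_t new_pager_port)
{
	memory_object_default_t manager;
	kern_return_t           kr;

	/* Install: on success the send right in new_pager_port is consumed. */
	manager = new_pager_port;
	kr = host_default_memory_manager(host_priv, &manager, 0);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (manager != MEMORY_OBJECT_DEFAULT_NULL) {
		/* "manager" now holds the previous default pager's send right. */
		ipc_port_release_send(manager);
	}

	/* Query: a null "in" value just returns the current default pager. */
	manager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv, &manager, 0);
	if (kr == KERN_SUCCESS && manager != MEMORY_OBJECT_DEFAULT_NULL) {
		ipc_port_release_send(manager);
	}
	return kr;
}
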
1699 /*
1700  *	Routine:	memory_manager_default_reference
1701  *	Purpose:
1702  *		Returns a naked send right for the default
1703  *		memory manager.  The returned right is always
1704  *		valid (not IP_NULL or IP_DEAD).
1705  */
1706 
1707 __private_extern__ memory_object_default_t
1708 memory_manager_default_reference(void)
1709 {
1710 	memory_object_default_t current_manager;
1711 
1712 	lck_mtx_lock(&memory_manager_default_lock);
1713 	current_manager = memory_manager_default;
1714 	while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1715 		wait_result_t res;
1716 
1717 		res = lck_mtx_sleep(&memory_manager_default_lock,
1718 		    LCK_SLEEP_DEFAULT,
1719 		    (event_t) &memory_manager_default,
1720 		    THREAD_UNINT);
1721 		assert(res == THREAD_AWAKENED);
1722 		current_manager = memory_manager_default;
1723 	}
1724 	current_manager = ipc_port_make_send(current_manager);
1725 	lck_mtx_unlock(&memory_manager_default_lock);
1726 
1727 	return current_manager;
1728 }
1729 
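/*
 * Hedged, editorial sketch: a caller blocking in the routine above until a
 * default pager has been registered, then dropping the naked send right it
 * was handed once it is done with it.  The intervening use of the port is
 * elided; ipc_port_release_send() is assumed to be the way such a right is
 * released.
 */
static __unused void
example_wait_for_default_pager(void)
{
	memory_object_default_t dmm;

	dmm = memory_manager_default_reference();      /* may block */
	/* ... hand the right to a VM object or otherwise use it here ... */
	ipc_port_release_send(dmm);
}
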
1730 /*
1731  *	Routine:	memory_manager_default_check
1732  *
1733  *	Purpose:
1734  *		Check whether a default memory manager has been set
1735  *		up yet.  Returns KERN_SUCCESS if one exists and
1736  *		KERN_FAILURE if it does not.
1737  *
1738  *		If there is no default memory manager, log an error,
1739  *		but only the first time.
1740  *
1741  */
1742 __private_extern__ kern_return_t
1743 memory_manager_default_check(void)
1744 {
1745 	memory_object_default_t current;
1746 
1747 	lck_mtx_lock(&memory_manager_default_lock);
1748 	current = memory_manager_default;
1749 	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1750 		static boolean_t logged;        /* initialized to 0 */
1751 		boolean_t       complain = !logged;
1752 		logged = TRUE;
1753 		lck_mtx_unlock(&memory_manager_default_lock);
1754 		if (complain) {
1755 			printf("Warning: No default memory manager\n");
1756 		}
1757 		return KERN_FAILURE;
1758 	} else {
1759 		lck_mtx_unlock(&memory_manager_default_lock);
1760 		return KERN_SUCCESS;
1761 	}
1762 }
1763 
1764 /* Allow manipulation of individual page state.  This is actually part of */
1765 /* the UPL regimen but takes place on the object rather than on a UPL */
1766 
1767 kern_return_t
1768 memory_object_page_op(
1769 	memory_object_control_t control,
1770 	memory_object_offset_t  offset,
1771 	int                     ops,
1772 	ppnum_t                 *phys_entry,
1773 	int                     *flags)
1774 {
1775 	vm_object_t             object;
1776 
1777 	object = memory_object_control_to_vm_object(control);
1778 	if (object == VM_OBJECT_NULL) {
1779 		return KERN_INVALID_ARGUMENT;
1780 	}
1781 
1782 	return vm_object_page_op(object, offset, ops, phys_entry, flags);
1783 }
1784 
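/*
 * Hedged, editorial sketch: querying the state of a single page through
 * memory_object_page_op() above.  The UPL_POP_* flag names are assumed to
 * come from <mach/memory_object_types.h>; with ops == 0 the call is assumed
 * to only report state, filling *flags and *phys_entry for a resident page
 * and returning a failure code when no page is resident at that offset.
 */
static __unused boolean_t
example_page_is_resident_and_dirty(
	memory_object_control_t control,
	memory_object_offset_t  offset)
{
	ppnum_t phys = 0;
	int     flags = 0;

	if (memory_object_page_op(control, offset, 0, &phys, &flags) != KERN_SUCCESS) {
		return FALSE;           /* no resident page at this offset */
	}
	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}
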
1785 /*
1786  * memory_object_range_op offers a performance enhancement over
1787  * memory_object_page_op for operations that do not require page-level
1788  * state to be returned from the call.  Page_op was created to provide
1789  * a low-cost alternative to page manipulation via UPLs when only a single
1790  * page was involved.  The range_op call extends the _op family of
1791  * functions to work on multiple pages, where the lack of page-level
1792  * state handling allows the caller to avoid the overhead of UPL structures.
1793  */
1794 
1795 kern_return_t
1796 memory_object_range_op(
1797 	memory_object_control_t control,
1798 	memory_object_offset_t  offset_beg,
1799 	memory_object_offset_t  offset_end,
1800 	int                     ops,
1801 	int                     *range)
1802 {
1803 	vm_object_t             object;
1804 
1805 	object = memory_object_control_to_vm_object(control);
1806 	if (object == VM_OBJECT_NULL) {
1807 		return KERN_INVALID_ARGUMENT;
1808 	}
1809 
1810 	return vm_object_range_op(object,
1811 	           offset_beg,
1812 	           offset_end,
1813 	           ops,
1814 	           (uint32_t *) range);
1815 }
1816 
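/*
 * Hedged, editorial sketch: using memory_object_range_op() above to measure
 * how much of a range is already resident without building a UPL.  The
 * UPL_ROP_PRESENT flag is assumed to come from <mach/memory_object_types.h>
 * and to report, in *range, the byte length of the leading run of resident
 * pages starting at offset_beg.
 */
static __unused uint32_t
example_resident_run_length(
	memory_object_control_t control,
	memory_object_offset_t  offset_beg,
	memory_object_offset_t  offset_end)
{
	int resident_bytes = 0;

	if (memory_object_range_op(control, offset_beg, offset_end,
	    UPL_ROP_PRESENT, &resident_bytes) != KERN_SUCCESS) {
		return 0;
	}
	return (uint32_t)resident_bytes;
}
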
1817 
1818 void
1819 memory_object_mark_used(
1820 	memory_object_control_t control)
1821 {
1822 	vm_object_t             object;
1823 
1824 	if (control == NULL) {
1825 		return;
1826 	}
1827 
1828 	object = memory_object_control_to_vm_object(control);
1829 
1830 	if (object != VM_OBJECT_NULL) {
1831 		vm_object_cache_remove(object);
1832 	}
1833 }
1834 
1835 
1836 void
1837 memory_object_mark_unused(
1838 	memory_object_control_t control,
1839 	__unused boolean_t      rage)
1840 {
1841 	vm_object_t             object;
1842 
1843 	if (control == NULL) {
1844 		return;
1845 	}
1846 
1847 	object = memory_object_control_to_vm_object(control);
1848 
1849 	if (object != VM_OBJECT_NULL) {
1850 		vm_object_cache_add(object);
1851 	}
1852 }
1853 
1854 void
1855 memory_object_mark_io_tracking(
1856 	memory_object_control_t control)
1857 {
1858 	vm_object_t             object;
1859 
1860 	if (control == NULL) {
1861 		return;
1862 	}
1863 	object = memory_object_control_to_vm_object(control);
1864 
1865 	if (object != VM_OBJECT_NULL) {
1866 		vm_object_lock(object);
1867 		object->io_tracking = TRUE;
1868 		vm_object_unlock(object);
1869 	}
1870 }
1871 
1872 void
1873 memory_object_mark_trusted(
1874 	memory_object_control_t control)
1875 {
1876 	vm_object_t             object;
1877 
1878 	if (control == NULL) {
1879 		return;
1880 	}
1881 	object = memory_object_control_to_vm_object(control);
1882 
1883 	if (object != VM_OBJECT_NULL) {
1884 		vm_object_lock(object);
1885 		object->pager_trusted = TRUE;
1886 		vm_object_unlock(object);
1887 	}
1888 }
1889 
1890 #if CONFIG_SECLUDED_MEMORY
1891 void
1892 memory_object_mark_eligible_for_secluded(
1893 	memory_object_control_t control,
1894 	boolean_t               eligible_for_secluded)
1895 {
1896 	vm_object_t             object;
1897 
1898 	if (control == NULL) {
1899 		return;
1900 	}
1901 	object = memory_object_control_to_vm_object(control);
1902 
1903 	if (object == VM_OBJECT_NULL) {
1904 		return;
1905 	}
1906 
1907 	vm_object_lock(object);
1908 	if (eligible_for_secluded &&
1909 	    secluded_for_filecache && /* global boot-arg */
1910 	    !object->eligible_for_secluded) {
1911 		object->eligible_for_secluded = TRUE;
1912 		vm_page_secluded.eligible_for_secluded += object->resident_page_count;
1913 	} else if (!eligible_for_secluded &&
1914 	    object->eligible_for_secluded) {
1915 		object->eligible_for_secluded = FALSE;
1916 		vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
1917 		if (object->resident_page_count) {
1918 			/* XXX FBDP TODO: flush pages from secluded queue? */
1919 			// printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
1920 		}
1921 	}
1922 	vm_object_unlock(object);
1923 }
1924 #endif /* CONFIG_SECLUDED_MEMORY */
1925 
1926 kern_return_t
1927 memory_object_pages_resident(
1928 	memory_object_control_t control,
1929 	boolean_t               *has_pages_resident)
1930 {
1931 	vm_object_t             object;
1932 
1933 	*has_pages_resident = FALSE;
1934 
1935 	object = memory_object_control_to_vm_object(control);
1936 	if (object == VM_OBJECT_NULL) {
1937 		return KERN_INVALID_ARGUMENT;
1938 	}
1939 
1940 	if (object->resident_page_count) {
1941 		*has_pages_resident = TRUE;
1942 	}
1943 
1944 	return KERN_SUCCESS;
1945 }
1946 
1947 kern_return_t
1948 memory_object_signed(
1949 	memory_object_control_t control,
1950 	boolean_t               is_signed)
1951 {
1952 	vm_object_t     object;
1953 
1954 	object = memory_object_control_to_vm_object(control);
1955 	if (object == VM_OBJECT_NULL) {
1956 		return KERN_INVALID_ARGUMENT;
1957 	}
1958 
1959 	vm_object_lock(object);
1960 	object->code_signed = is_signed;
1961 	vm_object_unlock(object);
1962 
1963 	return KERN_SUCCESS;
1964 }
1965 
1966 boolean_t
1967 memory_object_is_signed(
1968 	memory_object_control_t control)
1969 {
1970 	boolean_t       is_signed;
1971 	vm_object_t     object;
1972 
1973 	object = memory_object_control_to_vm_object(control);
1974 	if (object == VM_OBJECT_NULL) {
1975 		return FALSE;
1976 	}
1977 
1978 	vm_object_lock_shared(object);
1979 	is_signed = object->code_signed;
1980 	vm_object_unlock(object);
1981 
1982 	return is_signed;
1983 }
1984 
1985 boolean_t
1986 memory_object_is_shared_cache(
1987 	memory_object_control_t control)
1988 {
1989 	vm_object_t     object = VM_OBJECT_NULL;
1990 
1991 	object = memory_object_control_to_vm_object(control);
1992 	if (object == VM_OBJECT_NULL) {
1993 		return FALSE;
1994 	}
1995 
1996 	return object->object_is_shared_cache;
1997 }
1998 
1999 __private_extern__ memory_object_control_t
2000 memory_object_control_allocate(
2001 	vm_object_t             object)
2002 {
2003 	return object;
2004 }
2005 
2006 __private_extern__ void
2007 memory_object_control_collapse(
2008 	memory_object_control_t *control,
2009 	vm_object_t             object)
2010 {
2011 	*control = object;
2012 }
2013 
2014 __private_extern__ vm_object_t
2015 memory_object_control_to_vm_object(
2016 	memory_object_control_t control)
2017 {
2018 	return control;
2019 }
2020 
2021 __private_extern__ vm_object_t
2022 memory_object_to_vm_object(
2023 	memory_object_t mem_obj)
2024 {
2025 	memory_object_control_t mo_control;
2026 
2027 	if (mem_obj == MEMORY_OBJECT_NULL) {
2028 		return VM_OBJECT_NULL;
2029 	}
2030 	mo_control = mem_obj->mo_control;
2031 	if (mo_control == NULL) {
2032 		return VM_OBJECT_NULL;
2033 	}
2034 	return memory_object_control_to_vm_object(mo_control);
2035 }
2036 
2037 memory_object_control_t
2038 convert_port_to_mo_control(
2039 	__unused mach_port_t    port)
2040 {
2041 	return MEMORY_OBJECT_CONTROL_NULL;
2042 }
2043 
2044 
2045 mach_port_t
2046 convert_mo_control_to_port(
2047 	__unused memory_object_control_t        control)
2048 {
2049 	return MACH_PORT_NULL;
2050 }
2051 
2052 void
2053 memory_object_control_reference(
2054 	__unused memory_object_control_t        control)
2055 {
2056 	return;
2057 }
2058 
2059 /*
2060  * We only every issue one of these references, so kill it
2061  * We only ever issue one of these references, so kill it
2062  * when that gets released (we should switch to real reference
2063  * counting in true port-less EMMI).
2064 void
2065 memory_object_control_deallocate(
2066 	__unused memory_object_control_t control)
2067 {
2068 }
2069 
2070 void
2071 memory_object_control_disable(
2072 	memory_object_control_t *control)
2073 {
2074 	assert(*control != VM_OBJECT_NULL);
2075 	*control = VM_OBJECT_NULL;
2076 }
2077 
2078 memory_object_t
2079 convert_port_to_memory_object(
2080 	__unused mach_port_t    port)
2081 {
2082 	return MEMORY_OBJECT_NULL;
2083 }
2084 
2085 
2086 mach_port_t
2087 convert_memory_object_to_port(
2088 	__unused memory_object_t        object)
2089 {
2090 	return MACH_PORT_NULL;
2091 }
2092 
2093 
2094 /* Routine memory_object_reference */
2095 void
2096 memory_object_reference(
2097 	memory_object_t memory_object)
2098 {
2099 	(memory_object->mo_pager_ops->memory_object_reference)(
2100 		memory_object);
2101 }
2102 
2103 /* Routine memory_object_deallocate */
2104 void
2105 memory_object_deallocate(
2106 	memory_object_t memory_object)
2107 {
2108 	(memory_object->mo_pager_ops->memory_object_deallocate)(
2109 		memory_object);
2110 }
2111 
2112 
2113 /* Routine memory_object_init */
2114 kern_return_t
2115 memory_object_init
2116 (
2117 	memory_object_t memory_object,
2118 	memory_object_control_t memory_control,
2119 	memory_object_cluster_size_t memory_object_page_size
2120 )
2121 {
2122 	return (memory_object->mo_pager_ops->memory_object_init)(
2123 		memory_object,
2124 		memory_control,
2125 		memory_object_page_size);
2126 }
2127 
2128 /* Routine memory_object_terminate */
2129 kern_return_t
2130 memory_object_terminate
2131 (
2132 	memory_object_t memory_object
2133 )
2134 {
2135 	return (memory_object->mo_pager_ops->memory_object_terminate)(
2136 		memory_object);
2137 }
2138 
2139 /* Routine memory_object_data_request */
2140 kern_return_t
2141 memory_object_data_request
2142 (
2143 	memory_object_t memory_object,
2144 	memory_object_offset_t offset,
2145 	memory_object_cluster_size_t length,
2146 	vm_prot_t desired_access,
2147 	memory_object_fault_info_t fault_info
2148 )
2149 {
2150 	return (memory_object->mo_pager_ops->memory_object_data_request)(
2151 		memory_object,
2152 		offset,
2153 		length,
2154 		desired_access,
2155 		fault_info);
2156 }
2157 
2158 /* Routine memory_object_data_return */
2159 kern_return_t
2160 memory_object_data_return
2161 (
2162 	memory_object_t memory_object,
2163 	memory_object_offset_t offset,
2164 	memory_object_cluster_size_t size,
2165 	memory_object_offset_t *resid_offset,
2166 	int     *io_error,
2167 	boolean_t dirty,
2168 	boolean_t kernel_copy,
2169 	int     upl_flags
2170 )
2171 {
2172 	return (memory_object->mo_pager_ops->memory_object_data_return)(
2173 		memory_object,
2174 		offset,
2175 		size,
2176 		resid_offset,
2177 		io_error,
2178 		dirty,
2179 		kernel_copy,
2180 		upl_flags);
2181 }
2182 
2183 /* Routine memory_object_data_initialize */
2184 kern_return_t
2185 memory_object_data_initialize
2186 (
2187 	memory_object_t memory_object,
2188 	memory_object_offset_t offset,
2189 	memory_object_cluster_size_t size
2190 )
2191 {
2192 	return (memory_object->mo_pager_ops->memory_object_data_initialize)(
2193 		memory_object,
2194 		offset,
2195 		size);
2196 }
2197 
2198 /* Routine memory_object_data_unlock */
2199 kern_return_t
2200 memory_object_data_unlock
2201 (
2202 	memory_object_t memory_object,
2203 	memory_object_offset_t offset,
2204 	memory_object_size_t size,
2205 	vm_prot_t desired_access
2206 )
2207 {
2208 	return (memory_object->mo_pager_ops->memory_object_data_unlock)(
2209 		memory_object,
2210 		offset,
2211 		size,
2212 		desired_access);
2213 }
2214 
2215 /* Routine memory_object_synchronize */
2216 kern_return_t
2217 memory_object_synchronize
2218 (
2219 	memory_object_t memory_object,
2220 	memory_object_offset_t offset,
2221 	memory_object_size_t size,
2222 	vm_sync_t sync_flags
2223 )
2224 {
2225 	panic("memory_object_synchronize no longer supported");
2226 
2227 	return (memory_object->mo_pager_ops->memory_object_synchronize)(
2228 		memory_object,
2229 		offset,
2230 		size,
2231 		sync_flags);
2232 }
2233 
2234 
2235 /*
2236  * memory_object_map() is called by VM (in vm_map_enter() and its variants)
2237  * each time a "named" VM object gets mapped directly or indirectly
2238  * (copy-on-write mapping).  A "named" VM object has an extra reference held
2239  * by the pager to keep it alive until the pager decides that the
2240  * memory object (and its VM object) can be reclaimed.
2241  * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
2242  * the mappings of that memory object have been removed.
2243  *
2244  * For a given VM object, calls to memory_object_map() and memory_object_last_unmap()
2245  * are serialized (through object->mapping_in_progress), to ensure that the
2246  * pager gets a consistent view of the mapping status of the memory object.
2247  *
2248  * This allows the pager to keep track of how many times a memory object
2249  * has been mapped and with which protections, to decide when it can be
2250  * reclaimed.
2251  */
2252 
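/*
 * Hedged, editorial sketch: how a pager behind these entry points might use
 * the map/last_unmap callbacks described above to track whether its memory
 * object is currently mapped.  "example_pager" and its fields are
 * hypothetical; the callback signatures are assumed to match the
 * mo_pager_ops table used by the dispatch routines below, and the embedded
 * "struct memory_object" header is assumed to come first, as real pagers do.
 */
struct example_pager {
	struct memory_object    ep_hdr;         /* mo_pager_ops, mo_control */
	boolean_t               ep_is_mapped;   /* any mappings outstanding? */
	vm_prot_t               ep_max_prot;    /* strongest protection requested */
};

static __unused kern_return_t
example_pager_map(memory_object_t mem_obj, vm_prot_t prot)
{
	struct example_pager *pager = (struct example_pager *)mem_obj;

	/* VM serializes map/last_unmap per object (mapping_in_progress). */
	pager->ep_is_mapped = TRUE;
	pager->ep_max_prot |= prot;
	return KERN_SUCCESS;
}

static __unused kern_return_t
example_pager_last_unmap(memory_object_t mem_obj)
{
	struct example_pager *pager = (struct example_pager *)mem_obj;

	/* Last mapping is gone; the pager may now consider reclaiming. */
	pager->ep_is_mapped = FALSE;
	return KERN_SUCCESS;
}
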
2253 /* Routine memory_object_map */
2254 kern_return_t
2255 memory_object_map
2256 (
2257 	memory_object_t memory_object,
2258 	vm_prot_t prot
2259 )
2260 {
2261 	return (memory_object->mo_pager_ops->memory_object_map)(
2262 		memory_object,
2263 		prot);
2264 }
2265 
2266 /* Routine memory_object_last_unmap */
2267 kern_return_t
2268 memory_object_last_unmap
2269 (
2270 	memory_object_t memory_object
2271 )
2272 {
2273 	return (memory_object->mo_pager_ops->memory_object_last_unmap)(
2274 		memory_object);
2275 }
2276 
2277 /* Routine memory_object_data_reclaim */
2278 kern_return_t
2279 memory_object_data_reclaim
2280 (
2281 	memory_object_t memory_object,
2282 	boolean_t       reclaim_backing_store
2283 )
2284 {
2285 	if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL) {
2286 		return KERN_NOT_SUPPORTED;
2287 	}
2288 	return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
2289 		memory_object,
2290 		reclaim_backing_store);
2291 }
2292 
2293 boolean_t
2294 memory_object_backing_object
2295 (
2296 	memory_object_t memory_object,
2297 	memory_object_offset_t offset,
2298 	vm_object_t *backing_object,
2299 	vm_object_offset_t *backing_offset)
2300 {
2301 	if (memory_object->mo_pager_ops->memory_object_backing_object == NULL) {
2302 		return FALSE;
2303 	}
2304 	return (memory_object->mo_pager_ops->memory_object_backing_object)(
2305 		memory_object,
2306 		offset,
2307 		backing_object,
2308 		backing_offset);
2309 }
2310 
2311 upl_t
2312 convert_port_to_upl(
2313 	__unused ipc_port_t      port)
2314 {
2315 	return NULL;
2316 }
2317 
2318 mach_port_t
2319 convert_upl_to_port(
2320 	__unused upl_t          upl)
2321 {
2322 	return MACH_PORT_NULL;
2323 }
2324