xref: /xnu-12377.1.9/osfmk/vm/vm_upl.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_upl.h>
30 #include <vm/vm_pageout_internal.h>
31 #include <vm/vm_page_internal.h>
32 #include <vm/vm_map_internal.h>
33 #include <mach/upl_server.h>
34 #include <kern/host_statistics.h>
35 #include <vm/vm_purgeable_internal.h>
36 #include <vm/vm_object_internal.h>
37 #include <vm/vm_ubc.h>
38 
39 extern boolean_t hibernate_cleaning_in_progress;
40 
41 /* map a (whole) upl into an address space */
42 kern_return_t
vm_upl_map(vm_map_t map,upl_t upl,vm_address_t * dst_addr)43 vm_upl_map(
44 	vm_map_t                map,
45 	upl_t                   upl,
46 	vm_address_t            *dst_addr)
47 {
48 	vm_map_offset_t         map_addr;
49 	kern_return_t           kr;
50 
51 	if (VM_MAP_NULL == map) {
52 		return KERN_INVALID_ARGUMENT;
53 	}
54 
55 	kr = vm_map_enter_upl(map, upl, &map_addr);
56 	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
57 	return kr;
58 }
59 
60 kern_return_t
vm_upl_unmap(vm_map_t map,upl_t upl)61 vm_upl_unmap(
62 	vm_map_t                map,
63 	upl_t                   upl)
64 {
65 	if (VM_MAP_NULL == map) {
66 		return KERN_INVALID_ARGUMENT;
67 	}
68 
69 	return vm_map_remove_upl(map, upl);
70 }
71 
72 /* map a part of a upl into an address space with requested protection. */
73 kern_return_t
vm_upl_map_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_address_t * dst_addr)74 vm_upl_map_range(
75 	vm_map_t                map,
76 	upl_t                   upl,
77 	vm_offset_t             offset_to_map,
78 	vm_size_t               size_to_map,
79 	vm_prot_t               prot_to_map,
80 	vm_address_t            *dst_addr)
81 {
82 	vm_map_offset_t         map_addr, aligned_offset_to_map, adjusted_offset;
83 	kern_return_t           kr;
84 
85 	if (VM_MAP_NULL == map) {
86 		return KERN_INVALID_ARGUMENT;
87 	}
88 	aligned_offset_to_map = vm_map_trunc_page(offset_to_map, vm_map_page_mask(map));
89 	adjusted_offset =  offset_to_map - aligned_offset_to_map;
90 	size_to_map = vm_map_round_page(size_to_map + adjusted_offset, vm_map_page_mask(map));
91 
92 	kr = vm_map_enter_upl_range(map, upl, aligned_offset_to_map, size_to_map, prot_to_map, &map_addr);
93 	*dst_addr = CAST_DOWN(vm_address_t, (map_addr + adjusted_offset));
94 	return kr;
95 }
96 
97 /* unmap a part of a upl that was mapped in the address space. */
98 kern_return_t
vm_upl_unmap_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)99 vm_upl_unmap_range(
100 	vm_map_t                map,
101 	upl_t                   upl,
102 	vm_offset_t             offset_to_unmap,
103 	vm_size_t               size_to_unmap)
104 {
105 	vm_map_offset_t         aligned_offset_to_unmap, page_offset;
106 
107 	if (VM_MAP_NULL == map) {
108 		return KERN_INVALID_ARGUMENT;
109 	}
110 
111 	aligned_offset_to_unmap = vm_map_trunc_page(offset_to_unmap, vm_map_page_mask(map));
112 	page_offset =  offset_to_unmap - aligned_offset_to_unmap;
113 	size_to_unmap = vm_map_round_page(size_to_unmap + page_offset, vm_map_page_mask(map));
114 
115 	return vm_map_remove_upl_range(map, upl, aligned_offset_to_unmap, size_to_unmap);
116 }
117 
118 /* Retrieve a upl for an object underlying an address range in a map */
119 
120 kern_return_t
vm_map_get_upl(vm_map_t map,vm_map_offset_t map_offset,upl_size_t * upl_size,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,upl_control_flags_t * flags,vm_tag_t tag,int force_data_sync)121 vm_map_get_upl(
122 	vm_map_t                map,
123 	vm_map_offset_t         map_offset,
124 	upl_size_t              *upl_size,
125 	upl_t                   *upl,
126 	upl_page_info_array_t   page_list,
127 	unsigned int            *count,
128 	upl_control_flags_t     *flags,
129 	vm_tag_t                tag,
130 	int                     force_data_sync)
131 {
132 	upl_control_flags_t map_flags;
133 	kern_return_t       kr;
134 
135 	if (VM_MAP_NULL == map) {
136 		return KERN_INVALID_ARGUMENT;
137 	}
138 
139 	map_flags = *flags & ~UPL_NOZEROFILL;
140 	if (force_data_sync) {
141 		map_flags |= UPL_FORCE_DATA_SYNC;
142 	}
143 
144 	kr = vm_map_create_upl(map,
145 	    map_offset,
146 	    upl_size,
147 	    upl,
148 	    page_list,
149 	    count,
150 	    &map_flags,
151 	    tag);
152 
153 	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
154 	return kr;
155 }
156 
/*
 * Debug counter: number of times an abort found a wired page that was
 * also busy while using the 'overwriting' stabilization method.
 */
uint64_t upl_pages_wired_busy = 0;

/*
 * upl_abort_range:
 * Abort the [offset, offset+size) range of the given UPL (or of each
 * sub-UPL when given a vector UPL), releasing or error-marking the
 * pages it covers according to the UPL_ABORT_* bits in 'error'.
 *
 * On return, *empty is TRUE when the UPL no longer covers any pages
 * and (for a plain UPL) UPL_COMMIT_NOTIFY_EMPTY was set, or (for a
 * vector UPL) all sub-UPLs have been drained — in which case the
 * caller is expected to deallocate the vector UPL.
 *
 * Returns KERN_INVALID_ARGUMENT for a null UPL, KERN_FAILURE when the
 * range exceeds the UPL's size or a sub-UPL lookup fails, otherwise
 * KERN_SUCCESS.
 */
kern_return_t
upl_abort_range(
	upl_t                   upl,
	upl_offset_t            offset,
	upl_size_t              size,
	int                     error,
	boolean_t               *empty)
{
	upl_size_t              xfer_size, subupl_size;
	vm_object_t             shadow_object;
	vm_object_t             object;
	vm_object_offset_t      target_offset;
	upl_offset_t            subupl_offset = offset;
	int                     occupied;
	struct  vm_page_delayed_work    dw_array;
	struct  vm_page_delayed_work    *dwp, *dwp_start;
	bool                    dwp_finish_ctx = TRUE;
	int                     dw_count;
	int                     dw_limit;
	int                     isVectorUPL = 0;
	upl_t                   vector_upl = NULL;
	vm_object_offset_t      obj_start, obj_end, obj_offset;
	kern_return_t           kr = KERN_SUCCESS;

//	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);

	dwp_start = dwp = NULL;

	subupl_size = size;
	*empty = FALSE;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * An I/O-wired UPL abort (without DUMP_PAGES) is really a commit
	 * that frees any still-absent pages.
	 */
	if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
	}

	/*
	 * Set up the delayed-work batch; fall back to a single-entry
	 * on-stack context if no heap context is available.
	 */
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	dwp_start = vm_page_delayed_work_get_ctx();
	if (dwp_start == NULL) {
		dwp_start = &dw_array;
		dw_limit = 1;
		dwp_finish_ctx = FALSE;
	}

	dwp = dwp_start;

	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		vector_upl = upl;
		upl_lock(vector_upl);
	} else {
		upl_lock(upl);
	}

process_upl_to_abort:
	/*
	 * For a vector UPL we loop back here once per sub-UPL, walking
	 * the remaining [subupl_offset, subupl_offset+subupl_size) span.
	 */
	if (isVectorUPL) {
		size = subupl_size;
		offset = subupl_offset;
		if (size == 0) {
			/* all sub-UPLs processed */
			upl_unlock(vector_upl);
			kr = KERN_SUCCESS;
			goto done;
		}
		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
		if (upl == NULL) {
			upl_unlock(vector_upl);
			kr = KERN_FAILURE;
			goto done;
		}
		subupl_size -= size;
		subupl_offset += size;
	}

	*empty = FALSE;

#if UPL_DEBUG
	/* record this abort in the UPL's commit history for debugging */
	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;

		upl->upl_commit_index++;
	}
#endif
	if (upl->flags & UPL_DEVICE_MEMORY) {
		/* device memory has no resident pages to walk */
		xfer_size = 0;
	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
		xfer_size = size;
	} else {
		/* requested range exceeds the UPL */
		if (!isVectorUPL) {
			upl_unlock(upl);
		} else {
			upl_unlock(vector_upl);
		}
		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
		kr = KERN_FAILURE;
		goto done;
	}
	object = upl->map_object;

	if (upl->flags & UPL_SHADOWED) {
		vm_object_lock(object);
		shadow_object = object->shadow;
	} else {
		shadow_object = object;
	}

	target_offset = (vm_object_offset_t)offset;

	/*
	 * Kernel-object UPLs never modify page state below, so a shared
	 * lock suffices; everything else needs the exclusive lock.
	 */
	if (upl->flags & UPL_KERNEL_OBJECT) {
		vm_object_lock_shared(shadow_object);
	} else {
		vm_object_lock(shadow_object);
	}

	if (upl->flags & UPL_ACCESS_BLOCKED) {
		assert(shadow_object->blocked_access);
		shadow_object->blocked_access = FALSE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
	}

	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
		panic("upl_abort_range: kernel_object being DUMPED");
	}

	/*
	 * Translate the UPL-relative range into page-aligned offsets
	 * within the shadow object and walk it one page at a time.
	 */
	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
	obj_end = obj_start + xfer_size;
	obj_start = vm_object_trunc_page(obj_start);
	obj_end = vm_object_round_page(obj_end);
	for (obj_offset = obj_start;
	    obj_offset < obj_end;
	    obj_offset += PAGE_SIZE) {
		vm_page_t       t, m;
		unsigned int    pg_num;
		boolean_t       needed;

		pg_num = (unsigned int) (target_offset / PAGE_SIZE);
		assert(pg_num == target_offset / PAGE_SIZE);

		needed = FALSE;

		if (upl->flags & UPL_INTERNAL) {
			/* was this page explicitly requested by the customer? */
			needed = upl->page_list[pg_num].needed;
		}

		dwp->dw_mask = 0;
		m = VM_PAGE_NULL;

		if (upl->flags & UPL_LITE) {
			if (bitmap_test(upl->lite_list, pg_num)) {
				/* page is covered by this UPL; drop it from the map */
				bitmap_clear(upl->lite_list, pg_num);

				if (!(upl->flags & UPL_KERNEL_OBJECT)) {
					m = vm_page_lookup(shadow_object, obj_offset);
				}
			}
		}
		if (upl->flags & UPL_SHADOWED) {
			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
				/* free the placeholder page in the shadow map object */
				t->vmp_free_when_done = FALSE;

				VM_PAGE_FREE(t);

				if (m == VM_PAGE_NULL) {
					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
				}
			}
		}
		if ((upl->flags & UPL_KERNEL_OBJECT)) {
			/* kernel-object pages get no per-page state changes */
			goto abort_next_page;
		}

		if (m != VM_PAGE_NULL) {
			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);

			if (m->vmp_absent) {
				boolean_t must_free = TRUE;

				/*
				 * COPYOUT = FALSE case
				 * check for error conditions which must
				 * be passed back to the pages customer
				 */
				if (error & UPL_ABORT_RESTART) {
					m->vmp_restart = TRUE;
					m->vmp_absent = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_UNAVAILABLE) {
					m->vmp_restart = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_ERROR) {
					m->vmp_restart = FALSE;
					m->vmp_absent = FALSE;
					m->vmp_error = TRUE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				}
				if (m->vmp_clustered && needed == FALSE) {
					/*
					 * This page was a part of a speculative
					 * read-ahead initiated by the kernel
					 * itself.  No one is expecting this
					 * page and no one will clean up its
					 * error state if it ever becomes valid
					 * in the future.
					 * We have to free it here.
					 */
					must_free = TRUE;
				}
				m->vmp_cleaning = FALSE;

				if (m->vmp_overwriting && !m->vmp_busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->vmp_overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE) {
					dwp->dw_mask |= DW_vm_page_free;
				} else {
					dwp->dw_mask |= DW_vm_page_activate;
				}
			} else {
				/*
				 * Handle the trusted pager throttle.
				 */
				if (m->vmp_laundry) {
					dwp->dw_mask |= DW_vm_pageout_throttle_up;
				}

				if (upl->flags & UPL_ACCESS_BLOCKED) {
					/*
					 * We blocked access to the pages in this UPL.
					 * Clear the "busy" bit and wake up any waiter
					 * for this page.
					 */
					dwp->dw_mask |= DW_clear_busy;
				}
				if (m->vmp_overwriting) {
					if (VM_PAGE_WIRED(m)) {
						/*
						 * deal with the 'alternate' method
						 * of stabilizing the page...
						 * we will either free the page
						 * or mark 'busy' to be cleared
						 * in the following code which will
						 * take care of the primary stabilization
						 * method (i.e. setting 'busy' to TRUE)
						 */
						if (m->vmp_busy) {
//							printf("*******   FBDP %s:%d page %p object %p offset 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
							upl_pages_wired_busy++;
						}
						dwp->dw_mask |= DW_vm_page_unwire;
					} else {
						assert(m->vmp_busy);
						dwp->dw_mask |= DW_clear_busy;
					}
					m->vmp_overwriting = FALSE;
				}
				m->vmp_free_when_done = FALSE;
				m->vmp_cleaning = FALSE;

				if (error & UPL_ABORT_DUMP_PAGES) {
					/* caller wants the pages thrown away outright */
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

					dwp->dw_mask |= DW_vm_page_free;
				} else {
					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
						if (error & UPL_ABORT_REFERENCE) {
							/*
							 * we've been told to explicitly
							 * reference this page... for
							 * file I/O, this is done by
							 * implementing an LRU on the inactive q
							 */
							dwp->dw_mask |= DW_vm_page_lru;
						} else if (!VM_PAGE_PAGEABLE(m)) {
							dwp->dw_mask |= DW_vm_page_deactivate_internal;
						}
					}
					dwp->dw_mask |= DW_PAGE_WAKEUP;
				}
			}
		}
abort_next_page:
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

		if (dwp->dw_mask) {
			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				/* queue heavier work; flush the batch when it fills */
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);

					dwp = dwp_start;
					dw_count = 0;
				}
			} else {
				/* only busy-clear/wakeup: do it inline, no batching needed */
				if (dwp->dw_mask & DW_clear_busy) {
					m->vmp_busy = FALSE;
				}

				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
					vm_page_wakeup(shadow_object, m);
				}
			}
		}
	}
	/* flush any work still pending in the batch */
	if (dw_count) {
		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
		dwp = dwp_start;
		dw_count = 0;
	}

	/* determine whether the UPL still covers any pages */
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));

		occupied = !bitmap_is_empty(upl->lite_list, pages);
	} else {
		occupied = !vm_page_queue_empty(&upl->map_object->memq);
	}
	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and
		 * not the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
			*empty = TRUE;
		}

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
			vm_object_collapse(shadow_object, 0, TRUE);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object) {
		vm_object_unlock(object);
	}

	if (!isVectorUPL) {
		upl_unlock(upl);
	} else {
		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
		if (*empty == TRUE) {
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);
		}
		goto process_upl_to_abort;
	}

	kr = KERN_SUCCESS;

done:
	if (dwp_start && dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
		dwp_start = dwp = NULL;
	}

	return kr;
}
566 
567 kern_return_t
upl_abort(upl_t upl,int error)568 upl_abort(
569 	upl_t   upl,
570 	int     error)
571 {
572 	boolean_t       empty;
573 
574 	if (upl == UPL_NULL) {
575 		return KERN_INVALID_ARGUMENT;
576 	}
577 
578 	return upl_abort_range(upl, 0, upl->u_size, error, &empty);
579 }
580 
581 kern_return_t
upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags,upl_page_info_t * page_list,mach_msg_type_number_t count,boolean_t * empty)582 upl_commit_range(
583 	upl_t                   upl,
584 	upl_offset_t            offset,
585 	upl_size_t              size,
586 	int                     flags,
587 	upl_page_info_t         *page_list,
588 	mach_msg_type_number_t  count,
589 	boolean_t               *empty)
590 {
591 	upl_size_t              xfer_size, subupl_size;
592 	vm_object_t             shadow_object;
593 	vm_object_t             object;
594 	vm_object_t             m_object;
595 	vm_object_offset_t      target_offset;
596 	upl_offset_t            subupl_offset = offset;
597 	int                     entry;
598 	int                     occupied;
599 	int                     clear_refmod = 0;
600 	int                     pgpgout_count = 0;
601 	struct  vm_page_delayed_work    dw_array;
602 	struct  vm_page_delayed_work    *dwp, *dwp_start;
603 	bool                    dwp_finish_ctx = TRUE;
604 	int                     dw_count;
605 	int                     dw_limit;
606 	int                     isVectorUPL = 0;
607 	upl_t                   vector_upl = NULL;
608 	boolean_t               should_be_throttled = FALSE;
609 
610 	vm_page_t               nxt_page = VM_PAGE_NULL;
611 	int                     fast_path_possible = 0;
612 	int                     fast_path_full_commit = 0;
613 	int                     throttle_page = 0;
614 	int                     unwired_count = 0;
615 	int                     local_queue_count = 0;
616 	vm_page_t               first_local, last_local;
617 	vm_object_offset_t      obj_start, obj_end, obj_offset;
618 	kern_return_t           kr = KERN_SUCCESS;
619 
620 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
621 
622 	dwp_start = dwp = NULL;
623 
624 	subupl_size = size;
625 	*empty = FALSE;
626 
627 	if (upl == UPL_NULL) {
628 		return KERN_INVALID_ARGUMENT;
629 	}
630 
631 	dw_count = 0;
632 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
633 	dwp_start = vm_page_delayed_work_get_ctx();
634 	if (dwp_start == NULL) {
635 		dwp_start = &dw_array;
636 		dw_limit = 1;
637 		dwp_finish_ctx = FALSE;
638 	}
639 
640 	dwp = dwp_start;
641 
642 	if (count == 0) {
643 		page_list = NULL;
644 	}
645 
646 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
647 		vector_upl = upl;
648 		upl_lock(vector_upl);
649 	} else {
650 		upl_lock(upl);
651 	}
652 
653 process_upl_to_commit:
654 
655 	if (isVectorUPL) {
656 		size = subupl_size;
657 		offset = subupl_offset;
658 		if (size == 0) {
659 			upl_unlock(vector_upl);
660 			kr = KERN_SUCCESS;
661 			goto done;
662 		}
663 		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
664 		if (upl == NULL) {
665 			upl_unlock(vector_upl);
666 			kr = KERN_FAILURE;
667 			goto done;
668 		}
669 		assertf(upl->flags & UPL_INTERNAL, "%s: sub-upl %p of vector upl %p has no internal page list",
670 		    __func__, upl, vector_upl);
671 		page_list = upl->page_list;
672 		subupl_size -= size;
673 		subupl_offset += size;
674 	}
675 
676 #if UPL_DEBUG
677 	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
678 		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
679 		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
680 		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
681 
682 		upl->upl_commit_index++;
683 	}
684 #endif
685 	if (upl->flags & UPL_DEVICE_MEMORY) {
686 		xfer_size = 0;
687 	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
688 		xfer_size = size;
689 	} else {
690 		if (!isVectorUPL) {
691 			upl_unlock(upl);
692 		} else {
693 			upl_unlock(vector_upl);
694 		}
695 		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
696 		kr = KERN_FAILURE;
697 		goto done;
698 	}
699 	if (upl->flags & UPL_SET_DIRTY) {
700 		flags |= UPL_COMMIT_SET_DIRTY;
701 	}
702 	if (upl->flags & UPL_CLEAR_DIRTY) {
703 		flags |= UPL_COMMIT_CLEAR_DIRTY;
704 	}
705 
706 	object = upl->map_object;
707 
708 	if (upl->flags & UPL_SHADOWED) {
709 		vm_object_lock(object);
710 		shadow_object = object->shadow;
711 	} else {
712 		shadow_object = object;
713 	}
714 	entry = offset / PAGE_SIZE;
715 	target_offset = (vm_object_offset_t)offset;
716 
717 	if (upl->flags & UPL_KERNEL_OBJECT) {
718 		vm_object_lock_shared(shadow_object);
719 	} else {
720 		vm_object_lock(shadow_object);
721 	}
722 
723 	VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
724 
725 	if (upl->flags & UPL_ACCESS_BLOCKED) {
726 		assert(shadow_object->blocked_access);
727 		shadow_object->blocked_access = FALSE;
728 		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
729 	}
730 
731 	if (shadow_object->code_signed) {
732 		/*
733 		 * CODE SIGNING:
734 		 * If the object is code-signed, do not let this UPL tell
735 		 * us if the pages are valid or not.  Let the pages be
736 		 * validated by VM the normal way (when they get mapped or
737 		 * copied).
738 		 */
739 		flags &= ~UPL_COMMIT_CS_VALIDATED;
740 	}
741 	if (!page_list) {
742 		/*
743 		 * No page list to get the code-signing info from !?
744 		 */
745 		flags &= ~UPL_COMMIT_CS_VALIDATED;
746 	}
747 	if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
748 		should_be_throttled = TRUE;
749 	}
750 
751 	if ((upl->flags & UPL_IO_WIRE) &&
752 	    !(flags & UPL_COMMIT_FREE_ABSENT) &&
753 	    !isVectorUPL &&
754 	    shadow_object->purgable != VM_PURGABLE_VOLATILE &&
755 	    shadow_object->purgable != VM_PURGABLE_EMPTY) {
756 		if (!vm_page_queue_empty(&shadow_object->memq)) {
757 			if (shadow_object->internal && size == shadow_object->vo_size) {
758 				nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
759 				fast_path_full_commit = 1;
760 			}
761 			fast_path_possible = 1;
762 
763 			if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
764 			    (shadow_object->purgable == VM_PURGABLE_DENY ||
765 			    shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
766 			    shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
767 				throttle_page = 1;
768 			}
769 		}
770 	}
771 	first_local = VM_PAGE_NULL;
772 	last_local = VM_PAGE_NULL;
773 
774 	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
775 	obj_end = obj_start + xfer_size;
776 	obj_start = vm_object_trunc_page(obj_start);
777 	obj_end = vm_object_round_page(obj_end);
778 	for (obj_offset = obj_start;
779 	    obj_offset < obj_end;
780 	    obj_offset += PAGE_SIZE) {
781 		vm_page_t       t, m;
782 
783 		dwp->dw_mask = 0;
784 		clear_refmod = 0;
785 
786 		m = VM_PAGE_NULL;
787 
788 		if (upl->flags & UPL_LITE) {
789 			unsigned int    pg_num;
790 
791 			if (nxt_page != VM_PAGE_NULL) {
792 				m = nxt_page;
793 				nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
794 				target_offset = m->vmp_offset;
795 			}
796 			pg_num = (unsigned int) (target_offset / PAGE_SIZE);
797 			assert(pg_num == target_offset / PAGE_SIZE);
798 
799 			if (bitmap_test(upl->lite_list, pg_num)) {
800 				bitmap_clear(upl->lite_list, pg_num);
801 
802 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
803 					m = vm_page_lookup(shadow_object, obj_offset);
804 				}
805 			} else {
806 				m = NULL;
807 			}
808 		}
809 		if (upl->flags & UPL_SHADOWED) {
810 			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
811 				t->vmp_free_when_done = FALSE;
812 
813 				VM_PAGE_FREE(t);
814 
815 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
816 					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
817 				}
818 			}
819 		}
820 		if (m == VM_PAGE_NULL) {
821 			goto commit_next_page;
822 		}
823 
824 		m_object = VM_PAGE_OBJECT(m);
825 
826 		if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
827 			assert(m->vmp_busy);
828 
829 			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
830 			goto commit_next_page;
831 		}
832 
833 		if (flags & UPL_COMMIT_CS_VALIDATED) {
834 			/*
835 			 * CODE SIGNING:
836 			 * Set the code signing bits according to
837 			 * what the UPL says they should be.
838 			 */
839 			m->vmp_cs_validated |= page_list[entry].cs_validated;
840 			m->vmp_cs_tainted |= page_list[entry].cs_tainted;
841 			m->vmp_cs_nx |= page_list[entry].cs_nx;
842 		}
843 		if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
844 			m->vmp_written_by_kernel = TRUE;
845 		}
846 
847 		if (upl->flags & UPL_IO_WIRE) {
848 			if (page_list) {
849 				page_list[entry].phys_addr = 0;
850 			}
851 
852 			if (flags & UPL_COMMIT_SET_DIRTY) {
853 				SET_PAGE_DIRTY(m, FALSE);
854 			} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
855 				m->vmp_dirty = FALSE;
856 
857 				if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
858 				    m->vmp_cs_validated &&
859 				    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
860 					/*
861 					 * CODE SIGNING:
862 					 * This page is no longer dirty
863 					 * but could have been modified,
864 					 * so it will need to be
865 					 * re-validated.
866 					 */
867 					m->vmp_cs_validated = VMP_CS_ALL_FALSE;
868 
869 					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
870 
871 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
872 				}
873 				clear_refmod |= VM_MEM_MODIFIED;
874 			}
875 			if (upl->flags & UPL_ACCESS_BLOCKED) {
876 				/*
877 				 * We blocked access to the pages in this UPL.
878 				 * Clear the "busy" bit and wake up any waiter
879 				 * for this page.
880 				 */
881 				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
882 			}
883 			if (fast_path_possible) {
884 				assert(m_object->purgable != VM_PURGABLE_EMPTY);
885 				assert(m_object->purgable != VM_PURGABLE_VOLATILE);
886 				if (m->vmp_absent) {
887 					assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
888 					assert(m->vmp_wire_count == 0);
889 					assert(m->vmp_busy);
890 
891 					m->vmp_absent = FALSE;
892 					dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
893 				} else {
894 					if (m->vmp_wire_count == 0) {
895 						panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
896 					}
897 					assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
898 
899 					/*
900 					 * XXX FBDP need to update some other
901 					 * counters here (purgeable_wired_count)
902 					 * (ledgers), ...
903 					 */
904 					assert(m->vmp_wire_count > 0);
905 					m->vmp_wire_count--;
906 
907 					if (m->vmp_wire_count == 0) {
908 						m->vmp_q_state = VM_PAGE_NOT_ON_Q;
909 						m->vmp_iopl_wired = false;
910 						unwired_count++;
911 
912 					}
913 				}
914 				if (m->vmp_wire_count == 0) {
915 					assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
916 
917 					if (last_local == VM_PAGE_NULL) {
918 						assert(first_local == VM_PAGE_NULL);
919 
920 						last_local = m;
921 						first_local = m;
922 					} else {
923 						assert(first_local != VM_PAGE_NULL);
924 
925 						m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
926 						first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
927 						first_local = m;
928 					}
929 					local_queue_count++;
930 
931 					if (throttle_page) {
932 						m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
933 					} else {
934 						if (flags & UPL_COMMIT_INACTIVATE) {
935 							if (shadow_object->internal) {
936 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
937 							} else {
938 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
939 							}
940 						} else {
941 							m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
942 						}
943 					}
944 				}
945 			} else {
946 				if (flags & UPL_COMMIT_INACTIVATE) {
947 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
948 					clear_refmod |= VM_MEM_REFERENCED;
949 				}
950 				if (m->vmp_absent) {
951 					if (flags & UPL_COMMIT_FREE_ABSENT) {
952 						dwp->dw_mask |= DW_vm_page_free;
953 					} else {
954 						m->vmp_absent = FALSE;
955 						dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
956 
957 						if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
958 							dwp->dw_mask |= DW_vm_page_activate;
959 						}
960 					}
961 				} else {
962 					dwp->dw_mask |= DW_vm_page_unwire;
963 				}
964 			}
965 			goto commit_next_page;
966 		}
967 		assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
968 
969 		if (page_list) {
970 			page_list[entry].phys_addr = 0;
971 		}
972 
973 		/*
974 		 * make sure to clear the hardware
975 		 * modify or reference bits before
976 		 * releasing the BUSY bit on this page
977 		 * otherwise we risk losing a legitimate
978 		 * change of state
979 		 */
980 		if (flags & UPL_COMMIT_CLEAR_DIRTY) {
981 			m->vmp_dirty = FALSE;
982 
983 			clear_refmod |= VM_MEM_MODIFIED;
984 		}
985 		if (m->vmp_laundry) {
986 			dwp->dw_mask |= DW_vm_pageout_throttle_up;
987 		}
988 
989 		if (VM_PAGE_WIRED(m)) {
990 			m->vmp_free_when_done = FALSE;
991 		}
992 
993 		if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
994 		    m->vmp_cs_validated &&
995 		    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
996 			/*
997 			 * CODE SIGNING:
998 			 * This page is no longer dirty
999 			 * but could have been modified,
1000 			 * so it will need to be
1001 			 * re-validated.
1002 			 */
1003 			m->vmp_cs_validated = VMP_CS_ALL_FALSE;
1004 
1005 			VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
1006 
1007 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1008 		}
1009 		if (m->vmp_overwriting) {
1010 			/*
1011 			 * the (COPY_OUT_FROM == FALSE) request_page_list case
1012 			 */
1013 			if (VM_PAGE_WIRED(m)) {
1014 				/*
1015 				 * alternate (COPY_OUT_FROM == FALSE) page_list case
1016 				 * Occurs when the original page was wired
1017 				 * at the time of the list request
1018 				 */
1019 				if (m->vmp_busy) {
1020 //					printf("*******   FBDP %s:%d page %p object %p ofsfet 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
1021 					upl_pages_wired_busy++;
1022 				}
1023 				assert(!m->vmp_absent);
1024 				dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
1025 			} else {
1026 				assert(m->vmp_busy);
1027 #if CONFIG_PHANTOM_CACHE
1028 				if (m->vmp_absent && !m_object->internal) {
1029 					dwp->dw_mask |= DW_vm_phantom_cache_update;
1030 				}
1031 #endif
1032 				m->vmp_absent = FALSE;
1033 
1034 				dwp->dw_mask |= DW_clear_busy;
1035 			}
1036 			m->vmp_overwriting = FALSE;
1037 		}
1038 		m->vmp_cleaning = FALSE;
1039 
1040 		if (m->vmp_free_when_done) {
1041 			/*
1042 			 * With the clean queue enabled, UPL_PAGEOUT should
1043 			 * no longer set the pageout bit. Its pages now go
1044 			 * to the clean queue.
1045 			 *
1046 			 * We don't use the cleaned Q anymore and so this
1047 			 * assert isn't correct. The code for the clean Q
1048 			 * still exists and might be used in the future. If we
1049 			 * go back to the cleaned Q, we will re-enable this
1050 			 * assert.
1051 			 *
1052 			 * assert(!(upl->flags & UPL_PAGEOUT));
1053 			 */
1054 			assert(!m_object->internal);
1055 
1056 			m->vmp_free_when_done = FALSE;
1057 
1058 			if ((flags & UPL_COMMIT_SET_DIRTY) ||
1059 			    (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
1060 				/*
1061 				 * page was re-dirtied after we started
1062 				 * the pageout... reactivate it since
1063 				 * we don't know whether the on-disk
1064 				 * copy matches what is now in memory
1065 				 */
1066 				SET_PAGE_DIRTY(m, FALSE);
1067 
1068 				dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
1069 
1070 				if (upl->flags & UPL_PAGEOUT) {
1071 					counter_inc(&vm_statistics_reactivations);
1072 					DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
1073 				}
1074 			} else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
1075 				/*
1076 				 * Someone else might still be handling this
1077 				 * page (vm_fault() for example), so let's not
1078 				 * free it or "un-busy" it!
1079 				 * Put that page in the "speculative" queue
1080 				 * for now (since we would otherwise have freed
1081 				 * it) and let whoever is keeping the page
1082 				 * "busy" move it if needed when they're done
1083 				 * with it.
1084 				 */
1085 				dwp->dw_mask |= DW_vm_page_speculate;
1086 			} else {
1087 				/*
1088 				 * page has been successfully cleaned
1089 				 * go ahead and free it for other use
1090 				 */
1091 				if (m_object->internal) {
1092 					DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
1093 				} else {
1094 					DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
1095 				}
1096 				m->vmp_dirty = FALSE;
1097 				if (!(upl->flags & UPL_HAS_BUSY)) {
1098 					assert(!m->vmp_busy);
1099 				}
1100 				m->vmp_busy = TRUE;
1101 
1102 				dwp->dw_mask |= DW_vm_page_free;
1103 			}
1104 			goto commit_next_page;
1105 		}
1106 		/*
1107 		 * It is a part of the semantic of COPYOUT_FROM
1108 		 * UPLs that a commit implies cache sync
1109 		 * between the vm page and the backing store
1110 		 * this can be used to strip the precious bit
1111 		 * as well as clean
1112 		 */
1113 		if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
1114 			m->vmp_precious = FALSE;
1115 		}
1116 
1117 		if (flags & UPL_COMMIT_SET_DIRTY) {
1118 			SET_PAGE_DIRTY(m, FALSE);
1119 		} else {
1120 			m->vmp_dirty = FALSE;
1121 		}
1122 
1123 		/* with the clean queue on, move *all* cleaned pages to the clean queue */
1124 		if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
1125 			pgpgout_count++;
1126 
1127 			counter_inc(&vm_statistics_pageouts);
1128 			DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
1129 
1130 			dwp->dw_mask |= DW_enqueue_cleaned;
1131 		} else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
1132 			/*
1133 			 * page coming back in from being 'frozen'...
1134 			 * it was dirty before it was frozen, so keep it so
1135 			 * the vm_page_activate will notice that it really belongs
1136 			 * on the throttle queue and put it there
1137 			 */
1138 			SET_PAGE_DIRTY(m, FALSE);
1139 			dwp->dw_mask |= DW_vm_page_activate;
1140 		} else {
1141 			if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
1142 				dwp->dw_mask |= DW_vm_page_deactivate_internal;
1143 				clear_refmod |= VM_MEM_REFERENCED;
1144 			} else if (!VM_PAGE_PAGEABLE(m)) {
1145 				if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
1146 					dwp->dw_mask |= DW_vm_page_speculate;
1147 				} else if (m->vmp_reference) {
1148 					dwp->dw_mask |= DW_vm_page_activate;
1149 				} else {
1150 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
1151 					clear_refmod |= VM_MEM_REFERENCED;
1152 				}
1153 			}
1154 		}
1155 		if (upl->flags & UPL_ACCESS_BLOCKED) {
1156 			/*
1157 			 * We blocked access to the pages in this UPL.
1158 			 * Clear the "busy" bit on this page before we
1159 			 * wake up any waiter.
1160 			 */
1161 			dwp->dw_mask |= DW_clear_busy;
1162 		}
1163 		/*
1164 		 * Wakeup any thread waiting for the page to be un-cleaning.
1165 		 */
1166 		dwp->dw_mask |= DW_PAGE_WAKEUP;
1167 
1168 commit_next_page:
1169 		if (clear_refmod) {
1170 			pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
1171 		}
1172 
1173 		target_offset += PAGE_SIZE_64;
1174 		xfer_size -= PAGE_SIZE;
1175 		entry++;
1176 
1177 		if (dwp->dw_mask) {
1178 			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
1179 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
1180 
1181 				if (dw_count >= dw_limit) {
1182 					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1183 
1184 					dwp = dwp_start;
1185 					dw_count = 0;
1186 				}
1187 			} else {
1188 				if (dwp->dw_mask & DW_clear_busy) {
1189 					m->vmp_busy = FALSE;
1190 				}
1191 
1192 				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
1193 					vm_page_wakeup(m_object, m);
1194 				}
1195 			}
1196 		}
1197 	}
1198 	if (dw_count) {
1199 		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1200 		dwp = dwp_start;
1201 		dw_count = 0;
1202 	}
1203 
1204 	if (fast_path_possible) {
1205 		assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
1206 		assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
1207 
1208 		if (local_queue_count || unwired_count) {
1209 			if (local_queue_count) {
1210 				vm_page_t       first_target;
1211 				vm_page_queue_head_t    *target_queue;
1212 
1213 				if (throttle_page) {
1214 					target_queue = &vm_page_queue_throttled;
1215 				} else {
1216 					if (flags & UPL_COMMIT_INACTIVATE) {
1217 						if (shadow_object->internal) {
1218 							target_queue = &vm_page_queue_anonymous;
1219 						} else {
1220 							target_queue = &vm_page_queue_inactive;
1221 						}
1222 					} else {
1223 						target_queue = &vm_page_queue_active;
1224 					}
1225 				}
1226 				/*
1227 				 * Transfer the entire local queue to a regular LRU page queues.
1228 				 */
1229 				vm_page_lockspin_queues();
1230 
1231 				first_target = (vm_page_t) vm_page_queue_first(target_queue);
1232 
1233 				if (vm_page_queue_empty(target_queue)) {
1234 					target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1235 				} else {
1236 					first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1237 				}
1238 
1239 				target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
1240 				first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
1241 				last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
1242 
1243 				/*
1244 				 * Adjust the global page counts.
1245 				 */
1246 				if (throttle_page) {
1247 					vm_page_throttled_count += local_queue_count;
1248 				} else {
1249 					if (flags & UPL_COMMIT_INACTIVATE) {
1250 						if (shadow_object->internal) {
1251 							vm_page_anonymous_count += local_queue_count;
1252 						}
1253 						vm_page_inactive_count += local_queue_count;
1254 
1255 						token_new_pagecount += local_queue_count;
1256 					} else {
1257 						vm_page_active_count += local_queue_count;
1258 					}
1259 
1260 					if (shadow_object->internal) {
1261 						vm_page_pageable_internal_count += local_queue_count;
1262 					} else {
1263 						vm_page_pageable_external_count += local_queue_count;
1264 					}
1265 				}
1266 			} else {
1267 				vm_page_lockspin_queues();
1268 			}
1269 			if (unwired_count) {
1270 				vm_page_wire_count -= unwired_count;
1271 				VM_CHECK_MEMORYSTATUS;
1272 			}
1273 			vm_page_unlock_queues();
1274 
1275 			VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
1276 		}
1277 	}
1278 
1279 	if (upl->flags & UPL_DEVICE_MEMORY) {
1280 		occupied = 0;
1281 	} else if (upl->flags & UPL_LITE) {
1282 		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
1283 
1284 		occupied = !fast_path_full_commit &&
1285 		    !bitmap_is_empty(upl->lite_list, pages);
1286 	} else {
1287 		occupied = !vm_page_queue_empty(&upl->map_object->memq);
1288 	}
1289 	if (occupied == 0) {
1290 		/*
1291 		 * If this UPL element belongs to a Vector UPL and is
1292 		 * empty, then this is the right function to deallocate
1293 		 * it. So go ahead set the *empty variable. The flag
1294 		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
1295 		 * should be considered relevant for the Vector UPL and not
1296 		 * the internal UPLs.
1297 		 */
1298 		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
1299 			*empty = TRUE;
1300 		}
1301 
1302 		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
1303 			/*
1304 			 * this is not a paging object
1305 			 * so we need to drop the paging reference
1306 			 * that was taken when we created the UPL
1307 			 * against this object
1308 			 */
1309 			vm_object_activity_end(shadow_object);
1310 			vm_object_collapse(shadow_object, 0, TRUE);
1311 		} else {
1312 			/*
1313 			 * we donated the paging reference to
1314 			 * the map object... vm_pageout_object_terminate
1315 			 * will drop this reference
1316 			 */
1317 		}
1318 	}
1319 	VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
1320 	vm_object_unlock(shadow_object);
1321 	if (object != shadow_object) {
1322 		vm_object_unlock(object);
1323 	}
1324 
1325 	if (!isVectorUPL) {
1326 		upl_unlock(upl);
1327 	} else {
1328 		/*
1329 		 * If we completed our operations on an UPL that is
1330 		 * part of a Vectored UPL and if empty is TRUE, then
1331 		 * we should go ahead and deallocate this UPL element.
1332 		 * Then we check if this was the last of the UPL elements
1333 		 * within that Vectored UPL. If so, set empty to TRUE
1334 		 * so that in ubc_upl_commit_range or ubc_upl_commit, we
1335 		 * can go ahead and deallocate the Vector UPL too.
1336 		 */
1337 		if (*empty == TRUE) {
1338 			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
1339 			upl_deallocate(upl);
1340 		}
1341 		goto process_upl_to_commit;
1342 	}
1343 	if (pgpgout_count) {
1344 		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
1345 	}
1346 
1347 	kr = KERN_SUCCESS;
1348 done:
1349 	if (dwp_start && dwp_finish_ctx) {
1350 		vm_page_delayed_work_finish_ctx(dwp_start);
1351 		dwp_start = dwp = NULL;
1352 	}
1353 
1354 	return kr;
1355 }
1356 
1357 /* an option on commit should be wire */
1358 kern_return_t
upl_commit(upl_t upl,upl_page_info_t * page_list,mach_msg_type_number_t count)1359 upl_commit(
1360 	upl_t                   upl,
1361 	upl_page_info_t         *page_list,
1362 	mach_msg_type_number_t  count)
1363 {
1364 	boolean_t       empty;
1365 
1366 	if (upl == UPL_NULL) {
1367 		return KERN_INVALID_ARGUMENT;
1368 	}
1369 
1370 	return upl_commit_range(upl, 0, upl->u_size, 0,
1371 	           page_list, count, &empty);
1372 }
1373