xref: /xnu-12377.41.6/osfmk/vm/vm_upl.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_upl.h>
30 #include <vm/vm_pageout_internal.h>
31 #include <vm/vm_page_internal.h>
32 #include <vm/vm_map_internal.h>
33 #if HAS_MTE
34 #include <vm/vm_mteinfo_internal.h>
35 #endif /* HAS_MTE */
36 #include <mach/upl_server.h>
37 #include <kern/host_statistics.h>
38 #include <vm/vm_purgeable_internal.h>
39 #include <vm/vm_object_internal.h>
40 #include <vm/vm_ubc.h>
41 #include <sys/kdebug.h>
42 #include <sys/kdebug_kernel.h>
43 
44 extern boolean_t hibernate_cleaning_in_progress;
45 
46 /* map a (whole) upl into an address space */
47 kern_return_t
48 vm_upl_map(
49 	vm_map_t                map,
50 	upl_t                   upl,
51 	vm_address_t            *dst_addr)
52 {
53 	vm_map_offset_t         map_addr;
54 	kern_return_t           kr;
55 
56 	if (VM_MAP_NULL == map) {
57 		return KERN_INVALID_ARGUMENT;
58 	}
59 
60 	kr = vm_map_enter_upl(map, upl, &map_addr);
61 	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
62 	return kr;
63 }
64 
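/* unmap a (whole) upl from an address space */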
65 kern_return_t
66 vm_upl_unmap(
67 	vm_map_t                map,
68 	upl_t                   upl)
69 {
70 	if (VM_MAP_NULL == map) {
71 		return KERN_INVALID_ARGUMENT;
72 	}
73 
74 	return vm_map_remove_upl(map, upl);
75 }
76 
77 /* map a part of a upl into an address space with requested protection. */
78 kern_return_t
79 vm_upl_map_range(
80 	vm_map_t                map,
81 	upl_t                   upl,
82 	vm_offset_t             offset_to_map,
83 	vm_size_t               size_to_map,
84 	vm_prot_t               prot_to_map,
85 	vm_address_t            *dst_addr)
86 {
87 	vm_map_offset_t         map_addr, aligned_offset_to_map, adjusted_offset;
88 	kern_return_t           kr;
89 
90 	if (VM_MAP_NULL == map) {
91 		return KERN_INVALID_ARGUMENT;
92 	}
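	/*
	 * Align the requested offset down to a map page boundary and grow the
	 * size to cover the alignment slop, so whole pages get mapped; the
	 * address returned to the caller is bumped back up by the slop.
	 *
	 * Illustrative example (assuming a 16KB map page size):
	 *   offset_to_map = 0x4200, size_to_map = 0x100
	 *   -> aligned_offset_to_map = 0x4000, adjusted_offset = 0x200,
	 *      size_to_map rounds up to 0x4000, *dst_addr = map_addr + 0x200.
	 */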
93 	aligned_offset_to_map = vm_map_trunc_page(offset_to_map, vm_map_page_mask(map));
94 	adjusted_offset =  offset_to_map - aligned_offset_to_map;
95 	size_to_map = vm_map_round_page(size_to_map + adjusted_offset, vm_map_page_mask(map));
96 
97 	kr = vm_map_enter_upl_range(map, upl, aligned_offset_to_map, size_to_map, prot_to_map, &map_addr);
98 	*dst_addr = CAST_DOWN(vm_address_t, (map_addr + adjusted_offset));
99 	return kr;
100 }
101 
102 /* unmap a part of a upl that was mapped in the address space. */
103 kern_return_t
104 vm_upl_unmap_range(
105 	vm_map_t                map,
106 	upl_t                   upl,
107 	vm_offset_t             offset_to_unmap,
108 	vm_size_t               size_to_unmap)
109 {
110 	vm_map_offset_t         aligned_offset_to_unmap, page_offset;
111 
112 	if (VM_MAP_NULL == map) {
113 		return KERN_INVALID_ARGUMENT;
114 	}
115 
116 	aligned_offset_to_unmap = vm_map_trunc_page(offset_to_unmap, vm_map_page_mask(map));
117 	page_offset =  offset_to_unmap - aligned_offset_to_unmap;
118 	size_to_unmap = vm_map_round_page(size_to_unmap + page_offset, vm_map_page_mask(map));
119 
120 	return vm_map_remove_upl_range(map, upl, aligned_offset_to_unmap, size_to_unmap);
121 }
122 
123 /* Retrieve a upl for an object underlying an address range in a map */
124 
125 kern_return_t
126 vm_map_get_upl(
127 	vm_map_t                map,
128 	vm_map_offset_t         map_offset,
129 	upl_size_t              *upl_size,
130 	upl_t                   *upl,
131 	upl_page_info_array_t   page_list,
132 	unsigned int            *count,
133 	upl_control_flags_t     *flags,
134 	vm_tag_t                tag,
135 	int                     force_data_sync)
136 {
137 	upl_control_flags_t map_flags;
138 	kern_return_t       kr;
139 
140 	if (VM_MAP_NULL == map) {
141 		return KERN_INVALID_ARGUMENT;
142 	}
143 
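	/*
	 * Build the flags for vm_map_create_upl(): drop UPL_NOZEROFILL and,
	 * if the caller asked for it, request a forced data sync.  The
	 * UPL_FORCE_DATA_SYNC bit is stripped again from the flags handed
	 * back to the caller.
	 */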
144 	map_flags = *flags & ~UPL_NOZEROFILL;
145 	if (force_data_sync) {
146 		map_flags |= UPL_FORCE_DATA_SYNC;
147 	}
148 
149 	kr = vm_map_create_upl(map,
150 	    map_offset,
151 	    upl_size,
152 	    upl,
153 	    page_list,
154 	    count,
155 	    &map_flags,
156 	    tag);
157 
158 	*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
159 	return kr;
160 }
161 
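/*
 * Diagnostic counter: bumped when a page being "overwritten" is found both
 * wired and busy during a UPL commit or abort (see the DW_vm_page_unwire
 * paths below).
 */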
162 uint64_t upl_pages_wired_busy = 0;
163 
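/*
 * Abort the pages in the given range of a UPL: clear the corresponding
 * lite-list bits, undo busy/wired state, and dispose of each page according
 * to the UPL_ABORT_* flags (restart, unavailable, error, dump).  For a
 * vector UPL, each sub-UPL covering the range is processed in turn.  On
 * return, *empty is set when the UPL ends up with no occupied pages (for
 * vector UPLs, or when the UPL has UPL_COMMIT_NOTIFY_EMPTY set).
 */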
164 kern_return_t
165 upl_abort_range(
166 	upl_t                   upl,
167 	upl_offset_t            offset,
168 	upl_size_t              size,
169 	int                     error,
170 	boolean_t               *empty)
171 {
172 	upl_size_t              xfer_size, subupl_size;
173 	vm_object_t             shadow_object;
174 	vm_object_t             object;
175 	vm_object_offset_t      target_offset;
176 	upl_offset_t            subupl_offset = offset;
177 	int                     occupied;
178 	struct  vm_page_delayed_work    dw_array;
179 	struct  vm_page_delayed_work    *dwp, *dwp_start;
180 	bool                    dwp_finish_ctx = TRUE;
181 	int                     dw_count;
182 	int                     dw_limit;
183 	int                     isVectorUPL = 0;
184 	upl_t                   vector_upl = NULL;
185 	vm_object_offset_t      obj_start, obj_end, obj_offset;
186 	kern_return_t           kr = KERN_SUCCESS;
187 
188 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
189 
190 	dwp_start = dwp = NULL;
191 
192 	subupl_size = size;
193 	*empty = FALSE;
194 
195 	if (upl == UPL_NULL) {
196 		return KERN_INVALID_ARGUMENT;
197 	}
198 
199 	if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
200 		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
201 	}
202 
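	/*
	 * Grab a delayed-work context so per-page queue and state changes can
	 * be batched and applied in groups; if no context is available, fall
	 * back to a single-entry array on the stack.
	 */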
203 	dw_count = 0;
204 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
205 	dwp_start = vm_page_delayed_work_get_ctx();
206 	if (dwp_start == NULL) {
207 		dwp_start = &dw_array;
208 		dw_limit = 1;
209 		dwp_finish_ctx = FALSE;
210 	}
211 
212 	dwp = dwp_start;
213 
214 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
215 		vector_upl = upl;
216 		upl_lock(vector_upl);
217 	} else {
218 		upl_lock(upl);
219 	}
220 
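	/*
	 * For a vector UPL, each pass through this label peels off the sub-UPL
	 * covering the next portion of the requested range and aborts it; a
	 * plain UPL makes a single pass.
	 */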
221 process_upl_to_abort:
222 	if (isVectorUPL) {
223 		size = subupl_size;
224 		offset = subupl_offset;
225 		if (size == 0) {
226 			upl_unlock(vector_upl);
227 			kr = KERN_SUCCESS;
228 			goto done;
229 		}
230 		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
231 		if (upl == NULL) {
232 			upl_unlock(vector_upl);
233 			kr = KERN_FAILURE;
234 			goto done;
235 		}
236 		subupl_size -= size;
237 		subupl_offset += size;
238 	}
239 
240 	*empty = FALSE;
241 
242 #if UPL_DEBUG
243 	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
244 		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
245 		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
246 		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
247 		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
248 
249 		upl->upl_commit_index++;
250 	}
251 #endif
252 	if (upl->flags & UPL_DEVICE_MEMORY) {
253 		xfer_size = 0;
254 	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
255 		xfer_size = size;
256 	} else {
257 		if (!isVectorUPL) {
258 			upl_unlock(upl);
259 		} else {
260 			upl_unlock(vector_upl);
261 		}
262 		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
263 		kr = KERN_FAILURE;
264 		goto done;
265 	}
266 	object = upl->map_object;
267 
268 	if (upl->flags & UPL_SHADOWED) {
269 		vm_object_lock(object);
270 		shadow_object = object->shadow;
271 	} else {
272 		shadow_object = object;
273 	}
274 
275 	target_offset = (vm_object_offset_t)offset;
276 
277 	if (upl->flags & UPL_KERNEL_OBJECT) {
278 		vm_object_lock_shared(shadow_object);
279 	} else {
280 		vm_object_lock(shadow_object);
281 	}
282 
283 	if (upl->flags & UPL_ACCESS_BLOCKED) {
284 		assert(shadow_object->blocked_access);
285 		shadow_object->blocked_access = FALSE;
286 		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
287 	}
288 
289 	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
290 		panic("upl_abort_range: kernel_object being DUMPED");
291 	}
292 
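	/*
	 * Convert the UPL-relative byte range into page-aligned offsets within
	 * the shadow object, accounting for the UPL's offset into the object
	 * and the object's paging_offset.
	 */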
293 	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
294 	obj_end = obj_start + xfer_size;
295 	obj_start = vm_object_trunc_page(obj_start);
296 	obj_end = vm_object_round_page(obj_end);
297 	for (obj_offset = obj_start;
298 	    obj_offset < obj_end;
299 	    obj_offset += PAGE_SIZE) {
300 		vm_page_t       t, m;
301 		unsigned int    pg_num;
302 		boolean_t       needed;
303 
304 		pg_num = (unsigned int) (target_offset / PAGE_SIZE);
305 		assert(pg_num == target_offset / PAGE_SIZE);
306 
307 		needed = FALSE;
308 
309 		if (upl->flags & UPL_INTERNAL) {
310 			needed = upl->page_list[pg_num].needed;
311 		}
312 
313 		dwp->dw_mask = 0;
314 		m = VM_PAGE_NULL;
315 
316 		if (upl->flags & UPL_LITE) {
317 			if (bitmap_test(upl->lite_list, pg_num)) {
318 				bitmap_clear(upl->lite_list, pg_num);
319 
320 				if (!(upl->flags & UPL_KERNEL_OBJECT)) {
321 					m = vm_page_lookup(shadow_object, obj_offset);
322 				}
323 			}
324 		}
325 		if (upl->flags & UPL_SHADOWED) {
326 			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
327 				t->vmp_free_when_done = FALSE;
328 
329 				VM_PAGE_FREE(t);
330 
331 				if (m == VM_PAGE_NULL) {
332 					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
333 				}
334 			}
335 		}
336 		if ((upl->flags & UPL_KERNEL_OBJECT)) {
337 			goto abort_next_page;
338 		}
339 
340 		if (m != VM_PAGE_NULL) {
341 			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
342 
343 			if (m->vmp_absent) {
344 				boolean_t must_free = TRUE;
345 
346 				/*
347 				 * COPYOUT = FALSE case
348 				 * check for error conditions which must
349 				 * be passed back to the page's customer
350 				 */
351 				if (error & UPL_ABORT_RESTART) {
352 					m->vmp_restart = TRUE;
353 					m->vmp_absent = FALSE;
354 					m->vmp_unusual = TRUE;
355 					must_free = FALSE;
356 				} else if (error & UPL_ABORT_UNAVAILABLE) {
357 					m->vmp_restart = FALSE;
358 					m->vmp_unusual = TRUE;
359 					must_free = FALSE;
360 				} else if (error & UPL_ABORT_ERROR) {
361 					m->vmp_restart = FALSE;
362 					m->vmp_absent = FALSE;
363 					m->vmp_error = TRUE;
364 					m->vmp_unusual = TRUE;
365 					must_free = FALSE;
366 				}
367 				if (m->vmp_clustered && needed == FALSE) {
368 					/*
369 					 * This page was a part of a speculative
370 					 * read-ahead initiated by the kernel
371 					 * itself.  No one is expecting this
372 					 * page and no one will clean up its
373 					 * error state if it ever becomes valid
374 					 * in the future.
375 					 * We have to free it here.
376 					 */
377 					must_free = TRUE;
378 				}
379 				m->vmp_cleaning = FALSE;
380 
381 				if (m->vmp_overwriting && !m->vmp_busy) {
382 					/*
383 					 * this shouldn't happen since
384 					 * this is an 'absent' page, but
385 					 * it doesn't hurt to check for
386 					 * the 'alternate' method of
387 					 * stabilizing the page...
388 					 * we will mark 'busy' to be cleared
389 					 * in the following code which will
390 					 * take care of the primary stabilization
391 					 * method (i.e. setting 'busy' to TRUE)
392 					 */
393 					dwp->dw_mask |= DW_vm_page_unwire;
394 				}
395 				m->vmp_overwriting = FALSE;
396 
397 				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
398 
399 				if (must_free == TRUE) {
400 					dwp->dw_mask |= DW_vm_page_free;
401 				} else {
402 					dwp->dw_mask |= DW_vm_page_activate;
403 				}
404 			} else {
405 				/*
406 				 * Handle the trusted pager throttle.
407 				 */
408 				if (m->vmp_laundry) {
409 					dwp->dw_mask |= DW_vm_pageout_throttle_up;
410 				}
411 
412 				if (upl->flags & UPL_ACCESS_BLOCKED) {
413 					/*
414 					 * We blocked access to the pages in this UPL.
415 					 * Clear the "busy" bit and wake up any waiter
416 					 * for this page.
417 					 */
418 					dwp->dw_mask |= DW_clear_busy;
419 				}
420 				if (m->vmp_overwriting) {
421 					if (VM_PAGE_WIRED(m)) {
422 						/*
423 						 * deal with the 'alternate' method
424 						 * of stabilizing the page...
425 						 * we will either free the page
426 						 * or mark 'busy' to be cleared
427 						 * in the following code which will
428 						 * take care of the primary stabilization
429 						 * method (i.e. setting 'busy' to TRUE)
430 						 */
431 						if (m->vmp_busy) {
432 //							printf("*******   FBDP %s:%d page %p object %p offset 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
433 							upl_pages_wired_busy++;
434 						}
435 						dwp->dw_mask |= DW_vm_page_unwire;
436 					} else {
437 						assert(m->vmp_busy);
438 						dwp->dw_mask |= DW_clear_busy;
439 					}
440 					m->vmp_overwriting = FALSE;
441 				}
442 				m->vmp_free_when_done = FALSE;
443 				m->vmp_cleaning = FALSE;
444 
445 				if (error & UPL_ABORT_DUMP_PAGES) {
446 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
447 
448 					dwp->dw_mask |= DW_vm_page_free;
449 				} else {
450 					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
451 						if (error & UPL_ABORT_REFERENCE) {
452 							/*
453 							 * we've been told to explicitly
454 							 * reference this page... for
455 							 * file I/O, this is done by
456 							 * implementing an LRU on the inactive q
457 							 */
458 							dwp->dw_mask |= DW_vm_page_lru;
459 						} else if (!VM_PAGE_PAGEABLE(m)) {
460 							dwp->dw_mask |= DW_vm_page_deactivate_internal;
461 						}
462 					}
463 					dwp->dw_mask |= DW_PAGE_WAKEUP;
464 				}
465 			}
466 		}
467 abort_next_page:
468 		target_offset += PAGE_SIZE_64;
469 		xfer_size -= PAGE_SIZE;
470 
471 		if (dwp->dw_mask) {
472 			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
473 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
474 
475 				if (dw_count >= dw_limit) {
476 					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
477 
478 					dwp = dwp_start;
479 					dw_count = 0;
480 				}
481 			} else {
482 				if (dwp->dw_mask & DW_clear_busy) {
483 					m->vmp_busy = FALSE;
484 				}
485 
486 				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
487 					vm_page_wakeup(shadow_object, m);
488 				}
489 			}
490 		}
491 	}
492 	if (dw_count) {
493 		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
494 		dwp = dwp_start;
495 		dw_count = 0;
496 	}
497 
498 	if (upl->flags & UPL_DEVICE_MEMORY) {
499 		occupied = 0;
500 	} else if (upl->flags & UPL_LITE) {
501 		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
502 
503 		occupied = !bitmap_is_empty(upl->lite_list, pages);
504 	} else {
505 		occupied = !vm_page_queue_empty(&upl->map_object->memq);
506 	}
507 	if (occupied == 0) {
508 		/*
509 		 * If this UPL element belongs to a Vector UPL and is
510 		 * empty, then this is the right function to deallocate
511 		 * it. So go ahead and set the *empty variable. The flag
512 		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
513 		 * should be considered relevant for the Vector UPL and
514 		 * not the internal UPLs.
515 		 */
516 		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
517 			*empty = TRUE;
518 		}
519 
520 		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
521 			/*
522 			 * this is not a paging object
523 			 * so we need to drop the paging reference
524 			 * that was taken when we created the UPL
525 			 * against this object
526 			 */
527 			vm_object_activity_end(shadow_object);
528 			vm_object_collapse(shadow_object, 0, TRUE);
529 		} else {
530 			/*
531 			 * we donated the paging reference to
532 			 * the map object... vm_pageout_object_terminate
533 			 * will drop this reference
534 			 */
535 		}
536 	}
537 	vm_object_unlock(shadow_object);
538 	if (object != shadow_object) {
539 		vm_object_unlock(object);
540 	}
541 
542 	if (!isVectorUPL) {
543 		upl_unlock(upl);
544 	} else {
545 		/*
546 		 * If we completed our operations on an UPL that is
547 		 * part of a Vectored UPL and if empty is TRUE, then
548 		 * we should go ahead and deallocate this UPL element.
549 		 * Then we check if this was the last of the UPL elements
550 		 * within that Vectored UPL. If so, set empty to TRUE
551 		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
552 		 * can go ahead and deallocate the Vector UPL too.
553 		 */
554 		if (*empty == TRUE) {
555 			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
556 			upl_deallocate(upl);
557 		}
558 		goto process_upl_to_abort;
559 	}
560 
561 	kr = KERN_SUCCESS;
562 
563 done:
564 	if (dwp_start && dwp_finish_ctx) {
565 		vm_page_delayed_work_finish_ctx(dwp_start);
566 		dwp_start = dwp = NULL;
567 	}
568 
569 	return kr;
570 }
571 
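/* abort an entire UPL (convenience wrapper around upl_abort_range()) */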
572 kern_return_t
573 upl_abort(
574 	upl_t   upl,
575 	int     error)
576 {
577 	boolean_t       empty;
578 
579 	if (upl == UPL_NULL) {
580 		return KERN_INVALID_ARGUMENT;
581 	}
582 
583 	return upl_abort_range(upl, 0, upl->u_size, error, &empty);
584 }
585 
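/*
 * Commit the pages in the given range of a UPL: clear the corresponding
 * lite-list bits, update each page's dirty/precious/code-signing state
 * according to the UPL_COMMIT_* flags, unwire or requeue the pages, and
 * wake up any waiters.  For a vector UPL, each sub-UPL covering the range
 * is processed in turn.  On return, *empty reports whether the UPL ended
 * up with no occupied pages.
 */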
586 kern_return_t
587 upl_commit_range(
588 	upl_t                   upl,
589 	upl_offset_t            offset,
590 	upl_size_t              size,
591 	int                     flags,
592 	upl_page_info_t         *page_list,
593 	mach_msg_type_number_t  count,
594 	boolean_t               *empty)
595 {
596 	upl_size_t              xfer_size, subupl_size;
597 	vm_object_t             shadow_object;
598 	vm_object_t             object;
599 	vm_object_t             m_object;
600 	vm_object_offset_t      target_offset;
601 	upl_offset_t            subupl_offset = offset;
602 	int                     entry;
603 	int                     occupied;
604 	int                     clear_refmod = 0;
605 	int                     pgpgout_count = 0;
606 	struct  vm_page_delayed_work    dw_array;
607 	struct  vm_page_delayed_work    *dwp, *dwp_start;
608 	bool                    dwp_finish_ctx = TRUE;
609 	int                     dw_count;
610 	int                     dw_limit;
611 	int                     isVectorUPL = 0;
612 	upl_t                   vector_upl = NULL;
613 	boolean_t               should_be_throttled = FALSE;
614 
615 	vm_page_t               nxt_page = VM_PAGE_NULL;
616 	int                     fast_path_possible = 0;
617 	int                     fast_path_full_commit = 0;
618 	int                     throttle_page = 0;
619 	int                     unwired_count = 0;
620 	int                     local_queue_count = 0;
621 	vm_page_t               first_local, last_local;
622 	vm_object_offset_t      obj_start, obj_end, obj_offset;
623 	kern_return_t           kr = KERN_SUCCESS;
624 
625 //	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
626 
627 	dwp_start = dwp = NULL;
628 
629 	subupl_size = size;
630 	*empty = FALSE;
631 
632 	if (upl == UPL_NULL) {
633 		return KERN_INVALID_ARGUMENT;
634 	}
635 
636 	dw_count = 0;
637 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
638 	dwp_start = vm_page_delayed_work_get_ctx();
639 	if (dwp_start == NULL) {
640 		dwp_start = &dw_array;
641 		dw_limit = 1;
642 		dwp_finish_ctx = FALSE;
643 	}
644 
645 	dwp = dwp_start;
646 
647 	if (count == 0) {
648 		page_list = NULL;
649 	}
650 
651 	if ((isVectorUPL = vector_upl_is_valid(upl))) {
652 		vector_upl = upl;
653 		upl_lock(vector_upl);
654 	} else {
655 		upl_lock(upl);
656 	}
657 
658 process_upl_to_commit:
659 
660 	if (isVectorUPL) {
661 		size = subupl_size;
662 		offset = subupl_offset;
663 		if (size == 0) {
664 			upl_unlock(vector_upl);
665 			kr = KERN_SUCCESS;
666 			goto done;
667 		}
668 		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
669 		if (upl == NULL) {
670 			upl_unlock(vector_upl);
671 			kr = KERN_FAILURE;
672 			goto done;
673 		}
674 		assertf(upl->flags & UPL_INTERNAL, "%s: sub-upl %p of vector upl %p has no internal page list",
675 		    __func__, upl, vector_upl);
676 		page_list = upl->page_list;
677 		subupl_size -= size;
678 		subupl_offset += size;
679 	}
680 
681 #if UPL_DEBUG
682 	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
683 		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
684 		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
685 		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
686 
687 		upl->upl_commit_index++;
688 	}
689 #endif
690 	if (upl->flags & UPL_DEVICE_MEMORY) {
691 		xfer_size = 0;
692 	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
693 		xfer_size = size;
694 	} else {
695 		if (!isVectorUPL) {
696 			upl_unlock(upl);
697 		} else {
698 			upl_unlock(vector_upl);
699 		}
700 		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
701 		kr = KERN_FAILURE;
702 		goto done;
703 	}
704 	if (upl->flags & UPL_SET_DIRTY) {
705 		flags |= UPL_COMMIT_SET_DIRTY;
706 	}
707 	if (upl->flags & UPL_CLEAR_DIRTY) {
708 		flags |= UPL_COMMIT_CLEAR_DIRTY;
709 	}
710 
711 	object = upl->map_object;
712 
713 	if (upl->flags & UPL_SHADOWED) {
714 		vm_object_lock(object);
715 		shadow_object = object->shadow;
716 	} else {
717 		shadow_object = object;
718 	}
719 	entry = offset / PAGE_SIZE;
720 	target_offset = (vm_object_offset_t)offset;
721 
722 	if (upl->flags & UPL_KERNEL_OBJECT) {
723 		vm_object_lock_shared(shadow_object);
724 	} else {
725 		vm_object_lock(shadow_object);
726 	}
727 
728 	if (upl->flags & UPL_IO_WIRE &&
729 	    !(flags & (UPL_COMMIT_INACTIVATE | UPL_COMMIT_SPECULATE)) &&
730 	    !is_kernel_object(shadow_object) &&
731 	    vm_page_deactivate_behind &&
732 	    (shadow_object->resident_page_count - shadow_object->wired_page_count + atop_64(xfer_size) >
733 	    vm_page_active_count / vm_page_deactivate_behind_min_resident_ratio)) {
734 		/*
735 		 * We're being asked to un-wire pages from a very-large resident vm-object
736 		 * Naively inserting the pages into the active queue is likely to induce
737 		 * thrashing with the backing store -- i.e. we will be forced to
738 		 * evict hot pages that are likely to be re-faulted before we can get to
739 		 * this UPL's pages in the LRU. Immediately deactivate the pages instead so
740 		 * that we can evict them before currently-active pages.
741 		 */
742 		flags |= UPL_COMMIT_INACTIVATE;
743 		KDBG(VMDBG_CODE(DBG_VM_UPL_COMMIT_FORCE_DEACTIVATE) | DBG_FUNC_NONE,
744 		    VM_KERNEL_ADDRHIDE(shadow_object), upl->u_offset, xfer_size);
745 	}
746 
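	/*
	 * Open a wired-page accounting update on the shadow object; it is
	 * closed with VM_OBJECT_WIRED_PAGE_UPDATE_END() once the pages in
	 * this range have been processed.
	 */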
747 	VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
748 
749 	if (upl->flags & UPL_ACCESS_BLOCKED) {
750 		assert(shadow_object->blocked_access);
751 		shadow_object->blocked_access = FALSE;
752 		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
753 	}
754 
755 	if (shadow_object->code_signed) {
756 		/*
757 		 * CODE SIGNING:
758 		 * If the object is code-signed, do not let this UPL tell
759 		 * us if the pages are valid or not.  Let the pages be
760 		 * validated by VM the normal way (when they get mapped or
761 		 * copied).
762 		 */
763 		flags &= ~UPL_COMMIT_CS_VALIDATED;
764 	}
765 	if (!page_list) {
766 		/*
767 		 * No page list to get the code-signing info from !?
768 		 */
769 		flags &= ~UPL_COMMIT_CS_VALIDATED;
770 	}
771 	if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
772 		should_be_throttled = TRUE;
773 	}
774 
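	/*
	 * Fast path: for a non-vector, UPL_IO_WIRE commit against an object
	 * that is not volatile or empty purgeable, pages can be unwired here
	 * and gathered on a local list, then spliced onto the appropriate
	 * global page queue in one shot instead of going through the
	 * delayed-work machinery page by page.
	 */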
775 	if ((upl->flags & UPL_IO_WIRE) &&
776 	    !(flags & UPL_COMMIT_FREE_ABSENT) &&
777 	    !isVectorUPL &&
778 	    shadow_object->purgable != VM_PURGABLE_VOLATILE &&
779 	    shadow_object->purgable != VM_PURGABLE_EMPTY) {
780 		if (!vm_page_queue_empty(&shadow_object->memq)) {
781 			if (shadow_object->internal && size == shadow_object->vo_size) {
782 				nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
783 				fast_path_full_commit = 1;
784 			}
785 			fast_path_possible = 1;
786 
787 			if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
788 			    (shadow_object->purgable == VM_PURGABLE_DENY ||
789 			    shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
790 			    shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
791 				throttle_page = 1;
792 			}
793 		}
794 	}
795 	first_local = VM_PAGE_NULL;
796 	last_local = VM_PAGE_NULL;
797 
798 	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
799 	obj_end = obj_start + xfer_size;
800 	obj_start = vm_object_trunc_page(obj_start);
801 	obj_end = vm_object_round_page(obj_end);
802 	for (obj_offset = obj_start;
803 	    obj_offset < obj_end;
804 	    obj_offset += PAGE_SIZE) {
805 		vm_page_t       t, m;
806 
807 		dwp->dw_mask = 0;
808 		clear_refmod = 0;
809 
810 		m = VM_PAGE_NULL;
811 
812 		if (upl->flags & UPL_LITE) {
813 			unsigned int    pg_num;
814 
815 			if (nxt_page != VM_PAGE_NULL) {
816 				m = nxt_page;
817 				nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
818 				target_offset = m->vmp_offset;
819 			}
820 			pg_num = (unsigned int) (target_offset / PAGE_SIZE);
821 			assert(pg_num == target_offset / PAGE_SIZE);
822 
823 			if (bitmap_test(upl->lite_list, pg_num)) {
824 				bitmap_clear(upl->lite_list, pg_num);
825 
826 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
827 					m = vm_page_lookup(shadow_object, obj_offset);
828 				}
829 			} else {
830 				m = NULL;
831 			}
832 		}
833 		if (upl->flags & UPL_SHADOWED) {
834 			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
835 				t->vmp_free_when_done = FALSE;
836 
837 				VM_PAGE_FREE(t);
838 
839 				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
840 					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
841 				}
842 			}
843 		}
844 		if (m == VM_PAGE_NULL) {
845 			goto commit_next_page;
846 		}
847 
848 		m_object = VM_PAGE_OBJECT(m);
849 
850 		if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
851 			assert(m->vmp_busy);
852 
853 			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
854 #if HAS_MTE
855 			if (vm_page_is_tag_storage_pnum(m, VM_PAGE_GET_PHYS_PAGE(m)) &&
856 			    m->vmp_ts_wanted) {
857 				dwp->dw_mask |= DW_vm_page_wakeup_tag_storage;
858 			}
859 #endif /* HAS_MTE */
860 			goto commit_next_page;
861 		}
862 
863 		if (flags & UPL_COMMIT_CS_VALIDATED) {
864 			/*
865 			 * CODE SIGNING:
866 			 * Set the code signing bits according to
867 			 * what the UPL says they should be.
868 			 */
869 			m->vmp_cs_validated |= page_list[entry].cs_validated;
870 			m->vmp_cs_tainted |= page_list[entry].cs_tainted;
871 			m->vmp_cs_nx |= page_list[entry].cs_nx;
872 		}
873 		if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
874 			m->vmp_written_by_kernel = TRUE;
875 		}
876 
877 		if (upl->flags & UPL_IO_WIRE) {
878 			if (page_list) {
879 				page_list[entry].phys_addr = 0;
880 			}
881 
882 			if (flags & UPL_COMMIT_SET_DIRTY) {
883 				SET_PAGE_DIRTY(m, FALSE);
884 			} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
885 				m->vmp_dirty = FALSE;
886 
887 				if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
888 				    m->vmp_cs_validated &&
889 				    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
890 					/*
891 					 * CODE SIGNING:
892 					 * This page is no longer dirty
893 					 * but could have been modified,
894 					 * so it will need to be
895 					 * re-validated.
896 					 */
897 					m->vmp_cs_validated = VMP_CS_ALL_FALSE;
898 
899 					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
900 
901 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
902 				}
903 				clear_refmod |= VM_MEM_MODIFIED;
904 			}
905 			if (upl->flags & UPL_ACCESS_BLOCKED) {
906 				/*
907 				 * We blocked access to the pages in this UPL.
908 				 * Clear the "busy" bit and wake up any waiter
909 				 * for this page.
910 				 */
911 				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
912 			}
913 			if (fast_path_possible) {
914 				assert(m_object->purgable != VM_PURGABLE_EMPTY);
915 				assert(m_object->purgable != VM_PURGABLE_VOLATILE);
916 				if (m->vmp_absent) {
917 					assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
918 					assert(m->vmp_wire_count == 0);
919 					assert(m->vmp_busy);
920 
921 					m->vmp_absent = FALSE;
922 					dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
923 				} else {
924 					if (m->vmp_wire_count == 0) {
925 						panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
926 					}
927 					assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
928 
929 					/*
930 					 * XXX FBDP need to update some other
931 					 * counters here (purgeable_wired_count)
932 					 * (ledgers), ...
933 					 */
934 					assert(m->vmp_wire_count > 0);
935 					m->vmp_wire_count--;
936 
937 					if (m->vmp_wire_count == 0) {
938 						m->vmp_q_state = VM_PAGE_NOT_ON_Q;
939 						m->vmp_iopl_wired = false;
940 						unwired_count++;
941 
942 #if HAS_MTE
943 						mteinfo_decrement_wire_count(m, false);
944 #endif /* HAS_MTE */
945 					}
946 				}
947 				if (m->vmp_wire_count == 0) {
948 					assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
949 
950 					if (last_local == VM_PAGE_NULL) {
951 						assert(first_local == VM_PAGE_NULL);
952 
953 						last_local = m;
954 						first_local = m;
955 					} else {
956 						assert(first_local != VM_PAGE_NULL);
957 
958 						m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
959 						first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
960 						first_local = m;
961 					}
962 					local_queue_count++;
963 
964 					if (throttle_page) {
965 						m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
966 					} else {
967 						if (flags & UPL_COMMIT_INACTIVATE) {
968 							if (shadow_object->internal) {
969 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
970 							} else {
971 								m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
972 							}
973 						} else {
974 							m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
975 						}
976 					}
977 				}
978 			} else {
979 				if (flags & UPL_COMMIT_INACTIVATE) {
980 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
981 					clear_refmod |= VM_MEM_REFERENCED;
982 				}
983 				if (m->vmp_absent) {
984 					if (flags & UPL_COMMIT_FREE_ABSENT) {
985 						dwp->dw_mask |= DW_vm_page_free;
986 					} else {
987 						m->vmp_absent = FALSE;
988 						dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
989 
990 						if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
991 							dwp->dw_mask |= DW_vm_page_activate;
992 						}
993 					}
994 				} else {
995 					dwp->dw_mask |= DW_vm_page_unwire;
996 				}
997 			}
998 			goto commit_next_page;
999 		}
1000 		assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
1001 
1002 		if (page_list) {
1003 			page_list[entry].phys_addr = 0;
1004 		}
1005 
1006 		/*
1007 		 * make sure to clear the hardware
1008 		 * modify or reference bits before
1009 		 * releasing the BUSY bit on this page
1010 		 * otherwise we risk losing a legitimate
1011 		 * change of state
1012 		 */
1013 		if (flags & UPL_COMMIT_CLEAR_DIRTY) {
1014 			m->vmp_dirty = FALSE;
1015 
1016 			clear_refmod |= VM_MEM_MODIFIED;
1017 		}
1018 		if (m->vmp_laundry) {
1019 			dwp->dw_mask |= DW_vm_pageout_throttle_up;
1020 		}
1021 
1022 		if (VM_PAGE_WIRED(m)) {
1023 			m->vmp_free_when_done = FALSE;
1024 		}
1025 
1026 		if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
1027 		    m->vmp_cs_validated &&
1028 		    m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
1029 			/*
1030 			 * CODE SIGNING:
1031 			 * This page is no longer dirty
1032 			 * but could have been modified,
1033 			 * so it will need to be
1034 			 * re-validated.
1035 			 */
1036 			m->vmp_cs_validated = VMP_CS_ALL_FALSE;
1037 
1038 			VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
1039 
1040 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1041 		}
1042 		if (m->vmp_overwriting) {
1043 			/*
1044 			 * the (COPY_OUT_FROM == FALSE) request_page_list case
1045 			 */
1046 			if (VM_PAGE_WIRED(m)) {
1047 				/*
1048 				 * alternate (COPY_OUT_FROM == FALSE) page_list case
1049 				 * Occurs when the original page was wired
1050 				 * at the time of the list request
1051 				 */
1052 				if (m->vmp_busy) {
1053 //					printf("*******   FBDP %s:%d page %p object %p offset 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
1054 					upl_pages_wired_busy++;
1055 				}
1056 				assert(!m->vmp_absent);
1057 				dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
1058 			} else {
1059 				assert(m->vmp_busy);
1060 #if CONFIG_PHANTOM_CACHE
1061 				if (m->vmp_absent && !m_object->internal) {
1062 					dwp->dw_mask |= DW_vm_phantom_cache_update;
1063 				}
1064 #endif
1065 				m->vmp_absent = FALSE;
1066 
1067 				dwp->dw_mask |= DW_clear_busy;
1068 			}
1069 			m->vmp_overwriting = FALSE;
1070 		}
1071 		m->vmp_cleaning = FALSE;
1072 
1073 		if (m->vmp_free_when_done) {
1074 			/*
1075 			 * With the clean queue enabled, UPL_PAGEOUT should
1076 			 * no longer set the pageout bit. Its pages now go
1077 			 * to the clean queue.
1078 			 *
1079 			 * We don't use the cleaned Q anymore and so this
1080 			 * assert isn't correct. The code for the clean Q
1081 			 * still exists and might be used in the future. If we
1082 			 * go back to the cleaned Q, we will re-enable this
1083 			 * assert.
1084 			 *
1085 			 * assert(!(upl->flags & UPL_PAGEOUT));
1086 			 */
1087 			assert(!m_object->internal);
1088 
1089 			m->vmp_free_when_done = FALSE;
1090 
1091 			if ((flags & UPL_COMMIT_SET_DIRTY) ||
1092 			    (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
1093 				/*
1094 				 * page was re-dirtied after we started
1095 				 * the pageout... reactivate it since
1096 				 * we don't know whether the on-disk
1097 				 * copy matches what is now in memory
1098 				 */
1099 				SET_PAGE_DIRTY(m, FALSE);
1100 
1101 				dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
1102 
1103 				if (upl->flags & UPL_PAGEOUT) {
1104 					counter_inc(&vm_statistics_reactivations);
1105 					DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
1106 				}
1107 			} else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
1108 				/*
1109 				 * Someone else might still be handling this
1110 				 * page (vm_fault() for example), so let's not
1111 				 * free it or "un-busy" it!
1112 				 * Put that page in the "speculative" queue
1113 				 * for now (since we would otherwise have freed
1114 				 * it) and let whoever is keeping the page
1115 				 * "busy" move it if needed when they're done
1116 				 * with it.
1117 				 */
1118 				dwp->dw_mask |= DW_vm_page_speculate;
1119 			} else {
1120 				/*
1121 				 * page has been successfully cleaned
1122 				 * go ahead and free it for other use
1123 				 */
1124 				if (m_object->internal) {
1125 					DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
1126 				} else {
1127 					DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
1128 				}
1129 				m->vmp_dirty = FALSE;
1130 				if (!(upl->flags & UPL_HAS_BUSY)) {
1131 					assert(!m->vmp_busy);
1132 				}
1133 				m->vmp_busy = TRUE;
1134 
1135 				dwp->dw_mask |= DW_vm_page_free;
1136 			}
1137 			goto commit_next_page;
1138 		}
1139 		/*
1140 		 * It is a part of the semantic of COPYOUT_FROM
1141 		 * UPLs that a commit implies cache sync
1142 		 * between the vm page and the backing store
1143 		 * this can be used to strip the precious bit
1144 		 * as well as clean
1145 		 */
1146 		if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
1147 			m->vmp_precious = FALSE;
1148 		}
1149 
1150 		if (flags & UPL_COMMIT_SET_DIRTY) {
1151 			SET_PAGE_DIRTY(m, FALSE);
1152 		} else {
1153 			m->vmp_dirty = FALSE;
1154 		}
1155 
1156 		/* with the clean queue on, move *all* cleaned pages to the clean queue */
1157 		if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
1158 			pgpgout_count++;
1159 
1160 			counter_inc(&vm_statistics_pageouts);
1161 			DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
1162 
1163 			dwp->dw_mask |= DW_enqueue_cleaned;
1164 		} else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
1165 			/*
1166 			 * page coming back in from being 'frozen'...
1167 			 * it was dirty before it was frozen, so keep it so
1168 			 * the vm_page_activate will notice that it really belongs
1169 			 * on the throttle queue and put it there
1170 			 */
1171 			SET_PAGE_DIRTY(m, FALSE);
1172 			dwp->dw_mask |= DW_vm_page_activate;
1173 		} else {
1174 			if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
1175 				dwp->dw_mask |= DW_vm_page_deactivate_internal;
1176 				clear_refmod |= VM_MEM_REFERENCED;
1177 			} else if (!VM_PAGE_PAGEABLE(m)) {
1178 				if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
1179 					dwp->dw_mask |= DW_vm_page_speculate;
1180 				} else if (m->vmp_reference) {
1181 					dwp->dw_mask |= DW_vm_page_activate;
1182 				} else {
1183 					dwp->dw_mask |= DW_vm_page_deactivate_internal;
1184 					clear_refmod |= VM_MEM_REFERENCED;
1185 				}
1186 			}
1187 		}
1188 		if (upl->flags & UPL_ACCESS_BLOCKED) {
1189 			/*
1190 			 * We blocked access to the pages in this UPL.
1191 			 * Clear the "busy" bit on this page before we
1192 			 * wake up any waiter.
1193 			 */
1194 			dwp->dw_mask |= DW_clear_busy;
1195 		}
1196 		/*
1197 		 * Wake up any thread waiting for this page to no longer be in the 'cleaning' state.
1198 		 */
1199 		dwp->dw_mask |= DW_PAGE_WAKEUP;
1200 
1201 commit_next_page:
1202 		if (clear_refmod) {
1203 			pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
1204 		}
1205 
1206 		target_offset += PAGE_SIZE_64;
1207 		xfer_size -= PAGE_SIZE;
1208 		entry++;
1209 
1210 		if (dwp->dw_mask) {
1211 			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
1212 				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
1213 
1214 				if (dw_count >= dw_limit) {
1215 					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1216 
1217 					dwp = dwp_start;
1218 					dw_count = 0;
1219 				}
1220 			} else {
1221 				if (dwp->dw_mask & DW_clear_busy) {
1222 					m->vmp_busy = FALSE;
1223 				}
1224 
1225 				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
1226 					vm_page_wakeup(m_object, m);
1227 				}
1228 			}
1229 		}
1230 	}
1231 	if (dw_count) {
1232 		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1233 		dwp = dwp_start;
1234 		dw_count = 0;
1235 	}
1236 
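	/*
	 * Fast-path completion: splice the locally collected pages onto the
	 * chosen global queue and fix up the global page counts while holding
	 * the page-queues lock only once.
	 */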
1237 	if (fast_path_possible) {
1238 		assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
1239 		assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
1240 
1241 		if (local_queue_count || unwired_count) {
1242 			if (local_queue_count) {
1243 				vm_page_t       first_target;
1244 				vm_page_queue_head_t    *target_queue;
1245 
1246 				if (throttle_page) {
1247 					target_queue = &vm_page_queue_throttled;
1248 				} else {
1249 					if (flags & UPL_COMMIT_INACTIVATE) {
1250 						if (shadow_object->internal) {
1251 							target_queue = &vm_page_queue_anonymous;
1252 						} else {
1253 							target_queue = &vm_page_queue_inactive;
1254 						}
1255 					} else {
1256 						target_queue = &vm_page_queue_active;
1257 					}
1258 				}
1259 				/*
1260 				 * Transfer the entire local queue to the chosen global LRU page queue.
1261 				 */
1262 				vm_page_lockspin_queues();
1263 
1264 				first_target = (vm_page_t) vm_page_queue_first(target_queue);
1265 
1266 				if (vm_page_queue_empty(target_queue)) {
1267 					target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1268 				} else {
1269 					first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1270 				}
1271 
1272 				target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
1273 				first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
1274 				last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
1275 
1276 				/*
1277 				 * Adjust the global page counts.
1278 				 */
1279 				if (throttle_page) {
1280 					vm_page_throttled_count += local_queue_count;
1281 				} else {
1282 					if (flags & UPL_COMMIT_INACTIVATE) {
1283 						if (shadow_object->internal) {
1284 							vm_page_anonymous_count += local_queue_count;
1285 						}
1286 						vm_page_inactive_count += local_queue_count;
1287 
1288 						token_new_pagecount += local_queue_count;
1289 					} else {
1290 						vm_page_active_count += local_queue_count;
1291 					}
1292 
1293 					if (shadow_object->internal) {
1294 						vm_page_pageable_internal_count += local_queue_count;
1295 					} else {
1296 						vm_page_pageable_external_count += local_queue_count;
1297 					}
1298 				}
1299 			} else {
1300 				vm_page_lockspin_queues();
1301 			}
1302 			if (unwired_count) {
1303 				vm_page_wire_count -= unwired_count;
1304 				VM_CHECK_MEMORYSTATUS;
1305 			}
1306 			vm_page_unlock_queues();
1307 
1308 			VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
1309 		}
1310 	}
1311 
1312 	if (upl->flags & UPL_DEVICE_MEMORY) {
1313 		occupied = 0;
1314 	} else if (upl->flags & UPL_LITE) {
1315 		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
1316 
1317 		occupied = !fast_path_full_commit &&
1318 		    !bitmap_is_empty(upl->lite_list, pages);
1319 	} else {
1320 		occupied = !vm_page_queue_empty(&upl->map_object->memq);
1321 	}
1322 	if (occupied == 0) {
1323 		/*
1324 		 * If this UPL element belongs to a Vector UPL and is
1325 		 * empty, then this is the right function to deallocate
1326 		 * it. So go ahead and set the *empty variable. The flag
1327 		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
1328 		 * should be considered relevant for the Vector UPL and not
1329 		 * the internal UPLs.
1330 		 */
1331 		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
1332 			*empty = TRUE;
1333 		}
1334 
1335 		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
1336 			/*
1337 			 * this is not a paging object
1338 			 * so we need to drop the paging reference
1339 			 * that was taken when we created the UPL
1340 			 * against this object
1341 			 */
1342 			vm_object_activity_end(shadow_object);
1343 			vm_object_collapse(shadow_object, 0, TRUE);
1344 		} else {
1345 			/*
1346 			 * we donated the paging reference to
1347 			 * the map object... vm_pageout_object_terminate
1348 			 * will drop this reference
1349 			 */
1350 		}
1351 	}
1352 	VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
1353 	vm_object_unlock(shadow_object);
1354 	if (object != shadow_object) {
1355 		vm_object_unlock(object);
1356 	}
1357 
1358 	if (!isVectorUPL) {
1359 		upl_unlock(upl);
1360 	} else {
1361 		/*
1362 		 * If we completed our operations on an UPL that is
1363 		 * part of a Vectored UPL and if empty is TRUE, then
1364 		 * we should go ahead and deallocate this UPL element.
1365 		 * Then we check if this was the last of the UPL elements
1366 		 * within that Vectored UPL. If so, set empty to TRUE
1367 		 * so that in ubc_upl_commit_range or ubc_upl_commit, we
1368 		 * can go ahead and deallocate the Vector UPL too.
1369 		 */
1370 		if (*empty == TRUE) {
1371 			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
1372 			upl_deallocate(upl);
1373 		}
1374 		goto process_upl_to_commit;
1375 	}
1376 	if (pgpgout_count) {
1377 		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
1378 	}
1379 
1380 	kr = KERN_SUCCESS;
1381 done:
1382 	if (dwp_start && dwp_finish_ctx) {
1383 		vm_page_delayed_work_finish_ctx(dwp_start);
1384 		dwp_start = dwp = NULL;
1385 	}
1386 
1387 	return kr;
1388 }
1389 
1390 /* an option on commit should be wire */
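/* commit an entire UPL (convenience wrapper around upl_commit_range()) */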
1391 kern_return_t
1392 upl_commit(
1393 	upl_t                   upl,
1394 	upl_page_info_t         *page_list,
1395 	mach_msg_type_number_t  count)
1396 {
1397 	boolean_t       empty;
1398 
1399 	if (upl == UPL_NULL) {
1400 		return KERN_INVALID_ARGUMENT;
1401 	}
1402 
1403 	return upl_commit_range(upl, 0, upl->u_size, 0,
1404 	           page_list, count, &empty);
1405 }
1406