1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_upl.h>
30 #include <vm/vm_pageout_internal.h>
31 #include <vm/vm_page_internal.h>
32 #include <vm/vm_map_internal.h>
33 #include <mach/upl_server.h>
34 #include <kern/host_statistics.h>
35 #include <vm/vm_purgeable_internal.h>
36 #include <vm/vm_object_internal.h>
37 #include <vm/vm_ubc.h>
38
39 extern boolean_t hibernate_cleaning_in_progress;
40
41 /* map a (whole) upl into an address space */
42 kern_return_t
vm_upl_map(vm_map_t map,upl_t upl,vm_address_t * dst_addr)43 vm_upl_map(
44 vm_map_t map,
45 upl_t upl,
46 vm_address_t *dst_addr)
47 {
48 vm_map_offset_t map_addr;
49 kern_return_t kr;
50
51 if (VM_MAP_NULL == map) {
52 return KERN_INVALID_ARGUMENT;
53 }
54
55 kr = vm_map_enter_upl(map, upl, &map_addr);
56 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
57 return kr;
58 }
59
60 kern_return_t
vm_upl_unmap(vm_map_t map,upl_t upl)61 vm_upl_unmap(
62 vm_map_t map,
63 upl_t upl)
64 {
65 if (VM_MAP_NULL == map) {
66 return KERN_INVALID_ARGUMENT;
67 }
68
69 return vm_map_remove_upl(map, upl);
70 }
71
72 /* map a part of a upl into an address space with requested protection. */
73 kern_return_t
vm_upl_map_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_address_t * dst_addr)74 vm_upl_map_range(
75 vm_map_t map,
76 upl_t upl,
77 vm_offset_t offset_to_map,
78 vm_size_t size_to_map,
79 vm_prot_t prot_to_map,
80 vm_address_t *dst_addr)
81 {
82 vm_map_offset_t map_addr, aligned_offset_to_map, adjusted_offset;
83 kern_return_t kr;
84
85 if (VM_MAP_NULL == map) {
86 return KERN_INVALID_ARGUMENT;
87 }
88 aligned_offset_to_map = vm_map_trunc_page(offset_to_map, vm_map_page_mask(map));
89 adjusted_offset = offset_to_map - aligned_offset_to_map;
90 size_to_map = vm_map_round_page(size_to_map + adjusted_offset, vm_map_page_mask(map));
91
92 kr = vm_map_enter_upl_range(map, upl, aligned_offset_to_map, size_to_map, prot_to_map, &map_addr);
93 *dst_addr = CAST_DOWN(vm_address_t, (map_addr + adjusted_offset));
94 return kr;
95 }
96
97 /* unmap a part of a upl that was mapped in the address space. */
98 kern_return_t
vm_upl_unmap_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)99 vm_upl_unmap_range(
100 vm_map_t map,
101 upl_t upl,
102 vm_offset_t offset_to_unmap,
103 vm_size_t size_to_unmap)
104 {
105 vm_map_offset_t aligned_offset_to_unmap, page_offset;
106
107 if (VM_MAP_NULL == map) {
108 return KERN_INVALID_ARGUMENT;
109 }
110
111 aligned_offset_to_unmap = vm_map_trunc_page(offset_to_unmap, vm_map_page_mask(map));
112 page_offset = offset_to_unmap - aligned_offset_to_unmap;
113 size_to_unmap = vm_map_round_page(size_to_unmap + page_offset, vm_map_page_mask(map));
114
115 return vm_map_remove_upl_range(map, upl, aligned_offset_to_unmap, size_to_unmap);
116 }
117
118 /* Retrieve a upl for an object underlying an address range in a map */
119
120 kern_return_t
vm_map_get_upl(vm_map_t map,vm_map_offset_t map_offset,upl_size_t * upl_size,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,upl_control_flags_t * flags,vm_tag_t tag,int force_data_sync)121 vm_map_get_upl(
122 vm_map_t map,
123 vm_map_offset_t map_offset,
124 upl_size_t *upl_size,
125 upl_t *upl,
126 upl_page_info_array_t page_list,
127 unsigned int *count,
128 upl_control_flags_t *flags,
129 vm_tag_t tag,
130 int force_data_sync)
131 {
132 upl_control_flags_t map_flags;
133 kern_return_t kr;
134
135 if (VM_MAP_NULL == map) {
136 return KERN_INVALID_ARGUMENT;
137 }
138
139 map_flags = *flags & ~UPL_NOZEROFILL;
140 if (force_data_sync) {
141 map_flags |= UPL_FORCE_DATA_SYNC;
142 }
143
144 kr = vm_map_create_upl(map,
145 map_offset,
146 upl_size,
147 upl,
148 page_list,
149 count,
150 &map_flags,
151 tag);
152
153 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
154 return kr;
155 }
156
/*
 * upl_abort_range:
 *
 * Abort (undo) the portion [offset, offset + size) of the given UPL,
 * disposing of the covered pages according to the UPL_ABORT_* bits in
 * "error".  Handles both plain and vectored UPLs; for a vectored UPL
 * the range is walked one sub-UPL at a time via process_upl_to_abort.
 *
 * On return, *empty is TRUE when this call drained the UPL (or, for a
 * vectored UPL, its last remaining sub-UPL) and the caller should
 * deallocate it.
 *
 * Page disposition is batched through the vm_page_delayed_work
 * machinery so the pageout/page queues are taken once per batch
 * instead of once per page.
 */
kern_return_t
upl_abort_range(
	upl_t upl,
	upl_offset_t offset,
	upl_size_t size,
	int error,
	boolean_t *empty)
{
	upl_size_t xfer_size, subupl_size;
	vm_object_t shadow_object;
	vm_object_t object;
	vm_object_offset_t target_offset;
	upl_offset_t subupl_offset = offset;
	int occupied;
	struct vm_page_delayed_work dw_array;
	struct vm_page_delayed_work *dwp, *dwp_start;
	bool dwp_finish_ctx = TRUE;
	int dw_count;
	int dw_limit;
	int isVectorUPL = 0;
	upl_t vector_upl = NULL;
	vm_object_offset_t obj_start, obj_end, obj_offset;
	kern_return_t kr = KERN_SUCCESS;

	// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);

	dwp_start = dwp = NULL;

	subupl_size = size;
	*empty = FALSE;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * An I/O-wired UPL that is not being dumped is "aborted" by
	 * committing it with UPL_COMMIT_FREE_ABSENT: the wired pages are
	 * released through the commit path rather than discarded here.
	 */
	if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
	}

	/*
	 * Acquire a delayed-work context for batching page operations.
	 * If none is available, fall back to a single on-stack entry
	 * (dw_limit = 1 forces a flush after every page).
	 */
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	dwp_start = vm_page_delayed_work_get_ctx();
	if (dwp_start == NULL) {
		dwp_start = &dw_array;
		dw_limit = 1;
		dwp_finish_ctx = FALSE;
	}

	dwp = dwp_start;

	/* lock the vector UPL (if any) or the plain UPL for the whole walk */
	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		vector_upl = upl;
		upl_lock(vector_upl);
	} else {
		upl_lock(upl);
	}

process_upl_to_abort:
	/*
	 * Vectored UPL: carve the next sub-UPL out of the remaining
	 * (subupl_offset, subupl_size) window.  size == 0 means the whole
	 * range has been processed.
	 */
	if (isVectorUPL) {
		size = subupl_size;
		offset = subupl_offset;
		if (size == 0) {
			upl_unlock(vector_upl);
			kr = KERN_SUCCESS;
			goto done;
		}
		upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
		if (upl == NULL) {
			upl_unlock(vector_upl);
			kr = KERN_FAILURE;
			goto done;
		}
		subupl_size -= size;
		subupl_offset += size;
	}

	*empty = FALSE;

#if UPL_DEBUG
	/* record this abort in the UPL's commit history for debugging */
	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;

		upl->upl_commit_index++;
	}
#endif
	/*
	 * Device memory has no backing vm_pages to walk; otherwise the
	 * requested range must lie within the UPL's (page-aligned) size.
	 */
	if (upl->flags & UPL_DEVICE_MEMORY) {
		xfer_size = 0;
	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
		xfer_size = size;
	} else {
		if (!isVectorUPL) {
			upl_unlock(upl);
		} else {
			upl_unlock(vector_upl);
		}
		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
		kr = KERN_FAILURE;
		goto done;
	}
	object = upl->map_object;

	/*
	 * For a shadowed UPL the map object only holds placeholder pages;
	 * the real pages live in its shadow.  Take the map object's lock
	 * here; the shadow object is locked below.
	 */
	if (upl->flags & UPL_SHADOWED) {
		vm_object_lock(object);
		shadow_object = object->shadow;
	} else {
		shadow_object = object;
	}

	target_offset = (vm_object_offset_t)offset;

	/* kernel-object UPLs only need a shared lock (pages aren't touched) */
	if (upl->flags & UPL_KERNEL_OBJECT) {
		vm_object_lock_shared(shadow_object);
	} else {
		vm_object_lock(shadow_object);
	}

	/*
	 * If this UPL blocked access to its pages, lift the block and wake
	 * anyone waiting on the object.
	 */
	if (upl->flags & UPL_ACCESS_BLOCKED) {
		assert(shadow_object->blocked_access);
		shadow_object->blocked_access = FALSE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
	}

	/* dumping pages of the kernel object would free kernel memory */
	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
		panic("upl_abort_range: kernel_object being DUMPED");
	}

	/*
	 * Translate the UPL-relative range into (page-aligned) offsets
	 * within the shadow object, then walk it page by page.
	 */
	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
	obj_end = obj_start + xfer_size;
	obj_start = vm_object_trunc_page(obj_start);
	obj_end = vm_object_round_page(obj_end);
	for (obj_offset = obj_start;
	    obj_offset < obj_end;
	    obj_offset += PAGE_SIZE) {
		vm_page_t t, m;
		unsigned int pg_num;
		boolean_t needed;

		/* index of this page within the UPL */
		pg_num = (unsigned int) (target_offset / PAGE_SIZE);
		assert(pg_num == target_offset / PAGE_SIZE);

		needed = FALSE;

		/* internal UPLs track whether a client asked for this page */
		if (upl->flags & UPL_INTERNAL) {
			needed = upl->page_list[pg_num].needed;
		}

		dwp->dw_mask = 0;
		m = VM_PAGE_NULL;

		/*
		 * Lite UPL: the lite_list bitmap marks which pages belong to
		 * the UPL.  Clear the bit and look up the page (kernel-object
		 * UPLs have no pages to look up).
		 */
		if (upl->flags & UPL_LITE) {
			if (bitmap_test(upl->lite_list, pg_num)) {
				bitmap_clear(upl->lite_list, pg_num);

				if (!(upl->flags & UPL_KERNEL_OBJECT)) {
					m = vm_page_lookup(shadow_object, obj_offset);
				}
			}
		}
		/*
		 * Shadowed UPL: free the placeholder page in the map object
		 * and locate the real page in the shadow object.
		 */
		if (upl->flags & UPL_SHADOWED) {
			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
				t->vmp_free_when_done = FALSE;

				VM_PAGE_FREE(t);

				if (m == VM_PAGE_NULL) {
					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
				}
			}
		}
		/* nothing further to do per-page for kernel-object UPLs */
		if ((upl->flags & UPL_KERNEL_OBJECT)) {
			goto abort_next_page;
		}

		if (m != VM_PAGE_NULL) {
			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);

			if (m->vmp_absent) {
				boolean_t must_free = TRUE;

				/*
				 * COPYOUT = FALSE case
				 * check for error conditions which must
				 * be passed back to the pages customer
				 */
				if (error & UPL_ABORT_RESTART) {
					m->vmp_restart = TRUE;
					m->vmp_absent = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_UNAVAILABLE) {
					m->vmp_restart = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_ERROR) {
					m->vmp_restart = FALSE;
					m->vmp_absent = FALSE;
					m->vmp_error = TRUE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				}
				if (m->vmp_clustered && needed == FALSE) {
					/*
					 * This page was a part of a speculative
					 * read-ahead initiated by the kernel
					 * itself. No one is expecting this
					 * page and no one will clean up its
					 * error state if it ever becomes valid
					 * in the future.
					 * We have to free it here.
					 */
					must_free = TRUE;
				}
				m->vmp_cleaning = FALSE;

				if (m->vmp_overwriting && !m->vmp_busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilzation
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->vmp_overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE) {
					dwp->dw_mask |= DW_vm_page_free;
				} else {
					dwp->dw_mask |= DW_vm_page_activate;
				}
			} else {
				/*
				 * Handle the trusted pager throttle.
				 */
				if (m->vmp_laundry) {
					dwp->dw_mask |= DW_vm_pageout_throttle_up;
				}

				if (upl->flags & UPL_ACCESS_BLOCKED) {
					/*
					 * We blocked access to the pages in this UPL.
					 * Clear the "busy" bit and wake up any waiter
					 * for this page.
					 */
					dwp->dw_mask |= DW_clear_busy;
				}
				if (m->vmp_overwriting) {
					if (m->vmp_busy) {
						dwp->dw_mask |= DW_clear_busy;
					} else {
						/*
						 * deal with the 'alternate' method
						 * of stabilizing the page...
						 * we will either free the page
						 * or mark 'busy' to be cleared
						 * in the following code which will
						 * take care of the primary stabilzation
						 * method (i.e. setting 'busy' to TRUE)
						 */
						dwp->dw_mask |= DW_vm_page_unwire;
					}
					m->vmp_overwriting = FALSE;
				}
				m->vmp_free_when_done = FALSE;
				m->vmp_cleaning = FALSE;

				if (error & UPL_ABORT_DUMP_PAGES) {
					/* discard the page entirely: unmap it then free it */
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

					dwp->dw_mask |= DW_vm_page_free;
				} else {
					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
						if (error & UPL_ABORT_REFERENCE) {
							/*
							 * we've been told to explictly
							 * reference this page... for
							 * file I/O, this is done by
							 * implementing an LRU on the inactive q
							 */
							dwp->dw_mask |= DW_vm_page_lru;
						} else if (!VM_PAGE_PAGEABLE(m)) {
							dwp->dw_mask |= DW_vm_page_deactivate_internal;
						}
					}
					dwp->dw_mask |= DW_PAGE_WAKEUP;
				}
			}
		}
abort_next_page:
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

		if (dwp->dw_mask) {
			/*
			 * Queue the work if it needs more than busy-clear /
			 * wakeup; flush the batch when it fills.  Pure
			 * busy-clear/wakeup is cheap enough to do inline.
			 */
			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);

					dwp = dwp_start;
					dw_count = 0;
				}
			} else {
				if (dwp->dw_mask & DW_clear_busy) {
					m->vmp_busy = FALSE;
				}

				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
					vm_page_wakeup(shadow_object, m);
				}
			}
		}
	}
	/* flush any work still pending after the loop */
	if (dw_count) {
		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
		dwp = dwp_start;
		dw_count = 0;
	}

	/* determine whether the UPL still holds any pages */
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));

		occupied = !bitmap_is_empty(upl->lite_list, pages);
	} else {
		occupied = !vm_page_queue_empty(&upl->map_object->memq);
	}
	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and
		 * not the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
			*empty = TRUE;
		}

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
			vm_object_collapse(shadow_object, 0, TRUE);
		} else {
			/*
			 * we dontated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object) {
		vm_object_unlock(object);
	}

	if (!isVectorUPL) {
		upl_unlock(upl);
	} else {
		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
		if (*empty == TRUE) {
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);
		}
		goto process_upl_to_abort;
	}

	kr = KERN_SUCCESS;

done:
	/* release the delayed-work context if we got one */
	if (dwp_start && dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
		dwp_start = dwp = NULL;
	}

	return kr;
}
559
560 kern_return_t
upl_abort(upl_t upl,int error)561 upl_abort(
562 upl_t upl,
563 int error)
564 {
565 boolean_t empty;
566
567 if (upl == UPL_NULL) {
568 return KERN_INVALID_ARGUMENT;
569 }
570
571 return upl_abort_range(upl, 0, upl->u_size, error, &empty);
572 }
573
574 kern_return_t
upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags,upl_page_info_t * page_list,mach_msg_type_number_t count,boolean_t * empty)575 upl_commit_range(
576 upl_t upl,
577 upl_offset_t offset,
578 upl_size_t size,
579 int flags,
580 upl_page_info_t *page_list,
581 mach_msg_type_number_t count,
582 boolean_t *empty)
583 {
584 upl_size_t xfer_size, subupl_size;
585 vm_object_t shadow_object;
586 vm_object_t object;
587 vm_object_t m_object;
588 vm_object_offset_t target_offset;
589 upl_offset_t subupl_offset = offset;
590 int entry;
591 int occupied;
592 int clear_refmod = 0;
593 int pgpgout_count = 0;
594 struct vm_page_delayed_work dw_array;
595 struct vm_page_delayed_work *dwp, *dwp_start;
596 bool dwp_finish_ctx = TRUE;
597 int dw_count;
598 int dw_limit;
599 int isVectorUPL = 0;
600 upl_t vector_upl = NULL;
601 boolean_t should_be_throttled = FALSE;
602
603 vm_page_t nxt_page = VM_PAGE_NULL;
604 int fast_path_possible = 0;
605 int fast_path_full_commit = 0;
606 int throttle_page = 0;
607 int unwired_count = 0;
608 int local_queue_count = 0;
609 vm_page_t first_local, last_local;
610 vm_object_offset_t obj_start, obj_end, obj_offset;
611 kern_return_t kr = KERN_SUCCESS;
612
613 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
614
615 dwp_start = dwp = NULL;
616
617 subupl_size = size;
618 *empty = FALSE;
619
620 if (upl == UPL_NULL) {
621 return KERN_INVALID_ARGUMENT;
622 }
623
624 dw_count = 0;
625 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
626 dwp_start = vm_page_delayed_work_get_ctx();
627 if (dwp_start == NULL) {
628 dwp_start = &dw_array;
629 dw_limit = 1;
630 dwp_finish_ctx = FALSE;
631 }
632
633 dwp = dwp_start;
634
635 if (count == 0) {
636 page_list = NULL;
637 }
638
639 if ((isVectorUPL = vector_upl_is_valid(upl))) {
640 vector_upl = upl;
641 upl_lock(vector_upl);
642 } else {
643 upl_lock(upl);
644 }
645
646 process_upl_to_commit:
647
648 if (isVectorUPL) {
649 size = subupl_size;
650 offset = subupl_offset;
651 if (size == 0) {
652 upl_unlock(vector_upl);
653 kr = KERN_SUCCESS;
654 goto done;
655 }
656 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
657 if (upl == NULL) {
658 upl_unlock(vector_upl);
659 kr = KERN_FAILURE;
660 goto done;
661 }
662 page_list = upl->page_list;
663 subupl_size -= size;
664 subupl_offset += size;
665 }
666
667 #if UPL_DEBUG
668 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
669 upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
670 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
671 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
672
673 upl->upl_commit_index++;
674 }
675 #endif
676 if (upl->flags & UPL_DEVICE_MEMORY) {
677 xfer_size = 0;
678 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
679 xfer_size = size;
680 } else {
681 if (!isVectorUPL) {
682 upl_unlock(upl);
683 } else {
684 upl_unlock(vector_upl);
685 }
686 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
687 kr = KERN_FAILURE;
688 goto done;
689 }
690 if (upl->flags & UPL_SET_DIRTY) {
691 flags |= UPL_COMMIT_SET_DIRTY;
692 }
693 if (upl->flags & UPL_CLEAR_DIRTY) {
694 flags |= UPL_COMMIT_CLEAR_DIRTY;
695 }
696
697 object = upl->map_object;
698
699 if (upl->flags & UPL_SHADOWED) {
700 vm_object_lock(object);
701 shadow_object = object->shadow;
702 } else {
703 shadow_object = object;
704 }
705 entry = offset / PAGE_SIZE;
706 target_offset = (vm_object_offset_t)offset;
707
708 if (upl->flags & UPL_KERNEL_OBJECT) {
709 vm_object_lock_shared(shadow_object);
710 } else {
711 vm_object_lock(shadow_object);
712 }
713
714 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
715
716 if (upl->flags & UPL_ACCESS_BLOCKED) {
717 assert(shadow_object->blocked_access);
718 shadow_object->blocked_access = FALSE;
719 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
720 }
721
722 if (shadow_object->code_signed) {
723 /*
724 * CODE SIGNING:
725 * If the object is code-signed, do not let this UPL tell
726 * us if the pages are valid or not. Let the pages be
727 * validated by VM the normal way (when they get mapped or
728 * copied).
729 */
730 flags &= ~UPL_COMMIT_CS_VALIDATED;
731 }
732 if (!page_list) {
733 /*
734 * No page list to get the code-signing info from !?
735 */
736 flags &= ~UPL_COMMIT_CS_VALIDATED;
737 }
738 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
739 should_be_throttled = TRUE;
740 }
741
742 if ((upl->flags & UPL_IO_WIRE) &&
743 !(flags & UPL_COMMIT_FREE_ABSENT) &&
744 !isVectorUPL &&
745 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
746 shadow_object->purgable != VM_PURGABLE_EMPTY) {
747 if (!vm_page_queue_empty(&shadow_object->memq)) {
748 if (shadow_object->internal && size == shadow_object->vo_size) {
749 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
750 fast_path_full_commit = 1;
751 }
752 fast_path_possible = 1;
753
754 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
755 (shadow_object->purgable == VM_PURGABLE_DENY ||
756 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
757 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
758 throttle_page = 1;
759 }
760 }
761 }
762 first_local = VM_PAGE_NULL;
763 last_local = VM_PAGE_NULL;
764
765 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
766 obj_end = obj_start + xfer_size;
767 obj_start = vm_object_trunc_page(obj_start);
768 obj_end = vm_object_round_page(obj_end);
769 for (obj_offset = obj_start;
770 obj_offset < obj_end;
771 obj_offset += PAGE_SIZE) {
772 vm_page_t t, m;
773
774 dwp->dw_mask = 0;
775 clear_refmod = 0;
776
777 m = VM_PAGE_NULL;
778
779 if (upl->flags & UPL_LITE) {
780 unsigned int pg_num;
781
782 if (nxt_page != VM_PAGE_NULL) {
783 m = nxt_page;
784 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
785 target_offset = m->vmp_offset;
786 }
787 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
788 assert(pg_num == target_offset / PAGE_SIZE);
789
790 if (bitmap_test(upl->lite_list, pg_num)) {
791 bitmap_clear(upl->lite_list, pg_num);
792
793 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
794 m = vm_page_lookup(shadow_object, obj_offset);
795 }
796 } else {
797 m = NULL;
798 }
799 }
800 if (upl->flags & UPL_SHADOWED) {
801 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
802 t->vmp_free_when_done = FALSE;
803
804 VM_PAGE_FREE(t);
805
806 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
807 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
808 }
809 }
810 }
811 if (m == VM_PAGE_NULL) {
812 goto commit_next_page;
813 }
814
815 m_object = VM_PAGE_OBJECT(m);
816
817 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
818 assert(m->vmp_busy);
819
820 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
821 goto commit_next_page;
822 }
823
824 if (flags & UPL_COMMIT_CS_VALIDATED) {
825 /*
826 * CODE SIGNING:
827 * Set the code signing bits according to
828 * what the UPL says they should be.
829 */
830 m->vmp_cs_validated |= page_list[entry].cs_validated;
831 m->vmp_cs_tainted |= page_list[entry].cs_tainted;
832 m->vmp_cs_nx |= page_list[entry].cs_nx;
833 }
834 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
835 m->vmp_written_by_kernel = TRUE;
836 }
837
838 if (upl->flags & UPL_IO_WIRE) {
839 if (page_list) {
840 page_list[entry].phys_addr = 0;
841 }
842
843 if (flags & UPL_COMMIT_SET_DIRTY) {
844 SET_PAGE_DIRTY(m, FALSE);
845 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
846 m->vmp_dirty = FALSE;
847
848 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
849 m->vmp_cs_validated &&
850 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
851 /*
852 * CODE SIGNING:
853 * This page is no longer dirty
854 * but could have been modified,
855 * so it will need to be
856 * re-validated.
857 */
858 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
859
860 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
861
862 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
863 }
864 clear_refmod |= VM_MEM_MODIFIED;
865 }
866 if (upl->flags & UPL_ACCESS_BLOCKED) {
867 /*
868 * We blocked access to the pages in this UPL.
869 * Clear the "busy" bit and wake up any waiter
870 * for this page.
871 */
872 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
873 }
874 if (fast_path_possible) {
875 assert(m_object->purgable != VM_PURGABLE_EMPTY);
876 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
877 if (m->vmp_absent) {
878 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
879 assert(m->vmp_wire_count == 0);
880 assert(m->vmp_busy);
881
882 m->vmp_absent = FALSE;
883 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
884 } else {
885 if (m->vmp_wire_count == 0) {
886 panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
887 }
888 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
889
890 /*
891 * XXX FBDP need to update some other
892 * counters here (purgeable_wired_count)
893 * (ledgers), ...
894 */
895 assert(m->vmp_wire_count > 0);
896 m->vmp_wire_count--;
897
898 if (m->vmp_wire_count == 0) {
899 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
900 unwired_count++;
901 }
902 }
903 if (m->vmp_wire_count == 0) {
904 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
905
906 if (last_local == VM_PAGE_NULL) {
907 assert(first_local == VM_PAGE_NULL);
908
909 last_local = m;
910 first_local = m;
911 } else {
912 assert(first_local != VM_PAGE_NULL);
913
914 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
915 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
916 first_local = m;
917 }
918 local_queue_count++;
919
920 if (throttle_page) {
921 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
922 } else {
923 if (flags & UPL_COMMIT_INACTIVATE) {
924 if (shadow_object->internal) {
925 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
926 } else {
927 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
928 }
929 } else {
930 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
931 }
932 }
933 }
934 } else {
935 if (flags & UPL_COMMIT_INACTIVATE) {
936 dwp->dw_mask |= DW_vm_page_deactivate_internal;
937 clear_refmod |= VM_MEM_REFERENCED;
938 }
939 if (m->vmp_absent) {
940 if (flags & UPL_COMMIT_FREE_ABSENT) {
941 dwp->dw_mask |= DW_vm_page_free;
942 } else {
943 m->vmp_absent = FALSE;
944 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
945
946 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
947 dwp->dw_mask |= DW_vm_page_activate;
948 }
949 }
950 } else {
951 dwp->dw_mask |= DW_vm_page_unwire;
952 }
953 }
954 goto commit_next_page;
955 }
956 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
957
958 if (page_list) {
959 page_list[entry].phys_addr = 0;
960 }
961
962 /*
963 * make sure to clear the hardware
964 * modify or reference bits before
965 * releasing the BUSY bit on this page
966 * otherwise we risk losing a legitimate
967 * change of state
968 */
969 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
970 m->vmp_dirty = FALSE;
971
972 clear_refmod |= VM_MEM_MODIFIED;
973 }
974 if (m->vmp_laundry) {
975 dwp->dw_mask |= DW_vm_pageout_throttle_up;
976 }
977
978 if (VM_PAGE_WIRED(m)) {
979 m->vmp_free_when_done = FALSE;
980 }
981
982 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
983 m->vmp_cs_validated &&
984 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
985 /*
986 * CODE SIGNING:
987 * This page is no longer dirty
988 * but could have been modified,
989 * so it will need to be
990 * re-validated.
991 */
992 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
993
994 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
995
996 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
997 }
998 if (m->vmp_overwriting) {
999 /*
1000 * the (COPY_OUT_FROM == FALSE) request_page_list case
1001 */
1002 if (m->vmp_busy) {
1003 #if CONFIG_PHANTOM_CACHE
1004 if (m->vmp_absent && !m_object->internal) {
1005 dwp->dw_mask |= DW_vm_phantom_cache_update;
1006 }
1007 #endif
1008 m->vmp_absent = FALSE;
1009
1010 dwp->dw_mask |= DW_clear_busy;
1011 } else {
1012 /*
1013 * alternate (COPY_OUT_FROM == FALSE) page_list case
1014 * Occurs when the original page was wired
1015 * at the time of the list request
1016 */
1017 assert(VM_PAGE_WIRED(m));
1018
1019 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
1020 }
1021 m->vmp_overwriting = FALSE;
1022 }
1023 m->vmp_cleaning = FALSE;
1024
1025 if (m->vmp_free_when_done) {
1026 /*
1027 * With the clean queue enabled, UPL_PAGEOUT should
1028 * no longer set the pageout bit. Its pages now go
1029 * to the clean queue.
1030 *
1031 * We don't use the cleaned Q anymore and so this
1032 * assert isn't correct. The code for the clean Q
1033 * still exists and might be used in the future. If we
1034 * go back to the cleaned Q, we will re-enable this
1035 * assert.
1036 *
1037 * assert(!(upl->flags & UPL_PAGEOUT));
1038 */
1039 assert(!m_object->internal);
1040
1041 m->vmp_free_when_done = FALSE;
1042
1043 if ((flags & UPL_COMMIT_SET_DIRTY) ||
1044 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
1045 /*
1046 * page was re-dirtied after we started
1047 * the pageout... reactivate it since
1048 * we don't know whether the on-disk
1049 * copy matches what is now in memory
1050 */
1051 SET_PAGE_DIRTY(m, FALSE);
1052
1053 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
1054
1055 if (upl->flags & UPL_PAGEOUT) {
1056 counter_inc(&vm_statistics_reactivations);
1057 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
1058 }
1059 } else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
1060 /*
1061 * Someone else might still be handling this
1062 * page (vm_fault() for example), so let's not
1063 * free it or "un-busy" it!
1064 * Put that page in the "speculative" queue
1065 * for now (since we would otherwise have freed
1066 * it) and let whoever is keeping the page
1067 * "busy" move it if needed when they're done
1068 * with it.
1069 */
1070 dwp->dw_mask |= DW_vm_page_speculate;
1071 } else {
1072 /*
1073 * page has been successfully cleaned
1074 * go ahead and free it for other use
1075 */
1076 if (m_object->internal) {
1077 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
1078 } else {
1079 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
1080 }
1081 m->vmp_dirty = FALSE;
1082 if (!(upl->flags & UPL_HAS_BUSY)) {
1083 assert(!m->vmp_busy);
1084 }
1085 m->vmp_busy = TRUE;
1086
1087 dwp->dw_mask |= DW_vm_page_free;
1088 }
1089 goto commit_next_page;
1090 }
1091 /*
1092 * It is a part of the semantic of COPYOUT_FROM
1093 * UPLs that a commit implies cache sync
1094 * between the vm page and the backing store
1095 * this can be used to strip the precious bit
1096 * as well as clean
1097 */
1098 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
1099 m->vmp_precious = FALSE;
1100 }
1101
1102 if (flags & UPL_COMMIT_SET_DIRTY) {
1103 SET_PAGE_DIRTY(m, FALSE);
1104 } else {
1105 m->vmp_dirty = FALSE;
1106 }
1107
1108 /* with the clean queue on, move *all* cleaned pages to the clean queue */
1109 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
1110 pgpgout_count++;
1111
1112 counter_inc(&vm_statistics_pageouts);
1113 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
1114
1115 dwp->dw_mask |= DW_enqueue_cleaned;
1116 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
1117 /*
1118 * page coming back in from being 'frozen'...
1119 * it was dirty before it was frozen, so keep it so
1120 * the vm_page_activate will notice that it really belongs
1121 * on the throttle queue and put it there
1122 */
1123 SET_PAGE_DIRTY(m, FALSE);
1124 dwp->dw_mask |= DW_vm_page_activate;
1125 } else {
1126 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
1127 dwp->dw_mask |= DW_vm_page_deactivate_internal;
1128 clear_refmod |= VM_MEM_REFERENCED;
1129 } else if (!VM_PAGE_PAGEABLE(m)) {
1130 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
1131 dwp->dw_mask |= DW_vm_page_speculate;
1132 } else if (m->vmp_reference) {
1133 dwp->dw_mask |= DW_vm_page_activate;
1134 } else {
1135 dwp->dw_mask |= DW_vm_page_deactivate_internal;
1136 clear_refmod |= VM_MEM_REFERENCED;
1137 }
1138 }
1139 }
1140 if (upl->flags & UPL_ACCESS_BLOCKED) {
1141 /*
			 * We blocked access to the pages in this UPL.
1143 * Clear the "busy" bit on this page before we
1144 * wake up any waiter.
1145 */
1146 dwp->dw_mask |= DW_clear_busy;
1147 }
1148 /*
1149 * Wakeup any thread waiting for the page to be un-cleaning.
1150 */
1151 dwp->dw_mask |= DW_PAGE_WAKEUP;
1152
1153 commit_next_page:
1154 if (clear_refmod) {
1155 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
1156 }
1157
1158 target_offset += PAGE_SIZE_64;
1159 xfer_size -= PAGE_SIZE;
1160 entry++;
1161
1162 if (dwp->dw_mask) {
1163 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
1164 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
1165
1166 if (dw_count >= dw_limit) {
1167 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1168
1169 dwp = dwp_start;
1170 dw_count = 0;
1171 }
1172 } else {
1173 if (dwp->dw_mask & DW_clear_busy) {
1174 m->vmp_busy = FALSE;
1175 }
1176
1177 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
1178 vm_page_wakeup(m_object, m);
1179 }
1180 }
1181 }
1182 }
1183 if (dw_count) {
1184 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1185 dwp = dwp_start;
1186 dw_count = 0;
1187 }
1188
1189 if (fast_path_possible) {
1190 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
1191 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
1192
1193 if (local_queue_count || unwired_count) {
1194 if (local_queue_count) {
1195 vm_page_t first_target;
1196 vm_page_queue_head_t *target_queue;
1197
1198 if (throttle_page) {
1199 target_queue = &vm_page_queue_throttled;
1200 } else {
1201 if (flags & UPL_COMMIT_INACTIVATE) {
1202 if (shadow_object->internal) {
1203 target_queue = &vm_page_queue_anonymous;
1204 } else {
1205 target_queue = &vm_page_queue_inactive;
1206 }
1207 } else {
1208 target_queue = &vm_page_queue_active;
1209 }
1210 }
1211 /*
				 * Transfer the entire local queue to the regular LRU page queues.
1213 */
1214 vm_page_lockspin_queues();
1215
1216 first_target = (vm_page_t) vm_page_queue_first(target_queue);
1217
1218 if (vm_page_queue_empty(target_queue)) {
1219 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1220 } else {
1221 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1222 }
1223
1224 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
1225 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
1226 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
1227
1228 /*
1229 * Adjust the global page counts.
1230 */
1231 if (throttle_page) {
1232 vm_page_throttled_count += local_queue_count;
1233 } else {
1234 if (flags & UPL_COMMIT_INACTIVATE) {
1235 if (shadow_object->internal) {
1236 vm_page_anonymous_count += local_queue_count;
1237 }
1238 vm_page_inactive_count += local_queue_count;
1239
1240 token_new_pagecount += local_queue_count;
1241 } else {
1242 vm_page_active_count += local_queue_count;
1243 }
1244
1245 if (shadow_object->internal) {
1246 vm_page_pageable_internal_count += local_queue_count;
1247 } else {
1248 vm_page_pageable_external_count += local_queue_count;
1249 }
1250 }
1251 } else {
1252 vm_page_lockspin_queues();
1253 }
1254 if (unwired_count) {
1255 vm_page_wire_count -= unwired_count;
1256 VM_CHECK_MEMORYSTATUS;
1257 }
1258 vm_page_unlock_queues();
1259
1260 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
1261 }
1262 }
1263
1264 if (upl->flags & UPL_DEVICE_MEMORY) {
1265 occupied = 0;
1266 } else if (upl->flags & UPL_LITE) {
1267 uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
1268
1269 occupied = !fast_path_full_commit &&
1270 !bitmap_is_empty(upl->lite_list, pages);
1271 } else {
1272 occupied = !vm_page_queue_empty(&upl->map_object->memq);
1273 }
1274 if (occupied == 0) {
1275 /*
1276 * If this UPL element belongs to a Vector UPL and is
1277 * empty, then this is the right function to deallocate
1278 * it. So go ahead set the *empty variable. The flag
1279 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
1280 * should be considered relevant for the Vector UPL and not
1281 * the internal UPLs.
1282 */
1283 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
1284 *empty = TRUE;
1285 }
1286
1287 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
1288 /*
1289 * this is not a paging object
1290 * so we need to drop the paging reference
1291 * that was taken when we created the UPL
1292 * against this object
1293 */
1294 vm_object_activity_end(shadow_object);
1295 vm_object_collapse(shadow_object, 0, TRUE);
1296 } else {
1297 /*
			 * we donated the paging reference to
1299 * the map object... vm_pageout_object_terminate
1300 * will drop this reference
1301 */
1302 }
1303 }
1304 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
1305 vm_object_unlock(shadow_object);
1306 if (object != shadow_object) {
1307 vm_object_unlock(object);
1308 }
1309
1310 if (!isVectorUPL) {
1311 upl_unlock(upl);
1312 } else {
1313 /*
1314 * If we completed our operations on an UPL that is
1315 * part of a Vectored UPL and if empty is TRUE, then
1316 * we should go ahead and deallocate this UPL element.
1317 * Then we check if this was the last of the UPL elements
1318 * within that Vectored UPL. If so, set empty to TRUE
1319 * so that in ubc_upl_commit_range or ubc_upl_commit, we
1320 * can go ahead and deallocate the Vector UPL too.
1321 */
1322 if (*empty == TRUE) {
1323 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
1324 upl_deallocate(upl);
1325 }
1326 goto process_upl_to_commit;
1327 }
1328 if (pgpgout_count) {
1329 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
1330 }
1331
1332 kr = KERN_SUCCESS;
1333 done:
1334 if (dwp_start && dwp_finish_ctx) {
1335 vm_page_delayed_work_finish_ctx(dwp_start);
1336 dwp_start = dwp = NULL;
1337 }
1338
1339 return kr;
1340 }
1341
1342 /* an option on commit should be wire */
1343 kern_return_t
upl_commit(upl_t upl,upl_page_info_t * page_list,mach_msg_type_number_t count)1344 upl_commit(
1345 upl_t upl,
1346 upl_page_info_t *page_list,
1347 mach_msg_type_number_t count)
1348 {
1349 boolean_t empty;
1350
1351 if (upl == UPL_NULL) {
1352 return KERN_INVALID_ARGUMENT;
1353 }
1354
1355 return upl_commit_range(upl, 0, upl->u_size, 0,
1356 page_list, count, &empty);
1357 }
1358