1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_upl.h>
30 #include <vm/vm_pageout_internal.h>
31 #include <vm/vm_page_internal.h>
32 #include <vm/vm_map_internal.h>
33 #include <mach/upl_server.h>
34 #include <kern/host_statistics.h>
35 #include <vm/vm_purgeable_internal.h>
36 #include <vm/vm_object_internal.h>
37 #include <vm/vm_ubc.h>
38
39 extern boolean_t hibernate_cleaning_in_progress;
40
41 /* map a (whole) upl into an address space */
42 kern_return_t
vm_upl_map(vm_map_t map,upl_t upl,vm_address_t * dst_addr)43 vm_upl_map(
44 vm_map_t map,
45 upl_t upl,
46 vm_address_t *dst_addr)
47 {
48 vm_map_offset_t map_addr;
49 kern_return_t kr;
50
51 if (VM_MAP_NULL == map) {
52 return KERN_INVALID_ARGUMENT;
53 }
54
55 kr = vm_map_enter_upl(map, upl, &map_addr);
56 *dst_addr = CAST_DOWN(vm_address_t, map_addr);
57 return kr;
58 }
59
60 kern_return_t
vm_upl_unmap(vm_map_t map,upl_t upl)61 vm_upl_unmap(
62 vm_map_t map,
63 upl_t upl)
64 {
65 if (VM_MAP_NULL == map) {
66 return KERN_INVALID_ARGUMENT;
67 }
68
69 return vm_map_remove_upl(map, upl);
70 }
71
72 /* map a part of a upl into an address space with requested protection. */
73 kern_return_t
vm_upl_map_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_address_t * dst_addr)74 vm_upl_map_range(
75 vm_map_t map,
76 upl_t upl,
77 vm_offset_t offset_to_map,
78 vm_size_t size_to_map,
79 vm_prot_t prot_to_map,
80 vm_address_t *dst_addr)
81 {
82 vm_map_offset_t map_addr, aligned_offset_to_map, adjusted_offset;
83 kern_return_t kr;
84
85 if (VM_MAP_NULL == map) {
86 return KERN_INVALID_ARGUMENT;
87 }
88 aligned_offset_to_map = vm_map_trunc_page(offset_to_map, vm_map_page_mask(map));
89 adjusted_offset = offset_to_map - aligned_offset_to_map;
90 size_to_map = vm_map_round_page(size_to_map + adjusted_offset, vm_map_page_mask(map));
91
92 kr = vm_map_enter_upl_range(map, upl, aligned_offset_to_map, size_to_map, prot_to_map, &map_addr);
93 *dst_addr = CAST_DOWN(vm_address_t, (map_addr + adjusted_offset));
94 return kr;
95 }
96
97 /* unmap a part of a upl that was mapped in the address space. */
98 kern_return_t
vm_upl_unmap_range(vm_map_t map,upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)99 vm_upl_unmap_range(
100 vm_map_t map,
101 upl_t upl,
102 vm_offset_t offset_to_unmap,
103 vm_size_t size_to_unmap)
104 {
105 vm_map_offset_t aligned_offset_to_unmap, page_offset;
106
107 if (VM_MAP_NULL == map) {
108 return KERN_INVALID_ARGUMENT;
109 }
110
111 aligned_offset_to_unmap = vm_map_trunc_page(offset_to_unmap, vm_map_page_mask(map));
112 page_offset = offset_to_unmap - aligned_offset_to_unmap;
113 size_to_unmap = vm_map_round_page(size_to_unmap + page_offset, vm_map_page_mask(map));
114
115 return vm_map_remove_upl_range(map, upl, aligned_offset_to_unmap, size_to_unmap);
116 }
117
118 /* Retrieve a upl for an object underlying an address range in a map */
119
120 kern_return_t
vm_map_get_upl(vm_map_t map,vm_map_offset_t map_offset,upl_size_t * upl_size,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,upl_control_flags_t * flags,vm_tag_t tag,int force_data_sync)121 vm_map_get_upl(
122 vm_map_t map,
123 vm_map_offset_t map_offset,
124 upl_size_t *upl_size,
125 upl_t *upl,
126 upl_page_info_array_t page_list,
127 unsigned int *count,
128 upl_control_flags_t *flags,
129 vm_tag_t tag,
130 int force_data_sync)
131 {
132 upl_control_flags_t map_flags;
133 kern_return_t kr;
134
135 if (VM_MAP_NULL == map) {
136 return KERN_INVALID_ARGUMENT;
137 }
138
139 map_flags = *flags & ~UPL_NOZEROFILL;
140 if (force_data_sync) {
141 map_flags |= UPL_FORCE_DATA_SYNC;
142 }
143
144 kr = vm_map_create_upl(map,
145 map_offset,
146 upl_size,
147 upl,
148 page_list,
149 count,
150 &map_flags,
151 tag);
152
153 *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
154 return kr;
155 }
156
/*
 * Diagnostic counter: number of times a page was found both wired and
 * "busy" while its UPL was being aborted or committed (see the
 * vmp_overwriting handling in upl_abort_range() / upl_commit_range()).
 */
uint64_t upl_pages_wired_busy = 0;
158
/*
 * upl_abort_range:
 *
 * Abort (undo) the pages covered by [offset, offset + size) of "upl",
 * applying the UPL_ABORT_* disposition bits in "error" to each page
 * (restart, mark unavailable, mark in error, dump/free, re-reference...).
 *
 * "upl" may be a vector UPL, in which case this loops over the sub-UPLs
 * that make up the requested range (see process_upl_to_abort).
 *
 * On return, *empty is TRUE when the (sub-)UPL no longer has any pages
 * associated with it and either UPL_COMMIT_NOTIFY_EMPTY was set or the
 * UPL was part of a vector UPL.
 *
 * IO_WIRE UPLs without UPL_ABORT_DUMP_PAGES are delegated to
 * upl_commit_range() with UPL_COMMIT_FREE_ABSENT.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for a null UPL, or
 * KERN_FAILURE when the range is out of bounds or a sub-UPL lookup fails.
 */
kern_return_t
upl_abort_range(
	upl_t                   upl,
	upl_offset_t            offset,
	upl_size_t              size,
	int                     error,
	boolean_t               *empty)
{
	upl_size_t              xfer_size, subupl_size;
	vm_object_t             shadow_object;
	vm_object_t             object;
	vm_object_offset_t      target_offset;
	upl_offset_t            subupl_offset = offset;
	int                     occupied;
	struct vm_page_delayed_work     dw_array;
	struct vm_page_delayed_work     *dwp, *dwp_start;
	bool                    dwp_finish_ctx = TRUE;
	int                     dw_count;
	int                     dw_limit;
	int                     isVectorUPL = 0;
	upl_t                   vector_upl = NULL;
	vm_object_offset_t      obj_start, obj_end, obj_offset;
	kern_return_t           kr = KERN_SUCCESS;

	// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);

	dwp_start = dwp = NULL;

	subupl_size = size;
	*empty = FALSE;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Aborting a wired-I/O UPL (unless we are dumping its pages) is
	 * handled as a commit that frees any absent pages.
	 */
	if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
	}

	/*
	 * Set up a delayed-work context so per-page queue manipulations can
	 * be batched; fall back to a single-entry on-stack array when no
	 * context is available (dw_limit 1 forces a flush per page).
	 */
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	dwp_start = vm_page_delayed_work_get_ctx();
	if (dwp_start == NULL) {
		dwp_start = &dw_array;
		dw_limit = 1;
		dwp_finish_ctx = FALSE;
	}

	dwp = dwp_start;

	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		vector_upl = upl;
		upl_lock(vector_upl);
	} else {
		upl_lock(upl);
	}

process_upl_to_abort:
	/*
	 * For a vector UPL, peel off the sub-UPL covering the next portion
	 * of the remaining range; we loop back here once per sub-UPL until
	 * the whole requested range has been processed.
	 */
	if (isVectorUPL) {
		size = subupl_size;
		offset = subupl_offset;
		if (size == 0) {
			/* entire requested range has been processed */
			upl_unlock(vector_upl);
			kr = KERN_SUCCESS;
			goto done;
		}
		upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
		if (upl == NULL) {
			upl_unlock(vector_upl);
			kr = KERN_FAILURE;
			goto done;
		}
		subupl_size -= size;
		subupl_offset += size;
	}

	*empty = FALSE;

#if UPL_DEBUG
	/* record this abort in the UPL's commit/abort history */
	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
		upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;

		upl->upl_commit_index++;
	}
#endif
	if (upl->flags & UPL_DEVICE_MEMORY) {
		/* device memory: no resident pages to walk */
		xfer_size = 0;
	} else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
		xfer_size = size;
	} else {
		/* requested range extends past the end of the UPL */
		if (!isVectorUPL) {
			upl_unlock(upl);
		} else {
			upl_unlock(vector_upl);
		}
		DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
		kr = KERN_FAILURE;
		goto done;
	}
	object = upl->map_object;

	if (upl->flags & UPL_SHADOWED) {
		/* real pages live in the shadow; "object" holds placeholders */
		vm_object_lock(object);
		shadow_object = object->shadow;
	} else {
		shadow_object = object;
	}

	target_offset = (vm_object_offset_t)offset;

	if (upl->flags & UPL_KERNEL_OBJECT) {
		vm_object_lock_shared(shadow_object);
	} else {
		vm_object_lock(shadow_object);
	}

	if (upl->flags & UPL_ACCESS_BLOCKED) {
		/* this UPL blocked access to its pages: unblock and wake waiters */
		assert(shadow_object->blocked_access);
		shadow_object->blocked_access = FALSE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
	}

	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
		panic("upl_abort_range: kernel_object being DUMPED");
	}

	/*
	 * Convert the UPL-relative range into page-aligned offsets within
	 * the shadow object, then visit each page in the range.
	 */
	obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
	obj_end = obj_start + xfer_size;
	obj_start = vm_object_trunc_page(obj_start);
	obj_end = vm_object_round_page(obj_end);
	for (obj_offset = obj_start;
	    obj_offset < obj_end;
	    obj_offset += PAGE_SIZE) {
		vm_page_t       t, m;
		unsigned int    pg_num;
		boolean_t       needed;

		pg_num = (unsigned int) (target_offset / PAGE_SIZE);
		assert(pg_num == target_offset / PAGE_SIZE);

		needed = FALSE;

		if (upl->flags & UPL_INTERNAL) {
			/* was this page explicitly requested by the UPL's creator? */
			needed = upl->page_list[pg_num].needed;
		}

		dwp->dw_mask = 0;
		m = VM_PAGE_NULL;

		if (upl->flags & UPL_LITE) {
			/* lite list: a set bit means the page belongs to this UPL */
			if (bitmap_test(upl->lite_list, pg_num)) {
				bitmap_clear(upl->lite_list, pg_num);

				if (!(upl->flags & UPL_KERNEL_OBJECT)) {
					m = vm_page_lookup(shadow_object, obj_offset);
				}
			}
		}
		if (upl->flags & UPL_SHADOWED) {
			/* free the placeholder page held in the shadow map object */
			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
				t->vmp_free_when_done = FALSE;

				VM_PAGE_FREE(t);

				if (m == VM_PAGE_NULL) {
					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
				}
			}
		}
		if ((upl->flags & UPL_KERNEL_OBJECT)) {
			/* kernel-object UPLs only maintain the lite list; no page state to fix up */
			goto abort_next_page;
		}

		if (m != VM_PAGE_NULL) {
			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);

			if (m->vmp_absent) {
				boolean_t must_free = TRUE;

				/*
				 * COPYOUT = FALSE case
				 * check for error conditions which must
				 * be passed back to the pages customer
				 */
				if (error & UPL_ABORT_RESTART) {
					m->vmp_restart = TRUE;
					m->vmp_absent = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_UNAVAILABLE) {
					m->vmp_restart = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_ERROR) {
					m->vmp_restart = FALSE;
					m->vmp_absent = FALSE;
					m->vmp_error = TRUE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				}
				if (m->vmp_clustered && needed == FALSE) {
					/*
					 * This page was a part of a speculative
					 * read-ahead initiated by the kernel
					 * itself. No one is expecting this
					 * page and no one will clean up its
					 * error state if it ever becomes valid
					 * in the future.
					 * We have to free it here.
					 */
					must_free = TRUE;
				}
				m->vmp_cleaning = FALSE;

				if (m->vmp_overwriting && !m->vmp_busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->vmp_overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE) {
					dwp->dw_mask |= DW_vm_page_free;
				} else {
					dwp->dw_mask |= DW_vm_page_activate;
				}
			} else {
				/*
				 * Handle the trusted pager throttle.
				 */
				if (m->vmp_laundry) {
					dwp->dw_mask |= DW_vm_pageout_throttle_up;
				}

				if (upl->flags & UPL_ACCESS_BLOCKED) {
					/*
					 * We blocked access to the pages in this UPL.
					 * Clear the "busy" bit and wake up any waiter
					 * for this page.
					 */
					dwp->dw_mask |= DW_clear_busy;
				}
				if (m->vmp_overwriting) {
					if (VM_PAGE_WIRED(m)) {
						/*
						 * deal with the 'alternate' method
						 * of stabilizing the page...
						 * we will either free the page
						 * or mark 'busy' to be cleared
						 * in the following code which will
						 * take care of the primary stabilization
						 * method (i.e. setting 'busy' to TRUE)
						 */
						if (m->vmp_busy) {
							// printf("******* FBDP %s:%d page %p object %p ofsfet 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
							upl_pages_wired_busy++;
						}
						dwp->dw_mask |= DW_vm_page_unwire;
					} else {
						assert(m->vmp_busy);
						dwp->dw_mask |= DW_clear_busy;
					}
					m->vmp_overwriting = FALSE;
				}
				m->vmp_free_when_done = FALSE;
				m->vmp_cleaning = FALSE;

				if (error & UPL_ABORT_DUMP_PAGES) {
					/* caller asked for the pages to be thrown away entirely */
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

					dwp->dw_mask |= DW_vm_page_free;
				} else {
					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
						if (error & UPL_ABORT_REFERENCE) {
							/*
							 * we've been told to explicitly
							 * reference this page... for
							 * file I/O, this is done by
							 * implementing an LRU on the inactive q
							 */
							dwp->dw_mask |= DW_vm_page_lru;
						} else if (!VM_PAGE_PAGEABLE(m)) {
							dwp->dw_mask |= DW_vm_page_deactivate_internal;
						}
					}
					dwp->dw_mask |= DW_PAGE_WAKEUP;
				}
			}
		}
abort_next_page:
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

		if (dwp->dw_mask) {
			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				/* queue the work; flush the batch once it fills up */
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);

					dwp = dwp_start;
					dw_count = 0;
				}
			} else {
				/* only busy-clear/wakeup requested: cheap enough to do inline */
				if (dwp->dw_mask & DW_clear_busy) {
					m->vmp_busy = FALSE;
				}

				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
					vm_page_wakeup(shadow_object, m);
				}
			}
		}
	}
	/* flush any remaining batched page work */
	if (dw_count) {
		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
		dwp = dwp_start;
		dw_count = 0;
	}

	/* determine whether the UPL still has any pages associated with it */
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));

		occupied = !bitmap_is_empty(upl->lite_list, pages);
	} else {
		occupied = !vm_page_queue_empty(&upl->map_object->memq);
	}
	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and
		 * not the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
			*empty = TRUE;
		}

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
			vm_object_collapse(shadow_object, 0, TRUE);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object) {
		vm_object_unlock(object);
	}

	if (!isVectorUPL) {
		upl_unlock(upl);
	} else {
		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
		if (*empty == TRUE) {
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);
		}
		goto process_upl_to_abort;
	}

	kr = KERN_SUCCESS;

done:
	/* return the delayed-work context, but only if we allocated one */
	if (dwp_start && dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
		dwp_start = dwp = NULL;
	}

	return kr;
}
566
567 kern_return_t
upl_abort(upl_t upl,int error)568 upl_abort(
569 upl_t upl,
570 int error)
571 {
572 boolean_t empty;
573
574 if (upl == UPL_NULL) {
575 return KERN_INVALID_ARGUMENT;
576 }
577
578 return upl_abort_range(upl, 0, upl->u_size, error, &empty);
579 }
580
581 kern_return_t
upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags,upl_page_info_t * page_list,mach_msg_type_number_t count,boolean_t * empty)582 upl_commit_range(
583 upl_t upl,
584 upl_offset_t offset,
585 upl_size_t size,
586 int flags,
587 upl_page_info_t *page_list,
588 mach_msg_type_number_t count,
589 boolean_t *empty)
590 {
591 upl_size_t xfer_size, subupl_size;
592 vm_object_t shadow_object;
593 vm_object_t object;
594 vm_object_t m_object;
595 vm_object_offset_t target_offset;
596 upl_offset_t subupl_offset = offset;
597 int entry;
598 int occupied;
599 int clear_refmod = 0;
600 int pgpgout_count = 0;
601 struct vm_page_delayed_work dw_array;
602 struct vm_page_delayed_work *dwp, *dwp_start;
603 bool dwp_finish_ctx = TRUE;
604 int dw_count;
605 int dw_limit;
606 int isVectorUPL = 0;
607 upl_t vector_upl = NULL;
608 boolean_t should_be_throttled = FALSE;
609
610 vm_page_t nxt_page = VM_PAGE_NULL;
611 int fast_path_possible = 0;
612 int fast_path_full_commit = 0;
613 int throttle_page = 0;
614 int unwired_count = 0;
615 int local_queue_count = 0;
616 vm_page_t first_local, last_local;
617 vm_object_offset_t obj_start, obj_end, obj_offset;
618 kern_return_t kr = KERN_SUCCESS;
619
620 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
621
622 dwp_start = dwp = NULL;
623
624 subupl_size = size;
625 *empty = FALSE;
626
627 if (upl == UPL_NULL) {
628 return KERN_INVALID_ARGUMENT;
629 }
630
631 dw_count = 0;
632 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
633 dwp_start = vm_page_delayed_work_get_ctx();
634 if (dwp_start == NULL) {
635 dwp_start = &dw_array;
636 dw_limit = 1;
637 dwp_finish_ctx = FALSE;
638 }
639
640 dwp = dwp_start;
641
642 if (count == 0) {
643 page_list = NULL;
644 }
645
646 if ((isVectorUPL = vector_upl_is_valid(upl))) {
647 vector_upl = upl;
648 upl_lock(vector_upl);
649 } else {
650 upl_lock(upl);
651 }
652
653 process_upl_to_commit:
654
655 if (isVectorUPL) {
656 size = subupl_size;
657 offset = subupl_offset;
658 if (size == 0) {
659 upl_unlock(vector_upl);
660 kr = KERN_SUCCESS;
661 goto done;
662 }
663 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
664 if (upl == NULL) {
665 upl_unlock(vector_upl);
666 kr = KERN_FAILURE;
667 goto done;
668 }
669 page_list = upl->page_list;
670 subupl_size -= size;
671 subupl_offset += size;
672 }
673
674 #if UPL_DEBUG
675 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
676 upl->upl_commit_records[upl->upl_commit_index].c_btref = btref_get(__builtin_frame_address(0), 0);
677 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
678 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
679
680 upl->upl_commit_index++;
681 }
682 #endif
683 if (upl->flags & UPL_DEVICE_MEMORY) {
684 xfer_size = 0;
685 } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
686 xfer_size = size;
687 } else {
688 if (!isVectorUPL) {
689 upl_unlock(upl);
690 } else {
691 upl_unlock(vector_upl);
692 }
693 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
694 kr = KERN_FAILURE;
695 goto done;
696 }
697 if (upl->flags & UPL_SET_DIRTY) {
698 flags |= UPL_COMMIT_SET_DIRTY;
699 }
700 if (upl->flags & UPL_CLEAR_DIRTY) {
701 flags |= UPL_COMMIT_CLEAR_DIRTY;
702 }
703
704 object = upl->map_object;
705
706 if (upl->flags & UPL_SHADOWED) {
707 vm_object_lock(object);
708 shadow_object = object->shadow;
709 } else {
710 shadow_object = object;
711 }
712 entry = offset / PAGE_SIZE;
713 target_offset = (vm_object_offset_t)offset;
714
715 if (upl->flags & UPL_KERNEL_OBJECT) {
716 vm_object_lock_shared(shadow_object);
717 } else {
718 vm_object_lock(shadow_object);
719 }
720
721 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
722
723 if (upl->flags & UPL_ACCESS_BLOCKED) {
724 assert(shadow_object->blocked_access);
725 shadow_object->blocked_access = FALSE;
726 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
727 }
728
729 if (shadow_object->code_signed) {
730 /*
731 * CODE SIGNING:
732 * If the object is code-signed, do not let this UPL tell
733 * us if the pages are valid or not. Let the pages be
734 * validated by VM the normal way (when they get mapped or
735 * copied).
736 */
737 flags &= ~UPL_COMMIT_CS_VALIDATED;
738 }
739 if (!page_list) {
740 /*
741 * No page list to get the code-signing info from !?
742 */
743 flags &= ~UPL_COMMIT_CS_VALIDATED;
744 }
745 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
746 should_be_throttled = TRUE;
747 }
748
749 if ((upl->flags & UPL_IO_WIRE) &&
750 !(flags & UPL_COMMIT_FREE_ABSENT) &&
751 !isVectorUPL &&
752 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
753 shadow_object->purgable != VM_PURGABLE_EMPTY) {
754 if (!vm_page_queue_empty(&shadow_object->memq)) {
755 if (shadow_object->internal && size == shadow_object->vo_size) {
756 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
757 fast_path_full_commit = 1;
758 }
759 fast_path_possible = 1;
760
761 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
762 (shadow_object->purgable == VM_PURGABLE_DENY ||
763 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
764 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
765 throttle_page = 1;
766 }
767 }
768 }
769 first_local = VM_PAGE_NULL;
770 last_local = VM_PAGE_NULL;
771
772 obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
773 obj_end = obj_start + xfer_size;
774 obj_start = vm_object_trunc_page(obj_start);
775 obj_end = vm_object_round_page(obj_end);
776 for (obj_offset = obj_start;
777 obj_offset < obj_end;
778 obj_offset += PAGE_SIZE) {
779 vm_page_t t, m;
780
781 dwp->dw_mask = 0;
782 clear_refmod = 0;
783
784 m = VM_PAGE_NULL;
785
786 if (upl->flags & UPL_LITE) {
787 unsigned int pg_num;
788
789 if (nxt_page != VM_PAGE_NULL) {
790 m = nxt_page;
791 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
792 target_offset = m->vmp_offset;
793 }
794 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
795 assert(pg_num == target_offset / PAGE_SIZE);
796
797 if (bitmap_test(upl->lite_list, pg_num)) {
798 bitmap_clear(upl->lite_list, pg_num);
799
800 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
801 m = vm_page_lookup(shadow_object, obj_offset);
802 }
803 } else {
804 m = NULL;
805 }
806 }
807 if (upl->flags & UPL_SHADOWED) {
808 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
809 t->vmp_free_when_done = FALSE;
810
811 VM_PAGE_FREE(t);
812
813 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
814 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
815 }
816 }
817 }
818 if (m == VM_PAGE_NULL) {
819 goto commit_next_page;
820 }
821
822 m_object = VM_PAGE_OBJECT(m);
823
824 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
825 assert(m->vmp_busy);
826
827 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
828 goto commit_next_page;
829 }
830
831 if (flags & UPL_COMMIT_CS_VALIDATED) {
832 /*
833 * CODE SIGNING:
834 * Set the code signing bits according to
835 * what the UPL says they should be.
836 */
837 m->vmp_cs_validated |= page_list[entry].cs_validated;
838 m->vmp_cs_tainted |= page_list[entry].cs_tainted;
839 m->vmp_cs_nx |= page_list[entry].cs_nx;
840 }
841 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
842 m->vmp_written_by_kernel = TRUE;
843 }
844
845 if (upl->flags & UPL_IO_WIRE) {
846 if (page_list) {
847 page_list[entry].phys_addr = 0;
848 }
849
850 if (flags & UPL_COMMIT_SET_DIRTY) {
851 SET_PAGE_DIRTY(m, FALSE);
852 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
853 m->vmp_dirty = FALSE;
854
855 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
856 m->vmp_cs_validated &&
857 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
858 /*
859 * CODE SIGNING:
860 * This page is no longer dirty
861 * but could have been modified,
862 * so it will need to be
863 * re-validated.
864 */
865 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
866
867 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
868
869 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
870 }
871 clear_refmod |= VM_MEM_MODIFIED;
872 }
873 if (upl->flags & UPL_ACCESS_BLOCKED) {
874 /*
875 * We blocked access to the pages in this UPL.
876 * Clear the "busy" bit and wake up any waiter
877 * for this page.
878 */
879 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
880 }
881 if (fast_path_possible) {
882 assert(m_object->purgable != VM_PURGABLE_EMPTY);
883 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
884 if (m->vmp_absent) {
885 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
886 assert(m->vmp_wire_count == 0);
887 assert(m->vmp_busy);
888
889 m->vmp_absent = FALSE;
890 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
891 } else {
892 if (m->vmp_wire_count == 0) {
893 panic("wire_count == 0, m = %p, obj = %p", m, shadow_object);
894 }
895 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
896
897 /*
898 * XXX FBDP need to update some other
899 * counters here (purgeable_wired_count)
900 * (ledgers), ...
901 */
902 assert(m->vmp_wire_count > 0);
903 m->vmp_wire_count--;
904
905 if (m->vmp_wire_count == 0) {
906 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
907 unwired_count++;
908
909 }
910 }
911 if (m->vmp_wire_count == 0) {
912 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
913
914 if (last_local == VM_PAGE_NULL) {
915 assert(first_local == VM_PAGE_NULL);
916
917 last_local = m;
918 first_local = m;
919 } else {
920 assert(first_local != VM_PAGE_NULL);
921
922 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
923 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
924 first_local = m;
925 }
926 local_queue_count++;
927
928 if (throttle_page) {
929 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
930 } else {
931 if (flags & UPL_COMMIT_INACTIVATE) {
932 if (shadow_object->internal) {
933 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
934 } else {
935 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
936 }
937 } else {
938 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
939 }
940 }
941 }
942 } else {
943 if (flags & UPL_COMMIT_INACTIVATE) {
944 dwp->dw_mask |= DW_vm_page_deactivate_internal;
945 clear_refmod |= VM_MEM_REFERENCED;
946 }
947 if (m->vmp_absent) {
948 if (flags & UPL_COMMIT_FREE_ABSENT) {
949 dwp->dw_mask |= DW_vm_page_free;
950 } else {
951 m->vmp_absent = FALSE;
952 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
953
954 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
955 dwp->dw_mask |= DW_vm_page_activate;
956 }
957 }
958 } else {
959 dwp->dw_mask |= DW_vm_page_unwire;
960 }
961 }
962 goto commit_next_page;
963 }
964 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
965
966 if (page_list) {
967 page_list[entry].phys_addr = 0;
968 }
969
970 /*
971 * make sure to clear the hardware
972 * modify or reference bits before
973 * releasing the BUSY bit on this page
974 * otherwise we risk losing a legitimate
975 * change of state
976 */
977 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
978 m->vmp_dirty = FALSE;
979
980 clear_refmod |= VM_MEM_MODIFIED;
981 }
982 if (m->vmp_laundry) {
983 dwp->dw_mask |= DW_vm_pageout_throttle_up;
984 }
985
986 if (VM_PAGE_WIRED(m)) {
987 m->vmp_free_when_done = FALSE;
988 }
989
990 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
991 m->vmp_cs_validated &&
992 m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
993 /*
994 * CODE SIGNING:
995 * This page is no longer dirty
996 * but could have been modified,
997 * so it will need to be
998 * re-validated.
999 */
1000 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
1001
1002 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
1003
1004 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1005 }
1006 if (m->vmp_overwriting) {
1007 /*
1008 * the (COPY_OUT_FROM == FALSE) request_page_list case
1009 */
1010 if (VM_PAGE_WIRED(m)) {
1011 /*
1012 * alternate (COPY_OUT_FROM == FALSE) page_list case
1013 * Occurs when the original page was wired
1014 * at the time of the list request
1015 */
1016 if (m->vmp_busy) {
1017 // printf("******* FBDP %s:%d page %p object %p ofsfet 0x%llx wired and busy\n", __FUNCTION__, __LINE__, m, VM_PAGE_OBJECT(m), m->vmp_offset);
1018 upl_pages_wired_busy++;
1019 }
1020 assert(!m->vmp_absent);
1021 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
1022 } else {
1023 assert(m->vmp_busy);
1024 #if CONFIG_PHANTOM_CACHE
1025 if (m->vmp_absent && !m_object->internal) {
1026 dwp->dw_mask |= DW_vm_phantom_cache_update;
1027 }
1028 #endif
1029 m->vmp_absent = FALSE;
1030
1031 dwp->dw_mask |= DW_clear_busy;
1032 }
1033 m->vmp_overwriting = FALSE;
1034 }
1035 m->vmp_cleaning = FALSE;
1036
1037 if (m->vmp_free_when_done) {
1038 /*
1039 * With the clean queue enabled, UPL_PAGEOUT should
1040 * no longer set the pageout bit. Its pages now go
1041 * to the clean queue.
1042 *
1043 * We don't use the cleaned Q anymore and so this
1044 * assert isn't correct. The code for the clean Q
1045 * still exists and might be used in the future. If we
1046 * go back to the cleaned Q, we will re-enable this
1047 * assert.
1048 *
1049 * assert(!(upl->flags & UPL_PAGEOUT));
1050 */
1051 assert(!m_object->internal);
1052
1053 m->vmp_free_when_done = FALSE;
1054
1055 if ((flags & UPL_COMMIT_SET_DIRTY) ||
1056 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
1057 /*
1058 * page was re-dirtied after we started
1059 * the pageout... reactivate it since
1060 * we don't know whether the on-disk
1061 * copy matches what is now in memory
1062 */
1063 SET_PAGE_DIRTY(m, FALSE);
1064
1065 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
1066
1067 if (upl->flags & UPL_PAGEOUT) {
1068 counter_inc(&vm_statistics_reactivations);
1069 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
1070 }
1071 } else if (m->vmp_busy && !(upl->flags & UPL_HAS_BUSY)) {
1072 /*
1073 * Someone else might still be handling this
1074 * page (vm_fault() for example), so let's not
1075 * free it or "un-busy" it!
1076 * Put that page in the "speculative" queue
1077 * for now (since we would otherwise have freed
1078 * it) and let whoever is keeping the page
1079 * "busy" move it if needed when they're done
1080 * with it.
1081 */
1082 dwp->dw_mask |= DW_vm_page_speculate;
1083 } else {
1084 /*
1085 * page has been successfully cleaned
1086 * go ahead and free it for other use
1087 */
1088 if (m_object->internal) {
1089 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
1090 } else {
1091 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
1092 }
1093 m->vmp_dirty = FALSE;
1094 if (!(upl->flags & UPL_HAS_BUSY)) {
1095 assert(!m->vmp_busy);
1096 }
1097 m->vmp_busy = TRUE;
1098
1099 dwp->dw_mask |= DW_vm_page_free;
1100 }
1101 goto commit_next_page;
1102 }
1103 /*
1104 * It is a part of the semantic of COPYOUT_FROM
1105 * UPLs that a commit implies cache sync
1106 * between the vm page and the backing store
1107 * this can be used to strip the precious bit
1108 * as well as clean
1109 */
1110 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
1111 m->vmp_precious = FALSE;
1112 }
1113
1114 if (flags & UPL_COMMIT_SET_DIRTY) {
1115 SET_PAGE_DIRTY(m, FALSE);
1116 } else {
1117 m->vmp_dirty = FALSE;
1118 }
1119
1120 /* with the clean queue on, move *all* cleaned pages to the clean queue */
1121 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
1122 pgpgout_count++;
1123
1124 counter_inc(&vm_statistics_pageouts);
1125 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
1126
1127 dwp->dw_mask |= DW_enqueue_cleaned;
1128 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
1129 /*
1130 * page coming back in from being 'frozen'...
1131 * it was dirty before it was frozen, so keep it so
1132 * the vm_page_activate will notice that it really belongs
1133 * on the throttle queue and put it there
1134 */
1135 SET_PAGE_DIRTY(m, FALSE);
1136 dwp->dw_mask |= DW_vm_page_activate;
1137 } else {
1138 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
1139 dwp->dw_mask |= DW_vm_page_deactivate_internal;
1140 clear_refmod |= VM_MEM_REFERENCED;
1141 } else if (!VM_PAGE_PAGEABLE(m)) {
1142 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
1143 dwp->dw_mask |= DW_vm_page_speculate;
1144 } else if (m->vmp_reference) {
1145 dwp->dw_mask |= DW_vm_page_activate;
1146 } else {
1147 dwp->dw_mask |= DW_vm_page_deactivate_internal;
1148 clear_refmod |= VM_MEM_REFERENCED;
1149 }
1150 }
1151 }
1152 if (upl->flags & UPL_ACCESS_BLOCKED) {
1153 /*
1154 * We blocked access to the pages in this UPL.
1155 * Clear the "busy" bit on this page before we
1156 * wake up any waiter.
1157 */
1158 dwp->dw_mask |= DW_clear_busy;
1159 }
1160 /*
1161 * Wake up any thread waiting for the page to finish being cleaned.
1162 */
1163 dwp->dw_mask |= DW_PAGE_WAKEUP;
1164
1165 commit_next_page:
1166 if (clear_refmod) {
1167 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
1168 }
1169
1170 target_offset += PAGE_SIZE_64;
1171 xfer_size -= PAGE_SIZE;
1172 entry++;
1173
1174 if (dwp->dw_mask) {
1175 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
1176 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
1177
1178 if (dw_count >= dw_limit) {
1179 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1180
1181 dwp = dwp_start;
1182 dw_count = 0;
1183 }
1184 } else {
1185 if (dwp->dw_mask & DW_clear_busy) {
1186 m->vmp_busy = FALSE;
1187 }
1188
1189 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
1190 vm_page_wakeup(m_object, m);
1191 }
1192 }
1193 }
1194 }
1195 if (dw_count) {
1196 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
1197 dwp = dwp_start;
1198 dw_count = 0;
1199 }
1200
1201 if (fast_path_possible) {
1202 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
1203 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
1204
1205 if (local_queue_count || unwired_count) {
1206 if (local_queue_count) {
1207 vm_page_t first_target;
1208 vm_page_queue_head_t *target_queue;
1209
1210 if (throttle_page) {
1211 target_queue = &vm_page_queue_throttled;
1212 } else {
1213 if (flags & UPL_COMMIT_INACTIVATE) {
1214 if (shadow_object->internal) {
1215 target_queue = &vm_page_queue_anonymous;
1216 } else {
1217 target_queue = &vm_page_queue_inactive;
1218 }
1219 } else {
1220 target_queue = &vm_page_queue_active;
1221 }
1222 }
1223 /*
1224 * Transfer the entire local queue to the appropriate regular LRU page queue.
1225 */
1226 vm_page_lockspin_queues();
1227
1228 first_target = (vm_page_t) vm_page_queue_first(target_queue);
1229
1230 if (vm_page_queue_empty(target_queue)) {
1231 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1232 } else {
1233 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
1234 }
1235
1236 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
1237 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
1238 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
1239
1240 /*
1241 * Adjust the global page counts.
1242 */
1243 if (throttle_page) {
1244 vm_page_throttled_count += local_queue_count;
1245 } else {
1246 if (flags & UPL_COMMIT_INACTIVATE) {
1247 if (shadow_object->internal) {
1248 vm_page_anonymous_count += local_queue_count;
1249 }
1250 vm_page_inactive_count += local_queue_count;
1251
1252 token_new_pagecount += local_queue_count;
1253 } else {
1254 vm_page_active_count += local_queue_count;
1255 }
1256
1257 if (shadow_object->internal) {
1258 vm_page_pageable_internal_count += local_queue_count;
1259 } else {
1260 vm_page_pageable_external_count += local_queue_count;
1261 }
1262 }
1263 } else {
1264 vm_page_lockspin_queues();
1265 }
1266 if (unwired_count) {
1267 vm_page_wire_count -= unwired_count;
1268 VM_CHECK_MEMORYSTATUS;
1269 }
1270 vm_page_unlock_queues();
1271
1272 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
1273 }
1274 }
1275
1276 if (upl->flags & UPL_DEVICE_MEMORY) {
1277 occupied = 0;
1278 } else if (upl->flags & UPL_LITE) {
1279 uint32_t pages = (uint32_t)atop(upl_adjusted_size(upl, PAGE_MASK));
1280
1281 occupied = !fast_path_full_commit &&
1282 !bitmap_is_empty(upl->lite_list, pages);
1283 } else {
1284 occupied = !vm_page_queue_empty(&upl->map_object->memq);
1285 }
1286 if (occupied == 0) {
1287 /*
1288 * If this UPL element belongs to a Vector UPL and is
1289 * empty, then this is the right function to deallocate
1290 * it. So go ahead set the *empty variable. The flag
1291 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
1292 * should be considered relevant for the Vector UPL and not
1293 * the internal UPLs.
1294 */
1295 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
1296 *empty = TRUE;
1297 }
1298
1299 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
1300 /*
1301 * this is not a paging object
1302 * so we need to drop the paging reference
1303 * that was taken when we created the UPL
1304 * against this object
1305 */
1306 vm_object_activity_end(shadow_object);
1307 vm_object_collapse(shadow_object, 0, TRUE);
1308 } else {
1309 /*
1310 * we donated the paging reference to
1311 * the map object... vm_pageout_object_terminate
1312 * will drop this reference
1313 */
1314 }
1315 }
1316 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
1317 vm_object_unlock(shadow_object);
1318 if (object != shadow_object) {
1319 vm_object_unlock(object);
1320 }
1321
1322 if (!isVectorUPL) {
1323 upl_unlock(upl);
1324 } else {
1325 /*
1326 * If we completed our operations on an UPL that is
1327 * part of a Vectored UPL and if empty is TRUE, then
1328 * we should go ahead and deallocate this UPL element.
1329 * Then we check if this was the last of the UPL elements
1330 * within that Vectored UPL. If so, set empty to TRUE
1331 * so that in ubc_upl_commit_range or ubc_upl_commit, we
1332 * can go ahead and deallocate the Vector UPL too.
1333 */
1334 if (*empty == TRUE) {
1335 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
1336 upl_deallocate(upl);
1337 }
1338 goto process_upl_to_commit;
1339 }
1340 if (pgpgout_count) {
1341 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
1342 }
1343
1344 kr = KERN_SUCCESS;
1345 done:
1346 if (dwp_start && dwp_finish_ctx) {
1347 vm_page_delayed_work_finish_ctx(dwp_start);
1348 dwp_start = dwp = NULL;
1349 }
1350
1351 return kr;
1352 }
1353
1354 /* an option on commit should be wire */
1355 kern_return_t
upl_commit(upl_t upl,upl_page_info_t * page_list,mach_msg_type_number_t count)1356 upl_commit(
1357 upl_t upl,
1358 upl_page_info_t *page_list,
1359 mach_msg_type_number_t count)
1360 {
1361 boolean_t empty;
1362
1363 if (upl == UPL_NULL) {
1364 return KERN_INVALID_ARGUMENT;
1365 }
1366
1367 return upl_commit_range(upl, 0, upl->u_size, 0,
1368 page_list, count, &empty);
1369 }
1370