/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <device/device_port.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos_internal.h>
#include <mach/sdt.h>
#include <os/refcnt.h>
#include <vm/vm_ubc.h>
#include <vm/vm_iokit.h>
#include <vm/vm_object_internal.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};
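
/*
 * Illustrative aside (not part of the original file): the VM layer treats
 * every memory object polymorphically and reaches the functions registered
 * above through the object's mo_pager_ops table.  A minimal sketch of that
 * dispatch, with a hypothetical caller name:
 */
#if 0
static kern_return_t
pager_data_request_dispatch_sketch(
	memory_object_t                 mo,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	vm_prot_t                       prot,
	memory_object_fault_info_t      fault_info)
{
	/* for a device pager, mo_pager_ops == &device_pager_ops */
	return mo->mo_pager_ops->memory_object_data_request(mo, offset,
	           length, prot, fault_info);
}
#endif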

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* opaque handle supplied by the device driver */
	vm_size_t       size;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;
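
/*
 * Compile-time restatement of the layout rule above (illustrative; the
 * original file relies on the comment alone).  device_pager_lookup() casts
 * a memory_object_t to device_pager_t, which is only sound if the generic
 * header sits at offset 0:
 */
#if 0
/* offsetof() comes from <stddef.h> */
_Static_assert(offsetof(struct device_pager, dev_pgr_hdr) == 0,
    "struct device_pager must start with its struct memory_object header");
#endif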

__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}

LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

KALLOC_TYPE_DEFINE(device_pager_zone, struct device_pager, KT_DEFAULT);

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000


/*
 * device_pager_setup:
 *
 * Create a device pager for the given device handle and have the VM layer
 * build a "named" VM object of the given size backed by it.  The object is
 * marked true-share and its copy strategy relaxed to "delay" so that
 * mappings share the device pages rather than copy them.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t               device_handle,
	vm_size_t               size,
	int                     flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}
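
/*
 * Hedged usage sketch: a device-side caller (IOKit glue, for instance)
 * would typically create the pager along these lines and hand the returned
 * memory object to the VM map code.  "dev_handle" and "dev_size" are
 * hypothetical placeholders, not names from this file.
 */
#if 0
static memory_object_t
make_device_pager_sketch(uintptr_t dev_handle, vm_size_t dev_size)
{
	/* the first argument is unused; flags here request a contiguous object */
	return device_pager_setup(MEMORY_OBJECT_NULL, dev_handle, dev_size,
	           DEVICE_PAGER_CONTIGUOUS);
}
#endif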

/*
 * device_pager_populate_object:
 *
 * Enter a run of device-provided physical pages into the VM object backing
 * the pager, starting at "offset".  For non-contiguous objects, the pages
 * are then committed through a UPL so the pageout machinery sees them.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
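
/*
 * Hedged sketch of a driver-style caller feeding physical pages into the
 * pager one page at a time; "pages", "page_count" and "base_offset" are
 * hypothetical names.  ptoa() converts a page index to a byte offset.
 */
#if 0
static kern_return_t
populate_device_pages_sketch(
	memory_object_t         pager,
	memory_object_offset_t  base_offset,
	const ppnum_t           *pages,
	unsigned int            page_count)
{
	kern_return_t kr = KERN_SUCCESS;

	for (unsigned int i = 0; i < page_count && kr == KERN_SUCCESS; i++) {
		kr = device_pager_populate_object(pager,
		    base_offset + ptoa(i), pages[i], PAGE_SIZE);
	}
	return kr;
}
#endif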

/*
 * device_pager_lookup:
 *
 * Convert a memory_object_t back to its device_pager_t, asserting that the
 * object really is a device pager.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}

/*
 * device_pager_init:
 *
 * First contact from the VM layer once the memory object is attached to a
 * VM object: record the pager's control port and push the device-specific
 * attributes (private pages, contiguity, cacheability, copy strategy) onto
 * the object.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	VM_OBJECT_SET_PRIVATE(vm_object, TRUE);
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		VM_OBJECT_SET_PHYS_CONTIGUOUS(vm_object, TRUE);
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		VM_OBJECT_SET_NOPHYSCACHE(vm_object, TRUE);
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_NONE;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
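
/*
 * Aside on the flags word (an observation, not a new mechanism): the low
 * bits of device_object->flags carry the caller's cache-mode request, which
 * is why VM_WIMG_MASK is applied above.  A caller wanting an uncached I/O
 * mapping of a contiguous range would have encoded something like:
 */
#if 0
int flags_example = DEVICE_PAGER_CONTIGUOUS | (VM_WIMG_IO & VM_WIMG_MASK);
#endif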

static kern_return_t
device_pager_data_action(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	vm_prot_t                       protection)
{
	device_pager_t  device_object;
	memory_object_offset_t end_offset;
	kern_return_t kr;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("%s: lookup failed", __func__);
	}

	if (offset >= device_object->size ||
	    os_add_overflow(offset, length, &end_offset) ||
	    end_offset > device_object->size) {
		return KERN_INVALID_VALUE;
	}

	__IGNORE_WCASTALIGN(kr = device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object, protection, offset, length));

	return kr;
}
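
/*
 * Worked restatement of the range check above (values hypothetical, object
 * size 0x1000 bytes):
 *
 *   offset >= size:          offset 0x1000, length 0x1     -> rejected
 *   offset + length wraps:   offset UINT64_MAX, length 0x1 -> rejected
 *   end past the object:     offset 0xE00, length 0x400    -> rejected
 *   in range:                offset 0xE00, length 0x200    -> passed through
 */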

/*
 * device_pager_data_return:
 *
 * Page-out path: hand the data in [offset, offset + data_cnt) back to the
 * device driver with read/write access.
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	return device_pager_data_action(mem_obj, offset, data_cnt,
	           VM_PROT_READ | VM_PROT_WRITE);
}

/*
 * device_pager_data_request:
 *
 * Page-in path: ask the device driver to satisfy a fault on
 * [offset, offset + length) with read access.
 */
kern_return_t
device_pager_data_request(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t              protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	return device_pager_data_action(mem_obj, offset, length, VM_PROT_READ);
}
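
/*
 * Note: both entry points above collapse into device_pager_data_action(),
 * which bounds-checks the request and then upcalls the driver through
 * device_data_action().  The only difference between the two paths is the
 * advertised protection: VM_PROT_READ for page-in faults, and
 * VM_PROT_READ | VM_PROT_WRITE for page-out.
 */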

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}

/*
 * device_pager_deallocate:
 *
 * Release one reference on the pager.  Dropping to the final "named"
 * reference closes the device and destroys the VM object; dropping to
 * zero frees the pager itself.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, VM_OBJECT_DESTROY_PAGER);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
}
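
/*
 * Illustration of the two-stage teardown above (counts hypothetical): a
 * pager holding its "named" reference plus one mapping reference tears
 * down as follows:
 *
 *   2 -> 1: device_pager_last_unmap() drops the mapping reference; only
 *           the named reference remains, so the device is closed and
 *           memory_object_destroy() starts VM object teardown.
 *   1 -> 0: the VM object's termination drops the named reference; the
 *           control reference, the lock, and the zone element are freed.
 */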

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_map:
 *
 * Called on the first mapping of the pager into an address space.
 */
kern_return_t
device_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t              prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

kern_return_t
device_pager_last_unmap(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;
	boolean_t       drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}
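
/*
 * Hedged sketch of the mapping-reference protocol implemented by
 * device_pager_map() and device_pager_last_unmap(); the caller and the
 * ordering are hypothetical, and in practice the VM map code drives these
 * calls:
 */
#if 0
static void
mapping_lifecycle_sketch(memory_object_t pager)
{
	(void) device_pager_map(pager, VM_PROT_READ);  /* first map: +1 pager ref */
	(void) device_pager_map(pager, VM_PROT_READ);  /* later maps: no new ref  */
	(void) device_pager_last_unmap(pager);         /* last unmap: -1 ref      */
}
#endif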



/*
 * device_object_create:
 *
 * Allocate and initialize a fresh device pager with a single reference.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = zalloc_flags(device_pager_zone,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
	device_object->dev_pgr_hdr.mo_last_unmap_ctid = 0;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}