/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos_internal.h>
#include <mach/sdt.h>
#include <os/refcnt.h>
#include <vm/vm_ubc.h>
#include <vm/vm_iokit.h>
#include <vm/vm_object_internal.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* passed to device_data_action()/device_close() */
	vm_size_t       size;
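	/*
	 * Reference count: when the generic memory_object header already
	 * provides one (MEMORY_OBJECT_HAS_REFCOUNT), alias it; otherwise
	 * keep a private atomic count in the pager.
	 */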
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;

__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}

LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

KALLOC_TYPE_DEFINE(device_pager_zone, struct device_pager, KT_DEFAULT);

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000


/*
 * device_pager_setup:
 *
 * Create a device pager for "device_handle" and associate it with a new
 * "named" VM object of the given size.  The pager's initial reference is
 * treated as the named reference (see device_pager_deallocate()); the
 * backing object is marked true-shared and, if it was symmetric, switched
 * to a delayed copy strategy.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t       device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}

/*
 * device_pager_populate_object:
 *
 * Enter the given physical page(s) into the VM object backing the pager,
 * starting at "offset".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
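	/*
	 * For non-contiguous objects, run the populated range through a
	 * UPL that is requested and committed immediately.  With
	 * UPL_NO_SYNC | UPL_CLEAN_IN_PLACE no I/O is generated; the
	 * request/commit cycle only updates the pageout bookkeeping for
	 * the freshly entered private pages.
	 */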
	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
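
/*
 * Usage sketch (illustrative only, not an interface of this file):
 * a caller, typically acting for an IOKit memory descriptor, creates
 * and populates a device pager roughly like this, where "dev_handle",
 * "phys_page" and "obj_size" stand in for caller-supplied values and
 * failure is handled by dropping the pager's named reference:
 *
 *	memory_object_t pager;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, dev_handle,
 *	    obj_size, DEVICE_PAGER_CONTIGUOUS);
 *	if (device_pager_populate_object(pager, 0, phys_page,
 *	    obj_size) != KERN_SUCCESS) {
 *		device_pager_deallocate(pager);
 *	}
 */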

/*
 * device_pager_lookup:
 *
 * Convert a memory_object into the device pager that implements it,
 * asserting that the object really is backed by the device pager and
 * still referenced.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}

/*
 * device_pager_init:
 *
 * First contact from the VM layer: remember the memory object control
 * port and configure the backing VM object (private pages, contiguity,
 * caching and WIMG bits) from the pager's flags.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	VM_OBJECT_SET_PRIVATE(vm_object, TRUE);
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		VM_OBJECT_SET_PHYS_CONTIGUOUS(vm_object, TRUE);
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		VM_OBJECT_SET_NOPHYSCACHE(vm_object, TRUE);
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_NONE;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

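/*
 * device_pager_data_action:
 *
 * Common bottleneck for the page-in (data_request) and page-out
 * (data_return) paths: validate the requested range against the pager's
 * size and forward it to the device via device_data_action().
 */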
static kern_return_t
device_pager_data_action(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	vm_prot_t                       protection)
{
	device_pager_t  device_object;
	memory_object_offset_t end_offset;
	kern_return_t kr;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("%s: lookup failed", __func__);
	}
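	/*
	 * Reject ranges that start past the end of the pager, wrap around
	 * when the length is added, or extend beyond the pager's size.
	 */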
	if (offset >= device_object->size ||
	    os_add_overflow(offset, length, &end_offset) ||
	    end_offset > device_object->size) {
		return KERN_INVALID_VALUE;
	}

	__IGNORE_WCASTALIGN(kr = device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object, protection, offset, length));

	return kr;
}

/*
 * device_pager_data_return:
 *
 * Page-out path: hand the range back to the device, read/write.
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	return device_pager_data_action(mem_obj, offset, data_cnt,
	           VM_PROT_READ | VM_PROT_WRITE);
}

/*
 * device_pager_data_request:
 *
 * Page-in path: ask the device to make the range available, read-only.
 */
kern_return_t
device_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t     fault_info)
{
	return device_pager_data_action(mem_obj, offset, length, VM_PROT_READ);
}

/*
 * device_pager_reference:
 *
 * Take an extra reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}

/*
 * device_pager_deallocate:
 *
 * Release one reference on the pager.  Teardown is two-phased: dropping
 * to 1 (the "named" reference) closes the device and destroys the VM
 * object, which in turn drops the final reference; dropping to 0 frees
 * the pager itself.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

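	/*
	 * os_ref_release_raw() returns the new reference count.  Reaching
	 * 1 means only the "named" reference is left; destroying the VM
	 * object below eventually drops that last reference and re-enters
	 * this function with a count of 0.
	 */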
	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, VM_OBJECT_DESTROY_PAGER);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
}

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_map:
 *
 * Called when a new mapping of the memory object is established.
 */
kern_return_t
device_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t              prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

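/*
 * device_pager_last_unmap:
 *
 * Called when the last mapping of the memory object goes away: drop the
 * extra "mapping" reference taken in device_pager_map(), outside the
 * pager lock since dropping it may tear the pager down.
 */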
kern_return_t
device_pager_last_unmap(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;
	boolean_t       drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}


/*
 * device_object_create:
 *
 * Allocate and initialize a fresh device pager with a single reference
 * and no memory object control port yet.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = zalloc_flags(device_pager_zone,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}
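
/*
 * Callers elsewhere in the VM layer can use is_device_pager_ops() to
 * special-case device-backed memory objects.  A minimal sketch (the
 * surrounding code is hypothetical):
 *
 *	if (mem_obj != MEMORY_OBJECT_NULL &&
 *	    is_device_pager_ops(mem_obj->mo_pager_ops)) {
 *		device_pager_t pager = device_pager_lookup(mem_obj);
 *		...
 *	}
 */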
549