/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
#include <os/refcnt.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_data_unlock = device_pager_data_unlock,
	.memory_object_synchronize = device_pager_synchronize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* opaque device handle */
	vm_size_t       size;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;

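/*
 * Because the generic header comes first, a device_pager_t travels through
 * the VM layer as a memory_object_t and is recovered with a plain cast, as
 * device_pager_lookup() does below.  An illustrative sketch of the round
 * trip:
 *
 *	memory_object_t mo = (memory_object_t)device_object;
 *	device_pager_t  dp = (device_pager_t)mo;
 */
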
__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}

LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

KALLOC_TYPE_DEFINE(device_pager_zone, struct device_pager, KT_DEFAULT);

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000


/*
 * device_pager_setup:
 *	Create a device pager for the given device handle and wire it to a
 *	new "named" VM object of the given size.  Returns the pager, which
 *	the caller can then map or populate.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t               device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}
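
/*
 * Illustrative caller-side sketch (hypothetical driver code; the real
 * callers live in the IOKit layer).  Creating a pager for a physically
 * contiguous device region and populating it could look like:
 *
 *	memory_object_t pager;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL,
 *	    (uintptr_t)dev_handle,                  [opaque device handle]
 *	    region_size,
 *	    DEVICE_PAGER_CONTIGUOUS | VM_WIMG_IO);  [flags also carry WIMG bits]
 *	device_pager_populate_object(pager, 0,
 *	    (ppnum_t)atop_64(phys_addr),            [first physical page number]
 *	    region_size);
 */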

/*
 * device_pager_populate_object:
 *	Enter the given physical page(s) into the pager's VM object as
 *	"private" pages, starting at the given offset.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
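
/*
 * Note on the UPL request/commit pair above: no data moves through the UPL;
 * it is committed immediately after being built.  For a non-contiguous
 * object this appears to serve only to run the freshly entered private
 * pages through the normal UPL bookkeeping before they are used.
 */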

/*
 * device_pager_lookup:
 *	Convert a memory_object_t back to the device_pager_t that it is
 *	(this relies on the header-first layout of "struct device_pager").
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}

/*
 * device_pager_init:
 *	First contact from the VM layer: record the memory object control
 *	port and set the attributes of the backing VM object according to
 *	the pager's flags.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;
	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		vm_object->phys_contiguous = TRUE;
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		vm_object->nophyscache = TRUE;
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
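
/*
 * The WIMG cache-attribute bits share the pager's "flags" word, as the
 * VM_WIMG_MASK extraction above shows.  A hypothetical caller wanting an
 * uncached contiguous mapping might therefore pass something like
 * (DEVICE_PAGER_CONTIGUOUS | VM_WIMG_IO) to device_pager_setup().
 */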

/*
 * device_pager_data_return:
 *	Page-out path: forward the data range to the device via
 *	device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_return: lookup failed");
	}

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ | VM_PROT_WRITE,
	    offset, data_cnt));
}

/*
 * device_pager_data_request:
 *	Page-in path: ask the device to provide the requested range via
 *	device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t     fault_info)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_request: lookup failed");
	}

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}
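
/*
 * Both the data_return and data_request paths funnel into
 * device_data_action(), passing the pager itself cast to an ipc_port_t.
 * The pager pointer effectively serves as an opaque token identifying the
 * memory object; __IGNORE_WCASTALIGN suppresses the alignment warning the
 * cast would otherwise trigger.
 */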

/*
 * device_pager_reference:
 *	Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}

/*
 * device_pager_deallocate:
 *	Release one reference.  Dropping to the last ("named") reference
 *	closes the device and destroys the VM object; dropping to zero
 *	frees the pager itself.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
}
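
/*
 * Lifecycle sketch: device_object_create() starts the count at 1, the
 * "named" reference taken on behalf of memory_object_create_named().
 * When the count falls back to 1, memory_object_destroy() tears down the
 * VM object, which in turn drops that final reference and re-enters this
 * function with a count of 0, freeing the pager.
 */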

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * device_pager_synchronize:
 *	memory_object_synchronize is no longer supported; any call is fatal.
 */
kern_return_t
device_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("device_pager_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}

/*
 * device_pager_map:
 *	Called when the memory object is mapped; the first mapping takes an
 *	extra pager reference that lasts until the last unmap.
 */
kern_return_t
device_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t              prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

kern_return_t
device_pager_last_unmap(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;
	boolean_t       drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}
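
/*
 * The is_mapped flag and the extra reference taken in device_pager_map()
 * together guarantee that the pager outlives its mappings: the reference
 * is dropped here only once the object transitions back to unmapped.
 */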

/*
 * device_object_create:
 *	Allocate and minimally initialize a device pager; the device handle,
 *	size and flags are filled in by device_pager_setup().
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = zalloc_flags(device_pager_zone,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}
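
/*
 * os_ref_init_raw() starts the count at 1; that initial count is the
 * "named" reference that device_pager_deallocate() recognizes when the
 * count drops back to 1.
 */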

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}