/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
#include <os/refcnt.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
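/*
 * Pager operations vector for device-backed memory objects.  The VM
 * layer dispatches through this table whenever it needs the device
 * pager to supply, return, or otherwise manage pages.
 */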
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};

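/*
 * Opaque handle identifying the device backing a pager; it is passed
 * through to device_data_action() on page-in/page-out and to
 * device_close() at teardown.
 */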
typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* opaque handle to the backing device */
	vm_size_t       size;           /* size of the region covered by this pager */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;          /* DEVICE_PAGER_* flags and WIMG bits */
	boolean_t       is_mapped;      /* pager currently has mappings? */
} *device_pager_t;

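/*
 * Return the pager's current reference count, regardless of whether the
 * count lives in the shared memory-object header or in the pager's own
 * field.
 */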
__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}

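/* Lock group and typed allocation zone shared by all device pagers. */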
LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

KALLOC_TYPE_DEFINE(device_pager_zone, struct device_pager, KT_DEFAULT);

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager)        lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager)      lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000


/*
 * device_pager_setup:
 *
 * Create and initialize a device pager covering "size" bytes of the
 * device identified by "device_handle", and return it as a named
 * memory object suitable for mapping.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t       device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}
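
/*
 * Illustrative (hypothetical) use by a driver: create a pager over a
 * device aperture and populate it with physical pages.  The handle,
 * size, and page numbers below are made-up placeholders, not a real
 * in-tree caller.
 *
 *	memory_object_t pager;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL,
 *	    (uintptr_t) dev_state,              // driver-private cookie
 *	    round_page(aperture_size),
 *	    DEVICE_PAGER_CONTIGUOUS | VM_WIMG_IO);
 *	device_pager_populate_object(pager, 0, first_ppnum,
 *	    round_page(aperture_size));
 */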

/*
 * device_pager_populate_object:
 *
 * Enter "size" bytes of private (device) physical pages, starting at
 * physical page "page_num", into the VM object backed by this pager at
 * "offset".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

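	/*
	 * For a non-contiguous object, take out a UPL over the freshly
	 * populated range and commit it right away, which appears
	 * intended to make the newly entered private pages valid to the
	 * VM system.
	 */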
	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}


	return kr;
}

/*
 * device_pager_lookup:
 *
 * Convert a memory object into its device pager, verifying that the
 * object really is device-pager-backed and still referenced.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}

/*
 * device_pager_init:
 *
 * Attach the pager to its memory object control port and configure the
 * underlying VM object (private pages, physical contiguity, caching
 * mode) according to the pager's creation flags.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t     vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;


	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		vm_object->phys_contiguous = TRUE;
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		vm_object->nophyscache = TRUE;
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * device_pager_data_return:
 *
 * Handle a pageout request by forwarding the range to the device's
 * device_data_action() handler.
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_return: lookup failed");
	}

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ | VM_PROT_WRITE,
	    offset, data_cnt));
}

/*
 * device_pager_data_request:
 *
 * Handle a page-in fault by asking the device's device_data_action()
 * handler to provide the requested range.
 */
kern_return_t
device_pager_data_request(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t              protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_request: lookup failed");
	}

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}

/*
 * device_pager_deallocate:
 *
 * Release one reference on the pager.  When the count drops to 1, the
 * remaining "named" reference closes the device and destroys the VM
 * object; when it drops to 0, the pager itself is freed.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
	return;
}

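/*
 * device_pager_data_initialize:
 *
 * Not supported for device pagers; should never be called.
 */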
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

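/*
 * device_pager_terminate:
 *
 * Nothing to do here: teardown is driven by the reference count in
 * device_pager_deallocate().
 */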
kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_map:
 *
 * Note that the pager has been mapped.  The first mapping takes an
 * extra reference that is held until the last mapping goes away.
 */
kern_return_t
device_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

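/*
 * device_pager_last_unmap:
 *
 * The last mapping of the pager is gone: drop the extra "mapped"
 * reference taken in device_pager_map().
 */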
kern_return_t
device_pager_last_unmap(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;
	boolean_t       drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}


/*
 * device_object_create:
 *
 * Allocate and initialize a new device pager with a single reference.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = zalloc_flags(device_pager_zone,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}

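/*
 * is_device_pager_ops:
 *
 * Return whether the given pager operations vector belongs to the
 * device pager.
 */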
boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}