1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 * Compressor Pager.
59 * Memory Object Management.
60 */
61
62 #include <kern/host_statistics.h>
63 #include <kern/kalloc.h>
64 #include <kern/ipc_kobject.h>
65
66 #include <machine/atomic.h>
67
68 #include <mach/memory_object_control.h>
69 #include <mach/memory_object_types.h>
70 #include <mach/upl.h>
71
72 #include <vm/memory_object.h>
73 #include <vm/vm_compressor_internal.h>
74 #include <vm/vm_compressor_pager_internal.h>
75 #include <vm/vm_external.h>
76 #include <vm/vm_fault.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_protos_internal.h>
79 #include <vm/vm_object_internal.h>
80
81 #include <sys/kdebug_triage.h>
82
83 /* memory_object interfaces */
84 void compressor_memory_object_reference(memory_object_t mem_obj);
85 void compressor_memory_object_deallocate(memory_object_t mem_obj);
86 kern_return_t compressor_memory_object_init(
87 memory_object_t mem_obj,
88 memory_object_control_t control,
89 memory_object_cluster_size_t pager_page_size);
90 kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
91 kern_return_t compressor_memory_object_data_request(
92 memory_object_t mem_obj,
93 memory_object_offset_t offset,
94 memory_object_cluster_size_t length,
95 __unused vm_prot_t protection_required,
96 memory_object_fault_info_t fault_info);
97 kern_return_t compressor_memory_object_data_return(
98 memory_object_t mem_obj,
99 memory_object_offset_t offset,
100 memory_object_cluster_size_t size,
101 __unused memory_object_offset_t *resid_offset,
102 __unused int *io_error,
103 __unused boolean_t dirty,
104 __unused boolean_t kernel_copy,
105 __unused int upl_flags);
106 kern_return_t compressor_memory_object_data_initialize(
107 memory_object_t mem_obj,
108 memory_object_offset_t offset,
109 memory_object_cluster_size_t size);
110 kern_return_t compressor_memory_object_map(
111 __unused memory_object_t mem_obj,
112 __unused vm_prot_t prot);
113 kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
114
/*
 * Vtable hooking this pager into the generic memory_object layer.
 * Note that map, last_unmap and data_return all panic in this pager:
 * a compressor-backed object is never mapped directly and pages reach
 * the compressor via vm_compressor_pager_put(), not data_return.
 */
const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "compressor pager"
};
128
/* internal data structures */

/*
 * Global operation counters, incremented without synchronization
 * (best-effort statistics for debugging/inspection only).
 */
struct {
	uint64_t data_returns;
	uint64_t data_requests;
	uint64_t put;
	uint64_t get;
	uint64_t state_clr;
	uint64_t state_get;
	uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t; /* stand-in for c_slot_mapping */
142
/*
 * A compressor pager records, for each page-sized offset of its memory
 * object, one compressor_slot_t describing where the compressor holds
 * that page (0 == not present).  The slot storage adapts to the
 * object's size:
 *   - up to 2 slots:  embedded directly in the pager (cpgr_eslots)
 *   - up to one chunk: a single direct array (cpgr_dslots)
 *   - larger:          an array of pointers to fixed-size chunks
 *                      (cpgr_islots), chunks allocated lazily
 */
typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t cpgr_lock;    /* serializes control setup and slot-chunk installs */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references cpgr_hdr.mo_ref
#else
	os_ref_atomic_t cpgr_references;
#endif
	unsigned int cpgr_num_slots;    /* object size, in pages */
	unsigned int cpgr_num_slots_occupied;
	union {
		compressor_slot_t cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t *cpgr_dslots;   /* direct slots */
		compressor_slot_t **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
162
/*
 * Resolve a memory_object_t to its compressor_pager_t, or set the
 * result to NULL if the object is not managed by this pager (checked
 * via the ops vtable).
 */
#define compressor_pager_lookup(_mem_obj_, _cpgr_)              \
	MACRO_BEGIN                                             \
	if (_mem_obj_ == NULL ||                                \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
	        _cpgr_ = NULL;                                  \
	} else {                                                \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;        \
	}                                                       \
	MACRO_END
172
/* embedded slot pointers in compressor_pager get packed, so VA restricted */
static ZONE_DEFINE_TYPE(compressor_pager_zone, "compressor_pager",
    struct compressor_pager, ZC_NOENCRYPT | ZC_VM);

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

/* per-pager mutex helpers, all backed by cpgr_lock */
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

/* indirect slot chunks are allocated in fixed 512-byte units */
#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
190
191 /* forward declarations */
192 unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
193 int num_slots,
194 vm_compressor_options_t flags,
195 int *failures);
196 void compressor_pager_slot_lookup(
197 compressor_pager_t pager,
198 boolean_t do_alloc,
199 memory_object_offset_t offset,
200 compressor_slot_t **slot_pp);
201
#if defined(__LP64__)

/* restricted VA zones for slots */

/*
 * Slot arrays are served by one of three size-segregated zones;
 * an allocation uses the smallest zone that can hold it
 * (see zalloc_slot_array()/zfree_slot_array()).
 */
#define NUM_SLOTS_ZONES 3

static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
	16,
	64,
	COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
	"compressor_slots.16",
	"compressor_slots.64",
	"compressor_slots.512"
};

static zone_t
    compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */

static void
zfree_slot_array(compressor_slot_t *slots, size_t size);
static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t);
229
230 static inline unsigned int
compressor_pager_num_chunks(compressor_pager_t pager)231 compressor_pager_num_chunks(
232 compressor_pager_t pager)
233 {
234 unsigned int num_chunks;
235
236 num_chunks = pager->cpgr_num_slots / COMPRESSOR_SLOTS_PER_CHUNK;
237 if (num_chunks * COMPRESSOR_SLOTS_PER_CHUNK < pager->cpgr_num_slots) {
238 num_chunks++; /* do the equivalent of ceil() instead of trunc() for the above division */
239 }
240 return num_chunks;
241 }
242
243 kern_return_t
compressor_memory_object_init(memory_object_t mem_obj,memory_object_control_t control,__unused memory_object_cluster_size_t pager_page_size)244 compressor_memory_object_init(
245 memory_object_t mem_obj,
246 memory_object_control_t control,
247 __unused memory_object_cluster_size_t pager_page_size)
248 {
249 compressor_pager_t pager;
250
251 assert(pager_page_size == PAGE_SIZE);
252
253 memory_object_control_reference(control);
254
255 compressor_pager_lookup(mem_obj, pager);
256 compressor_pager_lock(pager);
257
258 if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
259 panic("compressor_memory_object_init: bad request");
260 }
261 pager->cpgr_hdr.mo_control = control;
262
263 compressor_pager_unlock(pager);
264
265 return KERN_SUCCESS;
266 }
267
/*
 * The compressor pager is never mapped into an address space
 * directly, so this entry point must never be reached.
 */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
276
/*
 * Since the compressor pager is never mapped (see
 * compressor_memory_object_map), there is never a last unmap either.
 */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
284
285 kern_return_t
compressor_memory_object_terminate(memory_object_t mem_obj)286 compressor_memory_object_terminate(
287 memory_object_t mem_obj)
288 {
289 memory_object_control_t control;
290 compressor_pager_t pager;
291
292 /*
293 * control port is a receive right, not a send right.
294 */
295
296 compressor_pager_lookup(mem_obj, pager);
297 compressor_pager_lock(pager);
298
299 /*
300 * After memory_object_terminate both memory_object_init
301 * and a no-senders notification are possible, so we need
302 * to clean up our reference to the memory_object_control
303 * to prepare for a new init.
304 */
305
306 control = pager->cpgr_hdr.mo_control;
307 pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
308
309 compressor_pager_unlock(pager);
310
311 /*
312 * Now we deallocate our reference on the control.
313 */
314 memory_object_control_deallocate(control);
315 return KERN_SUCCESS;
316 }
317
318 void
compressor_memory_object_reference(memory_object_t mem_obj)319 compressor_memory_object_reference(
320 memory_object_t mem_obj)
321 {
322 compressor_pager_t pager;
323
324 compressor_pager_lookup(mem_obj, pager);
325 if (pager == NULL) {
326 return;
327 }
328
329 compressor_pager_lock(pager);
330 os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
331 compressor_pager_unlock(pager);
332 }
333
/*
 * Drop one reference on the pager; on the final release, free every
 * compressed slot still held (across whichever of the three slot
 * layouts is in use) and destroy the pager itself.
 */
void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		/* other references remain: nothing to tear down yet */
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	unsigned int num_chunks;
	unsigned int i;
	compressor_slot_t *chunk;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree_type(compressor_slot_t *, num_chunks,
		    pager->cpgr_slots.cpgr_islots);
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		/* a single direct slot array */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		/* slots embedded in the pager structure itself: nothing to free */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
422
/*
 * Existence check for the page at "offset": returns KERN_SUCCESS if
 * the compressor holds a copy, KERN_FAILURE otherwise.  Only a
 * zero-length query is supported; an actual data request panics,
 * since pages are pulled from the compressor through
 * vm_compressor_pager_get() instead.
 */
kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* page number would overflow 32 bits */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
473
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not. The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel. Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return. It is the kernel that should decide what pages */
/* to write back. As of the writing of this note, this is indeed the case */
/* the kernel writes back one page at a time through this interface */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t pager;
	memory_object_offset_t cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * NOTE(review): any non-empty range panics — this path appears
	 * unimplemented rather than exercised; only size == 0 returns.
	 */
	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
508
509
/*ARGSUSED*/
/*
 * Pages are pushed into the compressor via vm_compressor_pager_put(),
 * never through the data_return interface, so reaching this entry
 * point indicates a bug in the caller.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
525
/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = zalloc_flags(compressor_pager_zone, Z_WAITOK | Z_NOFAIL);

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	/* choose the slot layout based on object size (see struct compressor_pager) */
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* islots points to an array of chunks pointer. every chunk has 512/sizeof(int)=128 slot_mapping */
		pager->cpgr_slots.cpgr_islots = kalloc_type(compressor_slot_t *,
		    num_chunks, Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		/* one direct array covers the whole object */
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		/* tiny object: use the slots embedded in the pager itself */
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_hdr.mo_last_unmap_ctid = 0;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
583
584
585 unsigned int
compressor_pager_slots_chunk_free(compressor_slot_t * chunk,int num_slots,vm_compressor_options_t flags,int * failures)586 compressor_pager_slots_chunk_free(
587 compressor_slot_t *chunk,
588 int num_slots,
589 vm_compressor_options_t flags,
590 int *failures)
591 {
592 int i;
593 vm_decompress_result_t retval;
594 unsigned int num_slots_freed;
595
596 if (failures) {
597 *failures = 0;
598 }
599 num_slots_freed = 0;
600 for (i = 0; i < num_slots; i++) {
601 if (chunk[i] != 0) {
602 retval = vm_compressor_free(&chunk[i], flags);
603
604 if (retval == DECOMPRESS_SUCCESS) {
605 num_slots_freed++;
606 } else {
607 assert3s(retval, <, 0); /* it's not DECOMPRESS_SUCCESS_* */
608 if (retval == DECOMPRESS_NEED_BLOCK) {
609 assert(flags & C_DONT_BLOCK);
610 }
611
612 if (failures) {
613 *failures += 1;
614 }
615 }
616 }
617 }
618 return num_slots_freed;
619 }
620
/* check if this pager has a slot_mapping spot for this page, if so give its position, if not, make place for it */
/*
 * Returns in *slot_pp the address of the compressor slot for "offset",
 * or NULL when the offset is out of range, or when the covering chunk
 * does not exist and do_alloc is FALSE.  With do_alloc == TRUE a
 * missing chunk is created with a double-checked pattern: allocate
 * outside the lock, install under the lock only if nobody raced us.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp /* OUT */)
{
	unsigned int num_chunks;
	uint32_t page_num;
	unsigned int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;

	/* offset is relative to the pager, first page of the first vm_object that created the pager has an offset of 0 */
	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			/* speculatively allocate a zero-filled chunk outside the lock */
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				/* lost the race: another thread installed a chunk first */
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
697
#if defined(__LP64__)
/* create the size-segregated zones backing slot-array allocations at boot */
__startup_func
static void
vm_compressor_slots_init(void)
{
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		compressor_slots_zones[idx] = zone_create(
			compressor_slots_zones_names[idx],
			compressor_slots_zones_sizes[idx],
			ZC_VM);
	}
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_compressor_slots_init);
#endif /* defined(__LP64__) */
712
713 static compressor_slot_t *
zalloc_slot_array(size_t size,zalloc_flags_t flags)714 zalloc_slot_array(size_t size, zalloc_flags_t flags)
715 {
716 #if defined(__LP64__)
717 compressor_slot_t *slots = NULL;
718
719 assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
720 for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
721 if (size > compressor_slots_zones_sizes[idx]) {
722 continue;
723 }
724 slots = zalloc_flags(compressor_slots_zones[idx], flags);
725 break;
726 }
727 return slots;
728 #else /* defined(__LP64__) */
729 return kalloc_data(size, flags);
730 #endif /* !defined(__LP64__) */
731 }
732
733 static void
zfree_slot_array(compressor_slot_t * slots,size_t size)734 zfree_slot_array(compressor_slot_t *slots, size_t size)
735 {
736 #if defined(__LP64__)
737 assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
738 for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
739 if (size > compressor_slots_zones_sizes[idx]) {
740 continue;
741 }
742 zfree(compressor_slots_zones[idx], slots);
743 break;
744 }
745 #else /* defined(__LP64__) */
746 kfree_data(slots, size);
747 #endif /* !defined(__LP64__) */
748 }
749
/*
 * Compress the physical page "ppnum" and store it in the slot mapped
 * to "offset" in this pager, creating the slot mapping if needed.
 * Any previously compressed copy for that offset is freed first.
 * *compressed_count_delta_p reports the net change in compressed
 * pages held for this object.  Returns the result of
 * vm_compressor_put() (KERN_SUCCESS when the page was accepted).
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf,
	int *compressed_count_delta_p, /* OUT */
	vm_compressor_options_t flags)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	kern_return_t kr;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread. The pageout thread */
	/* cannot be blocked by read activities unless the read activities */
	/* Therefore the grant of vs lock must be done on a try versus a */
	/* blocking basis. The code below relies on the fact that the */
	/* interface is synchronous. Should this interface be again async */
	/* for some type of pager in the future the pages will have to be */
	/* returned through a separate, asynchronous path. */

	compressor_pager_lookup(mem_obj, pager);

	uint32_t dummy_conv;
	if (os_convert_overflow(offset / PAGE_SIZE, &dummy_conv)) {
		/* overflow, page number doesn't fit in a uint32 */
		panic("%s: offset 0x%llx overflow", __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* we're looking for the slot_mapping that corresponds to the offset, which vm_compressor_put() is then going to
	 * set a value into after it allocates the slot. if the slot_mapping doesn't exist, this will create it */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, flags);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	kr = vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf, flags);
	if (kr == KERN_SUCCESS) {
		*compressed_count_delta_p += 1;
	}
	return kr;
}
817
818
/*
 * Decompress the page at "offset" into physical page "ppnum".
 * Returns KERN_SUCCESS on success; KERN_MEMORY_FAILURE when the page
 * is out of range or decompression failed; KERN_MEMORY_ERROR when the
 * compressor does not hold the page; KERN_FAILURE when a non-blocking
 * request (C_DONT_BLOCK) would have had to block (caller retries).
 * *my_fault_type distinguishes a plain compressor fault from one that
 * required a swap-in; *compressed_count_delta_p reports the change in
 * compressed pages held for this object (-1 when the slot is consumed).
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	vm_compressor_options_t flags,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* page number would overflow 32 bits */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_OUT_OF_RANGE), 0 /* arg */);
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_NO_PAGE), 0 /* arg */);
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int retval;
		/* NOTE(review): assumes a non-compressed slot must be fetched with C_PAGE_UNMODIFIED — confirm against vm_compressor_is_slot_compressed() */
		bool unmodified = (vm_compressor_is_slot_compressed(slot_p) == false);
		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, (unmodified ? (flags | C_PAGE_UNMODIFIED) : flags));
		if (retval <= DECOMPRESS_FIRST_FAIL_CODE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_DECOMPRESS_FAILED), (uintptr_t)retval /* arg */);
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == DECOMPRESS_SUCCESS_SWAPPEDIN) {
			/* the page had to be brought back from swap first */
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == DECOMPRESS_NEED_BLOCK) {
			assert((flags & C_DONT_BLOCK));
			/*
			 * Not a fatal failure because we just retry with a blocking get later. So we skip ktriage to avoid noise.
			 */
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place. Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
895
896 unsigned int
vm_compressor_pager_state_clr(memory_object_t mem_obj,memory_object_offset_t offset)897 vm_compressor_pager_state_clr(
898 memory_object_t mem_obj,
899 memory_object_offset_t offset)
900 {
901 compressor_pager_t pager;
902 compressor_slot_t *slot_p;
903 unsigned int num_slots_freed;
904
905 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
906
907 compressor_pager_stats.state_clr++;
908
909 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
910 /* overflow */
911 panic("%s: offset 0x%llx overflow",
912 __FUNCTION__, (uint64_t) offset);
913 return 0;
914 }
915
916 compressor_pager_lookup(mem_obj, pager);
917
918 /* find the compressor slot for that page */
919 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
920
921 num_slots_freed = 0;
922 if (slot_p && *slot_p != 0) {
923 vm_compressor_free(slot_p, 0);
924 num_slots_freed++;
925 assert(*slot_p == 0);
926 }
927
928 return num_slots_freed;
929 }
930
931 vm_external_state_t
vm_compressor_pager_state_get(memory_object_t mem_obj,memory_object_offset_t offset)932 vm_compressor_pager_state_get(
933 memory_object_t mem_obj,
934 memory_object_offset_t offset)
935 {
936 compressor_pager_t pager;
937 compressor_slot_t *slot_p;
938
939 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
940
941 compressor_pager_stats.state_get++;
942
943 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
944 /* overflow */
945 panic("%s: offset 0x%llx overflow",
946 __FUNCTION__, (uint64_t) offset);
947 return VM_EXTERNAL_STATE_ABSENT;
948 }
949
950 compressor_pager_lookup(mem_obj, pager);
951
952 /* find the compressor slot for that page */
953 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
954
955 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
956 /* out of range */
957 return VM_EXTERNAL_STATE_ABSENT;
958 } else if (slot_p == NULL || *slot_p == 0) {
959 /* compressor does not have this page */
960 return VM_EXTERNAL_STATE_ABSENT;
961 } else {
962 /* compressor does have this page */
963 return VM_EXTERNAL_STATE_EXISTS;
964 }
965 }
966
967 unsigned int
vm_compressor_pager_reap_pages(memory_object_t mem_obj,vm_compressor_options_t flags)968 vm_compressor_pager_reap_pages(
969 memory_object_t mem_obj,
970 vm_compressor_options_t flags)
971 {
972 compressor_pager_t pager;
973 unsigned int num_chunks;
974 int failures;
975 unsigned int i;
976 compressor_slot_t *chunk;
977 unsigned int num_slots_freed;
978
979 compressor_pager_lookup(mem_obj, pager);
980 if (pager == NULL) {
981 return 0;
982 }
983
984 compressor_pager_lock(pager);
985
986 /* reap the compressor slots */
987 num_slots_freed = 0;
988
989 num_chunks = compressor_pager_num_chunks(pager);
990 if (num_chunks > 1) {
991 /* we have an array of chunks */
992 for (i = 0; i < num_chunks; i++) {
993 chunk = pager->cpgr_slots.cpgr_islots[i];
994 if (chunk != NULL) {
995 num_slots_freed +=
996 compressor_pager_slots_chunk_free(
997 chunk,
998 COMPRESSOR_SLOTS_PER_CHUNK,
999 flags,
1000 &failures);
1001 if (failures == 0) {
1002 pager->cpgr_slots.cpgr_islots[i] = NULL;
1003 zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
1004 }
1005 }
1006 }
1007 } else if (pager->cpgr_num_slots > 2) {
1008 chunk = pager->cpgr_slots.cpgr_dslots;
1009 num_slots_freed +=
1010 compressor_pager_slots_chunk_free(
1011 chunk,
1012 pager->cpgr_num_slots,
1013 flags,
1014 NULL);
1015 } else {
1016 chunk = &pager->cpgr_slots.cpgr_eslots[0];
1017 num_slots_freed +=
1018 compressor_pager_slots_chunk_free(
1019 chunk,
1020 pager->cpgr_num_slots,
1021 flags,
1022 NULL);
1023 }
1024
1025 compressor_pager_unlock(pager);
1026
1027 return num_slots_freed;
1028 }
1029
1030 void
vm_compressor_pager_transfer(memory_object_t dst_mem_obj,memory_object_offset_t dst_offset,memory_object_t src_mem_obj,memory_object_offset_t src_offset)1031 vm_compressor_pager_transfer(
1032 memory_object_t dst_mem_obj,
1033 memory_object_offset_t dst_offset,
1034 memory_object_t src_mem_obj,
1035 memory_object_offset_t src_offset)
1036 {
1037 compressor_pager_t src_pager, dst_pager;
1038 compressor_slot_t *src_slot_p, *dst_slot_p;
1039
1040 compressor_pager_stats.transfer++;
1041
1042 /* find the compressor slot for the destination */
1043 compressor_pager_lookup(dst_mem_obj, dst_pager);
1044 assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
1045 compressor_pager_slot_lookup(dst_pager, TRUE, dst_offset, &dst_slot_p);
1046 assert(dst_slot_p != NULL);
1047 assert(*dst_slot_p == 0);
1048
1049 /* find the compressor slot for the source */
1050 compressor_pager_lookup(src_mem_obj, src_pager);
1051 assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
1052 compressor_pager_slot_lookup(src_pager, FALSE, src_offset, &src_slot_p);
1053 assert(src_slot_p != NULL);
1054 assert(*src_slot_p != 0);
1055
1056 /* transfer the slot from source to destination */
1057 vm_compressor_transfer(dst_slot_p, src_slot_p);
1058 os_atomic_dec(&src_pager->cpgr_num_slots_occupied, relaxed);
1059 os_atomic_inc(&dst_pager->cpgr_num_slots_occupied, relaxed);
1060 }
1061
/*
 * Return the offset of the first page at or after "offset" for which
 * this pager holds compressed data, or (memory_object_offset_t) -1
 * if there is no such page (or "offset" is out of range / overflows).
 */
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	unsigned int num_chunks;
	uint32_t page_num;
	unsigned int chunk_idx;
	uint32_t slot_idx;
	compressor_slot_t *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow: page number doesn't fit in 32 bits */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks == 1) {
		/* single chunk: direct slots or the embedded slot array */
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		/* linear scan from the starting page to the end */
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) slot_idx *
				       PAGE_SIZE;
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	/*
	 * slot_idx starts at the in-chunk position of "page_num" for the
	 * first chunk examined, then resets to 0 for every later chunk.
	 */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/*
					 * The last chunk may extend past the
					 * object's true size: went beyond
					 * end of object.
					 */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) next_slot *
				       PAGE_SIZE;
			}
		}
	}
	return (memory_object_offset_t) -1;
}
1139
1140 unsigned int
vm_compressor_pager_get_count(memory_object_t mem_obj)1141 vm_compressor_pager_get_count(
1142 memory_object_t mem_obj)
1143 {
1144 compressor_pager_t pager;
1145
1146 compressor_pager_lookup(mem_obj, pager);
1147 if (pager == NULL) {
1148 return 0;
1149 }
1150
1151 /*
1152 * The caller should have the VM object locked and one
1153 * needs that lock to do a page-in or page-out, so no
1154 * need to lock the pager here.
1155 */
1156 assert(pager->cpgr_num_slots_occupied >= 0);
1157
1158 return pager->cpgr_num_slots_occupied;
1159 }
1160
1161 /* Add page count to the counter in the pager */
1162 void
vm_compressor_pager_count(memory_object_t mem_obj,int compressed_count_delta,boolean_t shared_lock,vm_object_t object __unused)1163 vm_compressor_pager_count(
1164 memory_object_t mem_obj,
1165 int compressed_count_delta,
1166 boolean_t shared_lock,
1167 vm_object_t object __unused)
1168 {
1169 compressor_pager_t pager;
1170
1171 if (compressed_count_delta == 0) {
1172 return;
1173 }
1174
1175 compressor_pager_lookup(mem_obj, pager);
1176 if (pager == NULL) {
1177 return;
1178 }
1179
1180 if (compressed_count_delta < 0) {
1181 assert(pager->cpgr_num_slots_occupied >=
1182 (unsigned int) -compressed_count_delta);
1183 }
1184
1185 /*
1186 * The caller should have the VM object locked,
1187 * shared or exclusive.
1188 */
1189 if (shared_lock) {
1190 vm_object_lock_assert_shared(object);
1191 os_atomic_add(&pager->cpgr_num_slots_occupied, compressed_count_delta,
1192 relaxed);
1193 } else {
1194 vm_object_lock_assert_exclusive(object);
1195 pager->cpgr_num_slots_occupied += compressed_count_delta;
1196 }
1197 }
1198
1199 #if CONFIG_FREEZE
1200 kern_return_t
vm_compressor_pager_relocate(memory_object_t mem_obj,memory_object_offset_t offset,void ** current_chead)1201 vm_compressor_pager_relocate(
1202 memory_object_t mem_obj,
1203 memory_object_offset_t offset,
1204 void **current_chead)
1205 {
1206 /*
1207 * Has the page at this offset been compressed?
1208 */
1209
1210 compressor_slot_t *slot_p;
1211 compressor_pager_t dst_pager;
1212
1213 assert(mem_obj);
1214
1215 compressor_pager_lookup(mem_obj, dst_pager);
1216 if (dst_pager == NULL) {
1217 return KERN_FAILURE;
1218 }
1219
1220 compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
1221 return vm_compressor_relocate(current_chead, slot_p);
1222 }
1223 #endif /* CONFIG_FREEZE */
1224
1225 #if DEVELOPMENT || DEBUG
1226
1227 kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,memory_object_offset_t offset)1228 vm_compressor_pager_inject_error(memory_object_t mem_obj,
1229 memory_object_offset_t offset)
1230 {
1231 kern_return_t result = KERN_FAILURE;
1232 compressor_slot_t *slot_p;
1233 compressor_pager_t pager;
1234
1235 assert(mem_obj);
1236
1237 compressor_pager_lookup(mem_obj, pager);
1238 if (pager != NULL) {
1239 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
1240 if (slot_p != NULL && *slot_p != 0) {
1241 vm_compressor_inject_error(slot_p);
1242 result = KERN_SUCCESS;
1243 }
1244 }
1245
1246 return result;
1247 }
1248
1249
1250 /*
1251 * Write debugging information about the pager to the given buffer
1252 * returns: true on success, false if there was not enough space
1253 * argument size - in: bytes free in the buffer, out: bytes written
1254 */
1255 kern_return_t
vm_compressor_pager_dump(memory_object_t mem_obj,__unused char * buf,__unused size_t * size,bool * is_compressor,unsigned int * slot_count)1256 vm_compressor_pager_dump(memory_object_t mem_obj, /* IN */
1257 __unused char *buf, /* IN buffer to write to */
1258 __unused size_t *size, /* IN-OUT */
1259 bool *is_compressor, /* OUT */
1260 unsigned int *slot_count) /* OUT */
1261 {
1262 compressor_pager_t pager = NULL;
1263 compressor_pager_lookup(mem_obj, pager);
1264
1265 *size = 0;
1266 if (pager == NULL) {
1267 *is_compressor = false;
1268 *slot_count = 0;
1269 return KERN_SUCCESS;
1270 }
1271 *is_compressor = true;
1272 *slot_count = pager->cpgr_num_slots_occupied;
1273
1274 /*
1275 * size_t insize = *size;
1276 * unsigned int needed_size = 0; // pager->cpgr_num_slots_occupied * sizeof(compressor_slot_t) / sizeof(int);
1277 * if (needed_size > insize) {
1278 * return KERN_NO_SPACE;
1279 * }
1280 * TODO: not fully implemented yet, need to dump out the mappings
1281 * size = 0;
1282 */
1283 return KERN_SUCCESS;
1284 }
1285
1286 #endif
1287