xref: /xnu-11417.140.69/osfmk/vm/vm_compressor_pager.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 /*
58  *	Compressor Pager.
59  *		Memory Object Management.
60  */
61 
62 #include <kern/host_statistics.h>
63 #include <kern/kalloc.h>
64 #include <kern/ipc_kobject.h>
65 
66 #include <machine/atomic.h>
67 
68 #include <mach/memory_object_control.h>
69 #include <mach/memory_object_types.h>
70 #include <mach/upl.h>
71 
72 #include <vm/memory_object.h>
73 #include <vm/vm_compressor_pager_internal.h>
74 #include <vm/vm_external.h>
75 #include <vm/vm_fault.h>
76 #include <vm/vm_pageout.h>
77 #include <vm/vm_protos_internal.h>
78 #include <vm/vm_object_internal.h>
79 
80 #include <sys/kdebug_triage.h>
81 
82 /* memory_object interfaces */
83 void compressor_memory_object_reference(memory_object_t mem_obj);
84 void compressor_memory_object_deallocate(memory_object_t mem_obj);
85 kern_return_t compressor_memory_object_init(
86 	memory_object_t         mem_obj,
87 	memory_object_control_t control,
88 	memory_object_cluster_size_t pager_page_size);
89 kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
90 kern_return_t compressor_memory_object_data_request(
91 	memory_object_t         mem_obj,
92 	memory_object_offset_t  offset,
93 	memory_object_cluster_size_t            length,
94 	__unused vm_prot_t      protection_required,
95 	memory_object_fault_info_t      fault_info);
96 kern_return_t compressor_memory_object_data_return(
97 	memory_object_t         mem_obj,
98 	memory_object_offset_t  offset,
99 	memory_object_cluster_size_t                    size,
100 	__unused memory_object_offset_t *resid_offset,
101 	__unused int            *io_error,
102 	__unused boolean_t      dirty,
103 	__unused boolean_t      kernel_copy,
104 	__unused int    upl_flags);
105 kern_return_t compressor_memory_object_data_initialize(
106 	memory_object_t         mem_obj,
107 	memory_object_offset_t  offset,
108 	memory_object_cluster_size_t            size);
109 kern_return_t compressor_memory_object_map(
110 	__unused memory_object_t        mem_obj,
111 	__unused vm_prot_t              prot);
112 kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
113 
/*
 * memory_object operations vector for the compressor pager.  Installed in
 * each pager's mo_pager_ops; compressor_pager_lookup() compares against
 * this vector to recognize compressor-backed memory objects.
 */
const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "compressor pager"
};
127 
128 /* internal data structures */
129 
/*
 * Global activity counters for the compressor pager.  Bumped without
 * locking (simple increments), so values are approximate under
 * concurrency; intended for observability, not accounting.
 */
struct {
	uint64_t        data_returns;   /* memory_object_data_return() calls */
	uint64_t        data_requests;  /* memory_object_data_request() calls */
	uint64_t        put;            /* vm_compressor_pager_put() calls */
	uint64_t        get;            /* vm_compressor_pager_get() calls */
	uint64_t        state_clr;      /* vm_compressor_pager_state_clr() calls */
	uint64_t        state_get;      /* vm_compressor_pager_state_get() calls */
	uint64_t        transfer;       /* presumably pager transfer ops — incremented outside this view */
} compressor_pager_stats;
139 
140 typedef int compressor_slot_t; /* stand-in for c_slot_mapping */
141 
typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t                       cpgr_lock;      /* protects the fields below */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references                 cpgr_hdr.mo_ref
#else
	os_ref_atomic_t                 cpgr_references;
#endif
	unsigned int                    cpgr_num_slots;          /* total page slots this pager can back */
	unsigned int                    cpgr_num_slots_occupied; /* slots currently holding compressed data */
	/*
	 * Slot storage layout is chosen by capacity (see
	 * compressor_memory_object_create()):
	 *   <= 2 slots:  inline in cpgr_eslots
	 *   <= 1 chunk:  one direct array, cpgr_dslots
	 *   otherwise:   array of per-chunk pointers, cpgr_islots
	 */
	union {
		compressor_slot_t       cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t       *cpgr_dslots;   /* direct slots */
		compressor_slot_t       **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
161 
/*
 * Map a memory_object_t to its compressor_pager_t.  Yields NULL when the
 * object is NULL or is not managed by the compressor pager (its pager ops
 * vector is not compressor_pager_ops).
 */
#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
	MACRO_BEGIN                                                     \
	if (_mem_obj_ == NULL ||                                        \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {         \
	        _cpgr_ = NULL;                                          \
	} else {                                                        \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;                \
	}                                                               \
	MACRO_END
171 
172 /* embedded slot pointers in compressor_pager get packed, so VA restricted */
173 static ZONE_DEFINE_TYPE(compressor_pager_zone, "compressor_pager",
174     struct compressor_pager, ZC_NOENCRYPT | ZC_VM);
175 
176 LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");
177 
178 #define compressor_pager_lock(_cpgr_) \
179 	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
180 #define compressor_pager_unlock(_cpgr_) \
181 	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
182 #define compressor_pager_lock_init(_cpgr_) \
183 	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
184 #define compressor_pager_lock_destroy(_cpgr_) \
185 	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
186 
187 #define COMPRESSOR_SLOTS_CHUNK_SIZE     (512)
188 #define COMPRESSOR_SLOTS_PER_CHUNK      (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
189 
190 /* forward declarations */
191 unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
192     int num_slots,
193     vm_compressor_options_t flags,
194     int *failures);
195 void compressor_pager_slot_lookup(
196 	compressor_pager_t      pager,
197 	boolean_t               do_alloc,
198 	memory_object_offset_t  offset,
199 	compressor_slot_t       **slot_pp);
200 
201 #if     defined(__LP64__)
202 
203 /* restricted VA zones for slots */
204 
205 #define NUM_SLOTS_ZONES         3
206 
207 static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
208 	16,
209 	64,
210 	COMPRESSOR_SLOTS_CHUNK_SIZE
211 };
212 
213 static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
214 	"compressor_slots.16",
215 	"compressor_slots.64",
216 	"compressor_slots.512"
217 };
218 
219 static zone_t
220     compressor_slots_zones[NUM_SLOTS_ZONES];
221 
222 #endif /* defined(__LP64__) */
223 
224 static void
225 zfree_slot_array(compressor_slot_t *slots, size_t size);
226 static compressor_slot_t *
227 zalloc_slot_array(size_t size, zalloc_flags_t);
228 
229 static inline unsigned int
compressor_pager_num_chunks(compressor_pager_t pager)230 compressor_pager_num_chunks(
231 	compressor_pager_t      pager)
232 {
233 	unsigned int num_chunks;
234 
235 	num_chunks = pager->cpgr_num_slots / COMPRESSOR_SLOTS_PER_CHUNK;
236 	if (num_chunks * COMPRESSOR_SLOTS_PER_CHUNK < pager->cpgr_num_slots) {
237 		num_chunks++;  /* do the equivalent of ceil() instead of trunc() for the above division */
238 	}
239 	return num_chunks;
240 }
241 
/*
 * memory_object_init() handler: attach the memory_object_control the
 * kernel hands us to this pager.  The pager must not already have a
 * control (i.e. it is either freshly created or fully terminated).
 */
kern_return_t
compressor_memory_object_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t              pager;

	assert(pager_page_size == PAGE_SIZE);

	/* take a reference on the control; dropped at terminate time */
	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/* a pager may only be initialized once until it is terminated */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
266 
/*
 * memory_object_map() handler: compressor-backed memory objects are never
 * mapped directly, so this entry point must never be reached.
 */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;  /* not reached; keeps the signature honest */
}
275 
/*
 * memory_object_last_unmap() handler: never legitimately called for a
 * compressor pager (these objects are not mapped in the first place).
 */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t        mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;  /* not reached */
}
283 
284 kern_return_t
compressor_memory_object_terminate(memory_object_t mem_obj)285 compressor_memory_object_terminate(
286 	memory_object_t         mem_obj)
287 {
288 	memory_object_control_t control;
289 	compressor_pager_t      pager;
290 
291 	/*
292 	 * control port is a receive right, not a send right.
293 	 */
294 
295 	compressor_pager_lookup(mem_obj, pager);
296 	compressor_pager_lock(pager);
297 
298 	/*
299 	 * After memory_object_terminate both memory_object_init
300 	 * and a no-senders notification are possible, so we need
301 	 * to clean up our reference to the memory_object_control
302 	 * to prepare for a new init.
303 	 */
304 
305 	control = pager->cpgr_hdr.mo_control;
306 	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
307 
308 	compressor_pager_unlock(pager);
309 
310 	/*
311 	 * Now we deallocate our reference on the control.
312 	 */
313 	memory_object_control_deallocate(control);
314 	return KERN_SUCCESS;
315 }
316 
317 void
compressor_memory_object_reference(memory_object_t mem_obj)318 compressor_memory_object_reference(
319 	memory_object_t         mem_obj)
320 {
321 	compressor_pager_t      pager;
322 
323 	compressor_pager_lookup(mem_obj, pager);
324 	if (pager == NULL) {
325 		return;
326 	}
327 
328 	compressor_pager_lock(pager);
329 	os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
330 	compressor_pager_unlock(pager);
331 }
332 
/*
 * memory_object_deallocate() handler: drop one reference; on the last
 * release, free every compressed slot this pager holds (via whichever
 * of the three slot-storage layouts is in use) and then the pager itself.
 */
void
compressor_memory_object_deallocate(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_slots_freed; /* NOTE(review): computed but never read here */

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		/* other references remain: nothing more to do */
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	unsigned int num_chunks;
	unsigned int i;
	compressor_slot_t *chunk;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* indirect layout: free each chunk, then the pointer array */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree_type(compressor_slot_t *, num_chunks,
		    pager->cpgr_slots.cpgr_islots);
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		/* direct layout: one contiguous slot array */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		/* embedded layout: slots live inside the pager structure */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
421 
422 kern_return_t
compressor_memory_object_data_request(memory_object_t mem_obj,memory_object_offset_t offset,memory_object_cluster_size_t length,__unused vm_prot_t protection_required,__unused memory_object_fault_info_t fault_info)423 compressor_memory_object_data_request(
424 	memory_object_t         mem_obj,
425 	memory_object_offset_t  offset,
426 	memory_object_cluster_size_t            length,
427 	__unused vm_prot_t      protection_required,
428 	__unused memory_object_fault_info_t     fault_info)
429 {
430 	compressor_pager_t      pager;
431 	kern_return_t           kr;
432 	compressor_slot_t       *slot_p;
433 
434 	compressor_pager_stats.data_requests++;
435 
436 	/*
437 	 * Request must be on a page boundary and a multiple of pages.
438 	 */
439 	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
440 		panic("compressor_memory_object_data_request(): bad alignment");
441 	}
442 
443 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
444 		panic("%s: offset 0x%llx overflow",
445 		    __FUNCTION__, (uint64_t) offset);
446 		return KERN_FAILURE;
447 	}
448 
449 	compressor_pager_lookup(mem_obj, pager);
450 
451 	if (length == 0) {
452 		/* we're only querying the pager for this page */
453 	} else {
454 		panic("compressor: data_request");
455 	}
456 
457 	/* find the compressor slot for that page */
458 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
459 
460 	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
461 		/* out of range */
462 		kr = KERN_FAILURE;
463 	} else if (slot_p == NULL || *slot_p == 0) {
464 		/* compressor does not have this page */
465 		kr = KERN_FAILURE;
466 	} else {
467 		/* compressor does have this page */
468 		kr = KERN_SUCCESS;
469 	}
470 	return kr;
471 }
472 
473 /*
474  * memory_object_data_initialize: check whether we already have each page, and
475  * write it if we do not.  The implementation is far from optimized, and
476  * also assumes that the default_pager is single-threaded.
477  */
478 /*  It is questionable whether or not a pager should decide what is relevant */
479 /* and what is not in data sent from the kernel.  Data initialize has been */
480 /* changed to copy back all data sent to it in preparation for its eventual */
481 /* merge with data return.  It is the kernel that should decide what pages */
482 /* to write back.  As of the writing of this note, this is indeed the case */
483 /* the kernel writes back one page at a time through this interface */
484 
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            size)
{
	compressor_pager_t      pager;
	memory_object_offset_t  cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * Writing pages into the compressor through this interface is
	 * unimplemented: any non-empty range panics on its first page.
	 * An empty range (size == 0) skips the loop and succeeds.
	 */
	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
507 
508 
509 /*ARGSUSED*/
/*
 * memory_object_data_return() handler: pages are handed to the
 * compressor via vm_compressor_pager_put(), never through this Mach
 * interface, so reaching it indicates a kernel bug.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t                        mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   size,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int            upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;  /* not reached */
}
524 
525 /*
526  * Routine:	default_pager_memory_object_create
527  * Purpose:
528  *      Handle requests for memory objects from the
529  *      kernel.
530  * Notes:
531  *      Because we only give out the default memory
532  *      manager port to the kernel, we don't have to
533  *      be so paranoid about the contents.
534  */
/*
 * Create a compressor pager able to back "new_size" bytes (rounded to
 * whole pages) and return it as a memory_object_t in *new_mem_obj.
 * Chooses the slot-storage layout (embedded / direct / indirect) based
 * on the number of page slots required.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t    new_size,
	memory_object_t         *new_mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = zalloc_flags(compressor_pager_zone, Z_WAITOK | Z_NOFAIL);

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	/* pick slot storage by capacity; see the cpgr_slots union */
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* islots points to an array of chunks pointer. every chunk has 512/sizeof(int)=128 slot_mapping */
		pager->cpgr_slots.cpgr_islots = kalloc_type(compressor_slot_t *,
		    num_chunks, Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		/* fits in one chunk: a single direct slot array */
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		/* two or fewer pages: use the embedded slots */
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
581 
582 
583 unsigned int
compressor_pager_slots_chunk_free(compressor_slot_t * chunk,int num_slots,vm_compressor_options_t flags,int * failures)584 compressor_pager_slots_chunk_free(
585 	compressor_slot_t       *chunk,
586 	int                     num_slots,
587 	vm_compressor_options_t flags,
588 	int                     *failures)
589 {
590 	int i;
591 	vm_decompress_result_t retval;
592 	unsigned int num_slots_freed;
593 
594 	if (failures) {
595 		*failures = 0;
596 	}
597 	num_slots_freed = 0;
598 	for (i = 0; i < num_slots; i++) {
599 		if (chunk[i] != 0) {
600 			retval = vm_compressor_free(&chunk[i], flags);
601 
602 			if (retval == DECOMPRESS_SUCCESS) {
603 				num_slots_freed++;
604 			} else {
605 				assert3s(retval, <, 0); /* it's not DECOMPRESS_SUCCESS_* */
606 				if (retval == DECOMPRESS_NEED_BLOCK) {
607 					assert(flags & C_DONT_BLOCK);
608 				}
609 
610 				if (failures) {
611 					*failures += 1;
612 				}
613 			}
614 		}
615 	}
616 	return num_slots_freed;
617 }
618 
619 /* check if this pager has a slot_mapping spot for this page, if so give its position, if not, make place for it */
/*
 * Find the compressor slot backing the page at "offset" in this pager
 * and return its address in *slot_pp (NULL if out of range, or if the
 * chunk doesn't exist and do_alloc is FALSE).  With do_alloc == TRUE,
 * a missing chunk is allocated on demand.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp /* OUT */)
{
	unsigned int            num_chunks;
	uint32_t                page_num;
	unsigned int            chunk_idx;
	int                     slot_idx;
	compressor_slot_t       *chunk;
	compressor_slot_t       *t_chunk;

	/* offset is relative to the pager, first page of the first vm_object that created the pager has an offset of 0 */
	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow: page number doesn't fit in 32 bits */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			/*
			 * Allocate the chunk without holding the pager lock,
			 * then install it only if no other thread raced us in.
			 */
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				/* lost the race: discard our spare chunk */
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		/* single direct array of slots */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		/* slots embedded directly in the pager */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
695 
696 #if defined(__LP64__)
697 __startup_func
698 static void
vm_compressor_slots_init(void)699 vm_compressor_slots_init(void)
700 {
701 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
702 		compressor_slots_zones[idx] = zone_create(
703 			compressor_slots_zones_names[idx],
704 			compressor_slots_zones_sizes[idx],
705 			ZC_PGZ_USE_GUARDS | ZC_VM);
706 	}
707 }
708 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_compressor_slots_init);
709 #endif /* defined(__LP64__) */
710 
711 static compressor_slot_t *
zalloc_slot_array(size_t size,zalloc_flags_t flags)712 zalloc_slot_array(size_t size, zalloc_flags_t flags)
713 {
714 #if defined(__LP64__)
715 	compressor_slot_t *slots = NULL;
716 
717 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
718 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
719 		if (size > compressor_slots_zones_sizes[idx]) {
720 			continue;
721 		}
722 		slots = zalloc_flags(compressor_slots_zones[idx], flags);
723 		break;
724 	}
725 	return slots;
726 #else  /* defined(__LP64__) */
727 	return kalloc_data(size, flags);
728 #endif /* !defined(__LP64__) */
729 }
730 
731 static void
zfree_slot_array(compressor_slot_t * slots,size_t size)732 zfree_slot_array(compressor_slot_t *slots, size_t size)
733 {
734 #if defined(__LP64__)
735 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
736 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
737 		if (size > compressor_slots_zones_sizes[idx]) {
738 			continue;
739 		}
740 		zfree(compressor_slots_zones[idx], slots);
741 		break;
742 	}
743 #else  /* defined(__LP64__) */
744 	kfree_data(slots, size);
745 #endif /* !defined(__LP64__) */
746 }
747 
/*
 * Compress the page at physical page "ppnum" and store it in the slot
 * for "offset" in this pager.  Called from the pageout path.
 *
 * On return, *compressed_count_delta_p holds the net change in the
 * number of compressed pages this pager holds: freeing a previously
 * occupied slot contributes -1, a successful put contributes +1.
 * Returns the result of vm_compressor_put().
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	ppnum_t                         ppnum,
	void                            **current_chead,
	char                            *scratch_buf,
	int                             *compressed_count_delta_p, /* OUT */
	vm_compressor_options_t         flags)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	kern_return_t kr;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout      */
	/* thread cannot be blocked by read activities, so any lock grant  */
	/* must be done on a try versus a blocking basis.  The code below  */
	/* relies on the fact that the interface is synchronous.  Should   */
	/* this interface be again async for some type of pager in the     */
	/* future the pages will have to be returned through a separate,   */
	/* asynchronous path.                                              */

	compressor_pager_lookup(mem_obj, pager);

	uint32_t dummy_conv;
	if (os_convert_overflow(offset / PAGE_SIZE, &dummy_conv)) {
		/* overflow, page number doesn't fit in a uint32 */
		panic("%s: offset 0x%llx overflow", __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* we're looking for the slot_mapping that corresponds to the offset, which vm_compressor_put() is then going to
	 * set a value into after it allocates the slot. if the slot_mapping doesn't exist, this will create it */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, flags);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	kr = vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf, flags);
	if (kr == KERN_SUCCESS) {
		*compressed_count_delta_p += 1;
	}
	return kr;
}
815 
816 
/*
 * Decompress the page at "offset" in this pager into physical page
 * "ppnum".  *my_fault_type reports whether the data had to be swapped
 * back in.  On success, *compressed_count_delta_p is -1 if the slot was
 * consumed, 0 if the original was kept (copy-on-write case).
 * Non-success returns record a ktriage breadcrumb, except for the
 * retryable DECOMPRESS_NEED_BLOCK case.
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	ppnum_t                 ppnum,
	int                     *my_fault_type,
	vm_compressor_options_t flags,
	int                     *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	/* page number must fit in 32 bits */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_OUT_OF_RANGE), 0 /* arg */);
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_NO_PAGE), 0 /* arg */);
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int     retval;
		bool unmodified = (vm_compressor_is_slot_compressed(slot_p) == false);
		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, (unmodified ? (flags | C_PAGE_UNMODIFIED) : flags));
		if (retval <= DECOMPRESS_FIRST_FAIL_CODE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_DECOMPRESS_FAILED), (uintptr_t)retval /* arg */);
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == DECOMPRESS_SUCCESS_SWAPPEDIN) {
			/* data came back from swap, not just the compressor pool */
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == DECOMPRESS_NEED_BLOCK) {
			assert((flags & C_DONT_BLOCK));
			/*
			 * Not a fatal failure because we just retry with a blocking get later. So we skip ktriage to avoid noise.
			 */
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
893 
894 unsigned int
vm_compressor_pager_state_clr(memory_object_t mem_obj,memory_object_offset_t offset)895 vm_compressor_pager_state_clr(
896 	memory_object_t         mem_obj,
897 	memory_object_offset_t  offset)
898 {
899 	compressor_pager_t      pager;
900 	compressor_slot_t       *slot_p;
901 	unsigned int            num_slots_freed;
902 
903 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
904 
905 	compressor_pager_stats.state_clr++;
906 
907 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
908 		/* overflow */
909 		panic("%s: offset 0x%llx overflow",
910 		    __FUNCTION__, (uint64_t) offset);
911 		return 0;
912 	}
913 
914 	compressor_pager_lookup(mem_obj, pager);
915 
916 	/* find the compressor slot for that page */
917 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
918 
919 	num_slots_freed = 0;
920 	if (slot_p && *slot_p != 0) {
921 		vm_compressor_free(slot_p, 0);
922 		num_slots_freed++;
923 		assert(*slot_p == 0);
924 	}
925 
926 	return num_slots_freed;
927 }
928 
929 vm_external_state_t
vm_compressor_pager_state_get(memory_object_t mem_obj,memory_object_offset_t offset)930 vm_compressor_pager_state_get(
931 	memory_object_t         mem_obj,
932 	memory_object_offset_t  offset)
933 {
934 	compressor_pager_t      pager;
935 	compressor_slot_t       *slot_p;
936 
937 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
938 
939 	compressor_pager_stats.state_get++;
940 
941 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
942 		/* overflow */
943 		panic("%s: offset 0x%llx overflow",
944 		    __FUNCTION__, (uint64_t) offset);
945 		return VM_EXTERNAL_STATE_ABSENT;
946 	}
947 
948 	compressor_pager_lookup(mem_obj, pager);
949 
950 	/* find the compressor slot for that page */
951 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
952 
953 	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
954 		/* out of range */
955 		return VM_EXTERNAL_STATE_ABSENT;
956 	} else if (slot_p == NULL || *slot_p == 0) {
957 		/* compressor does not have this page */
958 		return VM_EXTERNAL_STATE_ABSENT;
959 	} else {
960 		/* compressor does have this page */
961 		return VM_EXTERNAL_STATE_EXISTS;
962 	}
963 }
964 
965 unsigned int
vm_compressor_pager_reap_pages(memory_object_t mem_obj,vm_compressor_options_t flags)966 vm_compressor_pager_reap_pages(
967 	memory_object_t         mem_obj,
968 	vm_compressor_options_t flags)
969 {
970 	compressor_pager_t      pager;
971 	unsigned int            num_chunks;
972 	int                     failures;
973 	unsigned int            i;
974 	compressor_slot_t       *chunk;
975 	unsigned int            num_slots_freed;
976 
977 	compressor_pager_lookup(mem_obj, pager);
978 	if (pager == NULL) {
979 		return 0;
980 	}
981 
982 	compressor_pager_lock(pager);
983 
984 	/* reap the compressor slots */
985 	num_slots_freed = 0;
986 
987 	num_chunks = compressor_pager_num_chunks(pager);
988 	if (num_chunks > 1) {
989 		/* we have an array of chunks */
990 		for (i = 0; i < num_chunks; i++) {
991 			chunk = pager->cpgr_slots.cpgr_islots[i];
992 			if (chunk != NULL) {
993 				num_slots_freed +=
994 				    compressor_pager_slots_chunk_free(
995 					chunk,
996 					COMPRESSOR_SLOTS_PER_CHUNK,
997 					flags,
998 					&failures);
999 				if (failures == 0) {
1000 					pager->cpgr_slots.cpgr_islots[i] = NULL;
1001 					zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
1002 				}
1003 			}
1004 		}
1005 	} else if (pager->cpgr_num_slots > 2) {
1006 		chunk = pager->cpgr_slots.cpgr_dslots;
1007 		num_slots_freed +=
1008 		    compressor_pager_slots_chunk_free(
1009 			chunk,
1010 			pager->cpgr_num_slots,
1011 			flags,
1012 			NULL);
1013 	} else {
1014 		chunk = &pager->cpgr_slots.cpgr_eslots[0];
1015 		num_slots_freed +=
1016 		    compressor_pager_slots_chunk_free(
1017 			chunk,
1018 			pager->cpgr_num_slots,
1019 			flags,
1020 			NULL);
1021 	}
1022 
1023 	compressor_pager_unlock(pager);
1024 
1025 	return num_slots_freed;
1026 }
1027 
1028 void
vm_compressor_pager_transfer(memory_object_t dst_mem_obj,memory_object_offset_t dst_offset,memory_object_t src_mem_obj,memory_object_offset_t src_offset)1029 vm_compressor_pager_transfer(
1030 	memory_object_t         dst_mem_obj,
1031 	memory_object_offset_t  dst_offset,
1032 	memory_object_t         src_mem_obj,
1033 	memory_object_offset_t  src_offset)
1034 {
1035 	compressor_pager_t      src_pager, dst_pager;
1036 	compressor_slot_t       *src_slot_p, *dst_slot_p;
1037 
1038 	compressor_pager_stats.transfer++;
1039 
1040 	/* find the compressor slot for the destination */
1041 	compressor_pager_lookup(dst_mem_obj, dst_pager);
1042 	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
1043 	compressor_pager_slot_lookup(dst_pager, TRUE, dst_offset, &dst_slot_p);
1044 	assert(dst_slot_p != NULL);
1045 	assert(*dst_slot_p == 0);
1046 
1047 	/* find the compressor slot for the source */
1048 	compressor_pager_lookup(src_mem_obj, src_pager);
1049 	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
1050 	compressor_pager_slot_lookup(src_pager, FALSE, src_offset, &src_slot_p);
1051 	assert(src_slot_p != NULL);
1052 	assert(*src_slot_p != 0);
1053 
1054 	/* transfer the slot from source to destination */
1055 	vm_compressor_transfer(dst_slot_p, src_slot_p);
1056 	os_atomic_dec(&src_pager->cpgr_num_slots_occupied, relaxed);
1057 	os_atomic_inc(&dst_pager->cpgr_num_slots_occupied, relaxed);
1058 }
1059 
/*
 * Return the offset (page-aligned) of the first compressed page at or
 * after "offset" in this pager, or (memory_object_offset_t) -1 if there
 * is none (or if "offset" is out of range / overflows a 32-bit page
 * number).
 */
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	unsigned int            num_chunks;
	uint32_t                page_num;
	unsigned int            chunk_idx;
	uint32_t                slot_idx;
	compressor_slot_t       *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks == 1) {
		/* single chunk: direct (dslots) or embedded (eslots) layout */
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		/* linear scan from the starting page to the end */
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) slot_idx *
				       PAGE_SIZE;
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	/*
	 * Start at the chunk/slot containing page_num; after the first
	 * chunk, slot_idx is reset to 0 so each later chunk is scanned
	 * from its beginning.
	 */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) next_slot *
				       PAGE_SIZE;
			}
		}
	}
	return (memory_object_offset_t) -1;
}
1137 
1138 unsigned int
vm_compressor_pager_get_count(memory_object_t mem_obj)1139 vm_compressor_pager_get_count(
1140 	memory_object_t mem_obj)
1141 {
1142 	compressor_pager_t      pager;
1143 
1144 	compressor_pager_lookup(mem_obj, pager);
1145 	if (pager == NULL) {
1146 		return 0;
1147 	}
1148 
1149 	/*
1150 	 * The caller should have the VM object locked and one
1151 	 * needs that lock to do a page-in or page-out, so no
1152 	 * need to lock the pager here.
1153 	 */
1154 	assert(pager->cpgr_num_slots_occupied >= 0);
1155 
1156 	return pager->cpgr_num_slots_occupied;
1157 }
1158 
1159 /* Add page count to the counter in the pager */
1160 void
vm_compressor_pager_count(memory_object_t mem_obj,int compressed_count_delta,boolean_t shared_lock,vm_object_t object __unused)1161 vm_compressor_pager_count(
1162 	memory_object_t mem_obj,
1163 	int             compressed_count_delta,
1164 	boolean_t       shared_lock,
1165 	vm_object_t     object __unused)
1166 {
1167 	compressor_pager_t      pager;
1168 
1169 	if (compressed_count_delta == 0) {
1170 		return;
1171 	}
1172 
1173 	compressor_pager_lookup(mem_obj, pager);
1174 	if (pager == NULL) {
1175 		return;
1176 	}
1177 
1178 	if (compressed_count_delta < 0) {
1179 		assert(pager->cpgr_num_slots_occupied >=
1180 		    (unsigned int) -compressed_count_delta);
1181 	}
1182 
1183 	/*
1184 	 * The caller should have the VM object locked,
1185 	 * shared or exclusive.
1186 	 */
1187 	if (shared_lock) {
1188 		vm_object_lock_assert_shared(object);
1189 		os_atomic_add(&pager->cpgr_num_slots_occupied, compressed_count_delta,
1190 		    relaxed);
1191 	} else {
1192 		vm_object_lock_assert_exclusive(object);
1193 		pager->cpgr_num_slots_occupied += compressed_count_delta;
1194 	}
1195 }
1196 
#if CONFIG_FREEZE
/*
 * Ask the compressor to relocate the compressed data for the page at
 * "offset" into the compaction context "current_chead".
 */
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	void                    **current_chead)
{
	compressor_pager_t      dst_pager;
	compressor_slot_t       *slot_p;

	assert(mem_obj);

	/* resolve the pager backing this memory object */
	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	/* look up (without allocating) the slot for this page */
	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */
1222 
1223 #if DEVELOPMENT || DEBUG
1224 
1225 kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,memory_object_offset_t offset)1226 vm_compressor_pager_inject_error(memory_object_t mem_obj,
1227     memory_object_offset_t offset)
1228 {
1229 	kern_return_t result = KERN_FAILURE;
1230 	compressor_slot_t *slot_p;
1231 	compressor_pager_t pager;
1232 
1233 	assert(mem_obj);
1234 
1235 	compressor_pager_lookup(mem_obj, pager);
1236 	if (pager != NULL) {
1237 		compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
1238 		if (slot_p != NULL && *slot_p != 0) {
1239 			vm_compressor_inject_error(slot_p);
1240 			result = KERN_SUCCESS;
1241 		}
1242 	}
1243 
1244 	return result;
1245 }
1246 
1247 
1248 /*
1249  * Write debugging information about the pager to the given buffer
1250  * returns: true on success, false if there was not enough space
1251  * argument size - in: bytes free in the buffer, out: bytes written
1252  */
1253 kern_return_t
vm_compressor_pager_dump(memory_object_t mem_obj,__unused char * buf,__unused size_t * size,bool * is_compressor,unsigned int * slot_count)1254 vm_compressor_pager_dump(memory_object_t mem_obj,     /* IN */
1255     __unused char *buf,                               /* IN buffer to write to */
1256     __unused size_t *size,                           /* IN-OUT */
1257     bool *is_compressor,                              /* OUT */
1258     unsigned int *slot_count)                         /* OUT */
1259 {
1260 	compressor_pager_t pager = NULL;
1261 	compressor_pager_lookup(mem_obj, pager);
1262 
1263 	*size = 0;
1264 	if (pager == NULL) {
1265 		*is_compressor = false;
1266 		*slot_count = 0;
1267 		return KERN_SUCCESS;
1268 	}
1269 	*is_compressor = true;
1270 	*slot_count = pager->cpgr_num_slots_occupied;
1271 
1272 	/*
1273 	 *  size_t insize = *size;
1274 	 *  unsigned int needed_size = 0; // pager->cpgr_num_slots_occupied * sizeof(compressor_slot_t) / sizeof(int);
1275 	 *  if (needed_size > insize) {
1276 	 *       return KERN_NO_SPACE;
1277 	 *  }
1278 	 *  TODO: not fully implemented yet, need to dump out the mappings
1279 	 * size = 0;
1280 	 */
1281 	return KERN_SUCCESS;
1282 }
1283 
1284 #endif
1285