xref: /xnu-12377.1.9/osfmk/vm/vm_compressor_pager.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 /*
58  *	Compressor Pager.
59  *		Memory Object Management.
60  */
61 
62 #include <kern/host_statistics.h>
63 #include <kern/kalloc.h>
64 #include <kern/ipc_kobject.h>
65 
66 #include <machine/atomic.h>
67 
68 #include <mach/memory_object_control.h>
69 #include <mach/memory_object_types.h>
70 #include <mach/upl.h>
71 
72 #include <vm/memory_object.h>
73 #include <vm/vm_compressor_internal.h>
74 #include <vm/vm_compressor_pager_internal.h>
75 #include <vm/vm_external.h>
76 #include <vm/vm_fault.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_protos_internal.h>
79 #include <vm/vm_object_internal.h>
80 
81 #include <sys/kdebug_triage.h>
82 
83 /* memory_object interfaces */
84 void compressor_memory_object_reference(memory_object_t mem_obj);
85 void compressor_memory_object_deallocate(memory_object_t mem_obj);
86 kern_return_t compressor_memory_object_init(
87 	memory_object_t         mem_obj,
88 	memory_object_control_t control,
89 	memory_object_cluster_size_t pager_page_size);
90 kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
91 kern_return_t compressor_memory_object_data_request(
92 	memory_object_t         mem_obj,
93 	memory_object_offset_t  offset,
94 	memory_object_cluster_size_t            length,
95 	__unused vm_prot_t      protection_required,
96 	memory_object_fault_info_t      fault_info);
97 kern_return_t compressor_memory_object_data_return(
98 	memory_object_t         mem_obj,
99 	memory_object_offset_t  offset,
100 	memory_object_cluster_size_t                    size,
101 	__unused memory_object_offset_t *resid_offset,
102 	__unused int            *io_error,
103 	__unused boolean_t      dirty,
104 	__unused boolean_t      kernel_copy,
105 	__unused int    upl_flags);
106 kern_return_t compressor_memory_object_data_initialize(
107 	memory_object_t         mem_obj,
108 	memory_object_offset_t  offset,
109 	memory_object_cluster_size_t            size);
110 kern_return_t compressor_memory_object_map(
111 	__unused memory_object_t        mem_obj,
112 	__unused vm_prot_t              prot);
113 kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
114 
/*
 * memory_object vtable for compressor-backed memory objects.
 * Note that map/last_unmap/data_return are wired to panic stubs:
 * the compressor pager is never mapped and is fed through
 * vm_compressor_pager_put() instead of data_return.
 */
const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_backing_object = NULL, /* compressor pagers have no backing object */
	.memory_object_pager_name = "compressor pager"
};
128 
129 /* internal data structures */
130 
/*
 * Global operation counters for observability.
 * Updated without explicit locking in the visible code, so the
 * values are approximate.
 */
struct {
	uint64_t        data_returns;
	uint64_t        data_requests;
	uint64_t        put;
	uint64_t        get;
	uint64_t        state_clr;
	uint64_t        state_get;
	uint64_t        transfer;
} compressor_pager_stats;

typedef int compressor_slot_t; /* stand-in for c_slot_mapping */
142 
typedef struct compressor_pager {
	/* mandatory generic header; must be first */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t                       cpgr_lock;      /* protects the fields below */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references                 cpgr_hdr.mo_ref
#else
	os_ref_atomic_t                 cpgr_references;
#endif
	unsigned int                    cpgr_num_slots;          /* capacity, in pages */
	unsigned int                    cpgr_num_slots_occupied; /* pages currently compressed */
	/*
	 * Slot storage; the representation is picked by
	 * compressor_memory_object_create() based on cpgr_num_slots:
	 *   <= 2 slots      : embedded in the pager (eslots)
	 *   <= 1 chunk      : one directly-addressed array (dslots)
	 *   otherwise       : an array of chunk pointers (islots)
	 */
	union {
		compressor_slot_t       cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t       *cpgr_dslots;   /* direct slots */
		compressor_slot_t       **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
162 
/*
 * Resolve a memory_object_t to a compressor_pager_t.
 * Leaves NULL in _cpgr_ when the object is NULL or is not backed by
 * the compressor pager ops vector; otherwise it is a plain downcast.
 */
#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
	MACRO_BEGIN                                                     \
	if (_mem_obj_ == NULL ||                                        \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {         \
	        _cpgr_ = NULL;                                          \
	} else {                                                        \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;                \
	}                                                               \
	MACRO_END
172 
/* embedded slot pointers in compressor_pager get packed, so VA restricted */
static ZONE_DEFINE_TYPE(compressor_pager_zone, "compressor_pager",
    struct compressor_pager, ZC_NOENCRYPT | ZC_VM);

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

/* helpers for the per-pager mutex (cpgr_lock) */
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

/* slots are grouped into 512-byte chunks (512/sizeof(int) = 128 slots each) */
#define COMPRESSOR_SLOTS_CHUNK_SIZE     (512)
#define COMPRESSOR_SLOTS_PER_CHUNK      (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
190 
191 /* forward declarations */
192 unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
193     int num_slots,
194     vm_compressor_options_t flags,
195     int *failures);
196 void compressor_pager_slot_lookup(
197 	compressor_pager_t      pager,
198 	boolean_t               do_alloc,
199 	memory_object_offset_t  offset,
200 	compressor_slot_t       **slot_pp);
201 
#if     defined(__LP64__)

/* restricted VA zones for slots */

#define NUM_SLOTS_ZONES         3

/*
 * Size classes for slot arrays, in ascending order;
 * zalloc_slot_array() picks the smallest class that fits.
 */
static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
	16,
	64,
	COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
	"compressor_slots.16",
	"compressor_slots.64",
	"compressor_slots.512"
};

/* created at startup by vm_compressor_slots_init() */
static zone_t
    compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */
224 
225 static void
226 zfree_slot_array(compressor_slot_t *slots, size_t size);
227 static compressor_slot_t *
228 zalloc_slot_array(size_t size, zalloc_flags_t);
229 
230 static inline unsigned int
compressor_pager_num_chunks(compressor_pager_t pager)231 compressor_pager_num_chunks(
232 	compressor_pager_t      pager)
233 {
234 	unsigned int num_chunks;
235 
236 	num_chunks = pager->cpgr_num_slots / COMPRESSOR_SLOTS_PER_CHUNK;
237 	if (num_chunks * COMPRESSOR_SLOTS_PER_CHUNK < pager->cpgr_num_slots) {
238 		num_chunks++;  /* do the equivalent of ceil() instead of trunc() for the above division */
239 	}
240 	return num_chunks;
241 }
242 
243 kern_return_t
compressor_memory_object_init(memory_object_t mem_obj,memory_object_control_t control,__unused memory_object_cluster_size_t pager_page_size)244 compressor_memory_object_init(
245 	memory_object_t         mem_obj,
246 	memory_object_control_t control,
247 	__unused memory_object_cluster_size_t pager_page_size)
248 {
249 	compressor_pager_t              pager;
250 
251 	assert(pager_page_size == PAGE_SIZE);
252 
253 	memory_object_control_reference(control);
254 
255 	compressor_pager_lookup(mem_obj, pager);
256 	compressor_pager_lock(pager);
257 
258 	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
259 		panic("compressor_memory_object_init: bad request");
260 	}
261 	pager->cpgr_hdr.mo_control = control;
262 
263 	compressor_pager_unlock(pager);
264 
265 	return KERN_SUCCESS;
266 }
267 
/* compressor memory objects are never mapped into an address space */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
276 
/* never expected: compressor memory objects are never mapped */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t        mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
284 
285 kern_return_t
compressor_memory_object_terminate(memory_object_t mem_obj)286 compressor_memory_object_terminate(
287 	memory_object_t         mem_obj)
288 {
289 	memory_object_control_t control;
290 	compressor_pager_t      pager;
291 
292 	/*
293 	 * control port is a receive right, not a send right.
294 	 */
295 
296 	compressor_pager_lookup(mem_obj, pager);
297 	compressor_pager_lock(pager);
298 
299 	/*
300 	 * After memory_object_terminate both memory_object_init
301 	 * and a no-senders notification are possible, so we need
302 	 * to clean up our reference to the memory_object_control
303 	 * to prepare for a new init.
304 	 */
305 
306 	control = pager->cpgr_hdr.mo_control;
307 	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
308 
309 	compressor_pager_unlock(pager);
310 
311 	/*
312 	 * Now we deallocate our reference on the control.
313 	 */
314 	memory_object_control_deallocate(control);
315 	return KERN_SUCCESS;
316 }
317 
318 void
compressor_memory_object_reference(memory_object_t mem_obj)319 compressor_memory_object_reference(
320 	memory_object_t         mem_obj)
321 {
322 	compressor_pager_t      pager;
323 
324 	compressor_pager_lookup(mem_obj, pager);
325 	if (pager == NULL) {
326 		return;
327 	}
328 
329 	compressor_pager_lock(pager);
330 	os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
331 	compressor_pager_unlock(pager);
332 }
333 
/*
 * Drop a reference on the pager; on the last release, free every
 * compressed slot (whichever representation is in use) and then the
 * pager structure itself.
 */
void
compressor_memory_object_deallocate(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		/* other references remain: nothing to tear down yet */
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	unsigned int num_chunks;
	unsigned int i;
	compressor_slot_t *chunk;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* indirect representation: an array of chunk pointers */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				/* freed count is not needed during teardown */
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree_type(compressor_slot_t *, num_chunks,
		    pager->cpgr_slots.cpgr_islots);
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		/* direct representation: one chunk-sized array */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		/* embedded slots: only the slot contents need freeing */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
422 
/*
 * Query whether the compressor holds the page at "offset".
 * Only length == 0 (existence queries) is supported; actual page-in
 * goes through vm_compressor_pager_get(), not this interface.
 * Returns KERN_SUCCESS if the compressor has the page, KERN_FAILURE
 * if it doesn't or if the offset is out of range.
 */
kern_return_t
compressor_memory_object_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t     fault_info)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	/* page numbers must fit in 32 bits (cpgr_num_slots is 32-bit) */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	/* NOTE(review): pager is dereferenced below without a NULL check,
	 * so callers are assumed to pass a compressor-backed object —
	 * confirm against call sites. */
	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
473 
474 /*
475  * memory_object_data_initialize: check whether we already have each page, and
476  * write it if we do not.  The implementation is far from optimized, and
477  * also assumes that the default_pager is single-threaded.
478  */
479 /*  It is questionable whether or not a pager should decide what is relevant */
480 /* and what is not in data sent from the kernel.  Data initialize has been */
481 /* changed to copy back all data sent to it in preparation for its eventual */
482 /* merge with data return.  It is the kernel that should decide what pages */
483 /* to write back.  As of the writing of this note, this is indeed the case */
484 /* the kernel writes back one page at a time through this interface */
485 
/*
 * Placeholder: panics on the first page of any non-empty range.
 * A real implementation would data_return() each page whose slot
 * is still empty (see the commentary above).
 */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            size)
{
	compressor_pager_t      pager;
	memory_object_offset_t  cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/* an empty range (size == 0) skips the loop and succeeds */
	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
508 
509 
/*ARGSUSED*/
/*
 * Not supported: pages reach the compressor through
 * vm_compressor_pager_put(), never through data_return.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t                        mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   size,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int            upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
525 
/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *      Handle requests for memory objects from the
 *      kernel.
 * Notes:
 *      Because we only give out the default memory
 *      manager port to the kernel, we don't have to
 *      be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t    new_size,
	memory_object_t         *new_mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = zalloc_flags(compressor_pager_zone, Z_WAITOK | Z_NOFAIL);

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	/* pick the slot representation based on the slot count */
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* islots points to an array of chunk pointers; every chunk
		 * has 512/sizeof(int) = 128 slot_mappings */
		pager->cpgr_slots.cpgr_islots = kalloc_type(compressor_slot_t *,
		    num_chunks, Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		/* a single directly-addressed slot array */
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		/* 1 or 2 slots fit in the embedded array */
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
582 
583 
584 unsigned int
compressor_pager_slots_chunk_free(compressor_slot_t * chunk,int num_slots,vm_compressor_options_t flags,int * failures)585 compressor_pager_slots_chunk_free(
586 	compressor_slot_t       *chunk,
587 	int                     num_slots,
588 	vm_compressor_options_t flags,
589 	int                     *failures)
590 {
591 	int i;
592 	vm_decompress_result_t retval;
593 	unsigned int num_slots_freed;
594 
595 	if (failures) {
596 		*failures = 0;
597 	}
598 	num_slots_freed = 0;
599 	for (i = 0; i < num_slots; i++) {
600 		if (chunk[i] != 0) {
601 			retval = vm_compressor_free(&chunk[i], flags);
602 
603 			if (retval == DECOMPRESS_SUCCESS) {
604 				num_slots_freed++;
605 			} else {
606 				assert3s(retval, <, 0); /* it's not DECOMPRESS_SUCCESS_* */
607 				if (retval == DECOMPRESS_NEED_BLOCK) {
608 					assert(flags & C_DONT_BLOCK);
609 				}
610 
611 				if (failures) {
612 					*failures += 1;
613 				}
614 			}
615 		}
616 	}
617 	return num_slots_freed;
618 }
619 
/*
 * Find the slot_mapping for the page at "offset" in this pager and
 * return its address in *slot_pp.  When do_alloc is TRUE, a missing
 * chunk is allocated on demand; otherwise *slot_pp is NULL when the
 * containing chunk doesn't exist.  *slot_pp is also NULL when the
 * offset is out of range.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp /* OUT */)
{
	unsigned int            num_chunks;
	uint32_t                page_num;
	unsigned int            chunk_idx;
	int                     slot_idx;
	compressor_slot_t       *chunk;
	compressor_slot_t       *t_chunk;

	/* offset is relative to the pager: the first page of the first
	 * vm_object that created the pager has an offset of 0 */
	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* page number doesn't fit in 32 bits */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			/* allocate outside the lock, then install with a
			 * re-check in case another thread raced us */
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				/* lost the race: discard our spare chunk */
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		/* single direct chunk */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		/* embedded slots */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
696 
#if defined(__LP64__)
__startup_func
static void
vm_compressor_slots_init(void)
{
	/* create the restricted-VA size-class zones for slot arrays */
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		compressor_slots_zones[idx] = zone_create(
			compressor_slots_zones_names[idx],
			compressor_slots_zones_sizes[idx],
			ZC_VM);
	}
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_compressor_slots_init);
#endif /* defined(__LP64__) */
711 
712 static compressor_slot_t *
zalloc_slot_array(size_t size,zalloc_flags_t flags)713 zalloc_slot_array(size_t size, zalloc_flags_t flags)
714 {
715 #if defined(__LP64__)
716 	compressor_slot_t *slots = NULL;
717 
718 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
719 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
720 		if (size > compressor_slots_zones_sizes[idx]) {
721 			continue;
722 		}
723 		slots = zalloc_flags(compressor_slots_zones[idx], flags);
724 		break;
725 	}
726 	return slots;
727 #else  /* defined(__LP64__) */
728 	return kalloc_data(size, flags);
729 #endif /* !defined(__LP64__) */
730 }
731 
732 static void
zfree_slot_array(compressor_slot_t * slots,size_t size)733 zfree_slot_array(compressor_slot_t *slots, size_t size)
734 {
735 #if defined(__LP64__)
736 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
737 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
738 		if (size > compressor_slots_zones_sizes[idx]) {
739 			continue;
740 		}
741 		zfree(compressor_slots_zones[idx], slots);
742 		break;
743 	}
744 #else  /* defined(__LP64__) */
745 	kfree_data(slots, size);
746 #endif /* !defined(__LP64__) */
747 }
748 
/*
 * Compress the page at physical page "ppnum" and record its slot at
 * "offset" in this pager.  Called from the pageout path.
 * *compressed_count_delta_p receives the net change in the number of
 * compressed pages: -1 when an existing slot is replaced, +1 when
 * the compression succeeds.
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	ppnum_t                         ppnum,
	void                            **current_chead,
	char                            *scratch_buf,
	int                             *compressed_count_delta_p, /* OUT */
	vm_compressor_options_t         flags)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	kern_return_t kr;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities unless the read activities   */
	/* Therefore the grant of vs lock must be done on a try versus a      */
	/* blocking basis.  The code below relies on the fact that the       */
	/* interface is synchronous.  Should this interface be again async   */
	/* for some type  of pager in the future the pages will have to be   */
	/* returned through a separate, asynchronous path.		     */

	compressor_pager_lookup(mem_obj, pager);

	uint32_t dummy_conv;
	if (os_convert_overflow(offset / PAGE_SIZE, &dummy_conv)) {
		/* overflow, page number doesn't fit in a uint32 */
		panic("%s: offset 0x%llx overflow", __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* we're looking for the slot_mapping that corresponds to the offset, which vm_compressor_put() is then going to
	 * set a value into after it allocates the slot. if the slot_mapping doesn't exist, this will create it */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, flags);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	kr = vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf, flags);
	if (kr == KERN_SUCCESS) {
		*compressed_count_delta_p += 1;
	}
	return kr;
}
816 
817 
/*
 * Decompress the page at "offset" into physical page "ppnum".
 * *my_fault_type is set to DBG_COMPRESSOR_FAULT, or
 * DBG_COMPRESSOR_SWAPIN_FAULT when the data had to be swapped in.
 * *compressed_count_delta_p receives -1 when the slot is released
 * by the get (i.e. it wasn't kept for a copy-on-write original).
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	ppnum_t                 ppnum,
	int                     *my_fault_type,
	vm_compressor_options_t flags,
	int                     *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	/* page numbers must fit in 32 bits */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_OUT_OF_RANGE), 0 /* arg */);
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_NO_PAGE), 0 /* arg */);
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int     retval;
		bool unmodified = (vm_compressor_is_slot_compressed(slot_p) == false);
		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, (unmodified ? (flags | C_PAGE_UNMODIFIED) : flags));
		if (retval <= DECOMPRESS_FIRST_FAIL_CODE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_DECOMPRESS_FAILED), (uintptr_t)retval /* arg */);
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == DECOMPRESS_SUCCESS_SWAPPEDIN) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == DECOMPRESS_NEED_BLOCK) {
			assert((flags & C_DONT_BLOCK));
			/*
			 * Not a fatal failure because we just retry with a blocking get later. So we skip ktriage to avoid noise.
			 */
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			/* slot was released by the get */
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
894 
895 unsigned int
vm_compressor_pager_state_clr(memory_object_t mem_obj,memory_object_offset_t offset)896 vm_compressor_pager_state_clr(
897 	memory_object_t         mem_obj,
898 	memory_object_offset_t  offset)
899 {
900 	compressor_pager_t      pager;
901 	compressor_slot_t       *slot_p;
902 	unsigned int            num_slots_freed;
903 
904 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
905 
906 	compressor_pager_stats.state_clr++;
907 
908 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
909 		/* overflow */
910 		panic("%s: offset 0x%llx overflow",
911 		    __FUNCTION__, (uint64_t) offset);
912 		return 0;
913 	}
914 
915 	compressor_pager_lookup(mem_obj, pager);
916 
917 	/* find the compressor slot for that page */
918 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
919 
920 	num_slots_freed = 0;
921 	if (slot_p && *slot_p != 0) {
922 		vm_compressor_free(slot_p, 0);
923 		num_slots_freed++;
924 		assert(*slot_p == 0);
925 	}
926 
927 	return num_slots_freed;
928 }
929 
930 vm_external_state_t
vm_compressor_pager_state_get(memory_object_t mem_obj,memory_object_offset_t offset)931 vm_compressor_pager_state_get(
932 	memory_object_t         mem_obj,
933 	memory_object_offset_t  offset)
934 {
935 	compressor_pager_t      pager;
936 	compressor_slot_t       *slot_p;
937 
938 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
939 
940 	compressor_pager_stats.state_get++;
941 
942 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
943 		/* overflow */
944 		panic("%s: offset 0x%llx overflow",
945 		    __FUNCTION__, (uint64_t) offset);
946 		return VM_EXTERNAL_STATE_ABSENT;
947 	}
948 
949 	compressor_pager_lookup(mem_obj, pager);
950 
951 	/* find the compressor slot for that page */
952 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
953 
954 	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
955 		/* out of range */
956 		return VM_EXTERNAL_STATE_ABSENT;
957 	} else if (slot_p == NULL || *slot_p == 0) {
958 		/* compressor does not have this page */
959 		return VM_EXTERNAL_STATE_ABSENT;
960 	} else {
961 		/* compressor does have this page */
962 		return VM_EXTERNAL_STATE_EXISTS;
963 	}
964 }
965 
966 unsigned int
vm_compressor_pager_reap_pages(memory_object_t mem_obj,vm_compressor_options_t flags)967 vm_compressor_pager_reap_pages(
968 	memory_object_t         mem_obj,
969 	vm_compressor_options_t flags)
970 {
971 	compressor_pager_t      pager;
972 	unsigned int            num_chunks;
973 	int                     failures;
974 	unsigned int            i;
975 	compressor_slot_t       *chunk;
976 	unsigned int            num_slots_freed;
977 
978 	compressor_pager_lookup(mem_obj, pager);
979 	if (pager == NULL) {
980 		return 0;
981 	}
982 
983 	compressor_pager_lock(pager);
984 
985 	/* reap the compressor slots */
986 	num_slots_freed = 0;
987 
988 	num_chunks = compressor_pager_num_chunks(pager);
989 	if (num_chunks > 1) {
990 		/* we have an array of chunks */
991 		for (i = 0; i < num_chunks; i++) {
992 			chunk = pager->cpgr_slots.cpgr_islots[i];
993 			if (chunk != NULL) {
994 				num_slots_freed +=
995 				    compressor_pager_slots_chunk_free(
996 					chunk,
997 					COMPRESSOR_SLOTS_PER_CHUNK,
998 					flags,
999 					&failures);
1000 				if (failures == 0) {
1001 					pager->cpgr_slots.cpgr_islots[i] = NULL;
1002 					zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
1003 				}
1004 			}
1005 		}
1006 	} else if (pager->cpgr_num_slots > 2) {
1007 		chunk = pager->cpgr_slots.cpgr_dslots;
1008 		num_slots_freed +=
1009 		    compressor_pager_slots_chunk_free(
1010 			chunk,
1011 			pager->cpgr_num_slots,
1012 			flags,
1013 			NULL);
1014 	} else {
1015 		chunk = &pager->cpgr_slots.cpgr_eslots[0];
1016 		num_slots_freed +=
1017 		    compressor_pager_slots_chunk_free(
1018 			chunk,
1019 			pager->cpgr_num_slots,
1020 			flags,
1021 			NULL);
1022 	}
1023 
1024 	compressor_pager_unlock(pager);
1025 
1026 	return num_slots_freed;
1027 }
1028 
1029 void
vm_compressor_pager_transfer(memory_object_t dst_mem_obj,memory_object_offset_t dst_offset,memory_object_t src_mem_obj,memory_object_offset_t src_offset)1030 vm_compressor_pager_transfer(
1031 	memory_object_t         dst_mem_obj,
1032 	memory_object_offset_t  dst_offset,
1033 	memory_object_t         src_mem_obj,
1034 	memory_object_offset_t  src_offset)
1035 {
1036 	compressor_pager_t      src_pager, dst_pager;
1037 	compressor_slot_t       *src_slot_p, *dst_slot_p;
1038 
1039 	compressor_pager_stats.transfer++;
1040 
1041 	/* find the compressor slot for the destination */
1042 	compressor_pager_lookup(dst_mem_obj, dst_pager);
1043 	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
1044 	compressor_pager_slot_lookup(dst_pager, TRUE, dst_offset, &dst_slot_p);
1045 	assert(dst_slot_p != NULL);
1046 	assert(*dst_slot_p == 0);
1047 
1048 	/* find the compressor slot for the source */
1049 	compressor_pager_lookup(src_mem_obj, src_pager);
1050 	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
1051 	compressor_pager_slot_lookup(src_pager, FALSE, src_offset, &src_slot_p);
1052 	assert(src_slot_p != NULL);
1053 	assert(*src_slot_p != 0);
1054 
1055 	/* transfer the slot from source to destination */
1056 	vm_compressor_transfer(dst_slot_p, src_slot_p);
1057 	os_atomic_dec(&src_pager->cpgr_num_slots_occupied, relaxed);
1058 	os_atomic_inc(&dst_pager->cpgr_num_slots_occupied, relaxed);
1059 }
1060 
/*
 * Return the offset (in bytes, page-aligned) of the first compressed page
 * at or after "offset" in this pager, or (memory_object_offset_t)-1 if
 * there is none (or the offset is invalid / out of range).
 */
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	unsigned int            num_chunks;
	uint32_t                page_num;
	unsigned int            chunk_idx;
	uint32_t                slot_idx;
	compressor_slot_t       *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow: page number does not fit in 32 bits */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks == 1) {
		/* single slot array: scan it linearly from page_num */
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) slot_idx *
				       PAGE_SIZE;
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	/*
	 * slot_idx starts at page_num's position within its chunk for the
	 * first iteration, and is reset to 0 for every subsequent chunk.
	 */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/*
					 * The last chunk may extend past the
					 * object's actual slot count: went
					 * beyond end of object.
					 */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) next_slot *
				       PAGE_SIZE;
			}
		}
	}
	return (memory_object_offset_t) -1;
}
1138 
1139 unsigned int
vm_compressor_pager_get_count(memory_object_t mem_obj)1140 vm_compressor_pager_get_count(
1141 	memory_object_t mem_obj)
1142 {
1143 	compressor_pager_t      pager;
1144 
1145 	compressor_pager_lookup(mem_obj, pager);
1146 	if (pager == NULL) {
1147 		return 0;
1148 	}
1149 
1150 	/*
1151 	 * The caller should have the VM object locked and one
1152 	 * needs that lock to do a page-in or page-out, so no
1153 	 * need to lock the pager here.
1154 	 */
1155 	assert(pager->cpgr_num_slots_occupied >= 0);
1156 
1157 	return pager->cpgr_num_slots_occupied;
1158 }
1159 
1160 /* Add page count to the counter in the pager */
1161 void
vm_compressor_pager_count(memory_object_t mem_obj,int compressed_count_delta,boolean_t shared_lock,vm_object_t object __unused)1162 vm_compressor_pager_count(
1163 	memory_object_t mem_obj,
1164 	int             compressed_count_delta,
1165 	boolean_t       shared_lock,
1166 	vm_object_t     object __unused)
1167 {
1168 	compressor_pager_t      pager;
1169 
1170 	if (compressed_count_delta == 0) {
1171 		return;
1172 	}
1173 
1174 	compressor_pager_lookup(mem_obj, pager);
1175 	if (pager == NULL) {
1176 		return;
1177 	}
1178 
1179 	if (compressed_count_delta < 0) {
1180 		assert(pager->cpgr_num_slots_occupied >=
1181 		    (unsigned int) -compressed_count_delta);
1182 	}
1183 
1184 	/*
1185 	 * The caller should have the VM object locked,
1186 	 * shared or exclusive.
1187 	 */
1188 	if (shared_lock) {
1189 		vm_object_lock_assert_shared(object);
1190 		os_atomic_add(&pager->cpgr_num_slots_occupied, compressed_count_delta,
1191 		    relaxed);
1192 	} else {
1193 		vm_object_lock_assert_exclusive(object);
1194 		pager->cpgr_num_slots_occupied += compressed_count_delta;
1195 	}
1196 }
1197 
1198 #if CONFIG_FREEZE
1199 kern_return_t
vm_compressor_pager_relocate(memory_object_t mem_obj,memory_object_offset_t offset,void ** current_chead)1200 vm_compressor_pager_relocate(
1201 	memory_object_t         mem_obj,
1202 	memory_object_offset_t  offset,
1203 	void                    **current_chead)
1204 {
1205 	/*
1206 	 * Has the page at this offset been compressed?
1207 	 */
1208 
1209 	compressor_slot_t *slot_p;
1210 	compressor_pager_t dst_pager;
1211 
1212 	assert(mem_obj);
1213 
1214 	compressor_pager_lookup(mem_obj, dst_pager);
1215 	if (dst_pager == NULL) {
1216 		return KERN_FAILURE;
1217 	}
1218 
1219 	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
1220 	return vm_compressor_relocate(current_chead, slot_p);
1221 }
1222 #endif /* CONFIG_FREEZE */
1223 
1224 #if DEVELOPMENT || DEBUG
1225 
1226 kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,memory_object_offset_t offset)1227 vm_compressor_pager_inject_error(memory_object_t mem_obj,
1228     memory_object_offset_t offset)
1229 {
1230 	kern_return_t result = KERN_FAILURE;
1231 	compressor_slot_t *slot_p;
1232 	compressor_pager_t pager;
1233 
1234 	assert(mem_obj);
1235 
1236 	compressor_pager_lookup(mem_obj, pager);
1237 	if (pager != NULL) {
1238 		compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
1239 		if (slot_p != NULL && *slot_p != 0) {
1240 			vm_compressor_inject_error(slot_p);
1241 			result = KERN_SUCCESS;
1242 		}
1243 	}
1244 
1245 	return result;
1246 }
1247 
1248 
/*
 * Write debugging information about the pager to the given buffer.
 * returns: KERN_SUCCESS on success; KERN_NO_SPACE if there is not
 *          enough space in the buffer (buffer dump not implemented yet)
 * argument size - in: bytes free in the buffer, out: bytes written
 */
1254 kern_return_t
vm_compressor_pager_dump(memory_object_t mem_obj,__unused char * buf,__unused size_t * size,bool * is_compressor,unsigned int * slot_count)1255 vm_compressor_pager_dump(memory_object_t mem_obj,     /* IN */
1256     __unused char *buf,                               /* IN buffer to write to */
1257     __unused size_t *size,                           /* IN-OUT */
1258     bool *is_compressor,                              /* OUT */
1259     unsigned int *slot_count)                         /* OUT */
1260 {
1261 	compressor_pager_t pager = NULL;
1262 	compressor_pager_lookup(mem_obj, pager);
1263 
1264 	*size = 0;
1265 	if (pager == NULL) {
1266 		*is_compressor = false;
1267 		*slot_count = 0;
1268 		return KERN_SUCCESS;
1269 	}
1270 	*is_compressor = true;
1271 	*slot_count = pager->cpgr_num_slots_occupied;
1272 
1273 	/*
1274 	 *  size_t insize = *size;
1275 	 *  unsigned int needed_size = 0; // pager->cpgr_num_slots_occupied * sizeof(compressor_slot_t) / sizeof(int);
1276 	 *  if (needed_size > insize) {
1277 	 *       return KERN_NO_SPACE;
1278 	 *  }
1279 	 *  TODO: not fully implemented yet, need to dump out the mappings
1280 	 * size = 0;
1281 	 */
1282 	return KERN_SUCCESS;
1283 }
1284 
1285 #endif
1286