/* xref: /xnu-8020.121.3/osfmk/vm/vm_compressor_pager.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941) */
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 /*
58  *	Compressor Pager.
59  *		Memory Object Management.
60  */
61 
62 #include <kern/host_statistics.h>
63 #include <kern/kalloc.h>
64 #include <kern/ipc_kobject.h>
65 
66 #include <machine/atomic.h>
67 
68 #include <mach/memory_object_control.h>
69 #include <mach/memory_object_types.h>
70 #include <mach/upl.h>
71 
72 #include <vm/memory_object.h>
73 #include <vm/vm_compressor_pager.h>
74 #include <vm/vm_external.h>
75 #include <vm/vm_pageout.h>
76 #include <vm/vm_protos.h>
77 
78 #include <sys/kdebug_triage.h>
79 
/*
 * memory_object interfaces.
 *
 * Forward declarations for the compressor pager's implementation of the
 * memory_object operations; they are wired into "compressor_pager_ops"
 * below.  Most of these are either trivial or panic, because the
 * compressor pager is driven through the vm_compressor_pager_* entry
 * points rather than the classic external-pager protocol.
 */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            length,
	__unused vm_prot_t      protection_required,
	memory_object_fault_info_t      fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t                    size,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int    upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t           size,
	__unused vm_prot_t              desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_size_t            length,
	__unused vm_sync_t              flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t        mem_obj,
	__unused boolean_t              reclaim_backing_store);
124 
/*
 * Virtual operations table identifying a memory object as a compressor
 * pager.  compressor_pager_lookup() compares a memory object's
 * mo_pager_ops pointer against &compressor_pager_ops to validate it.
 */
const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_data_unlock = compressor_memory_object_data_unlock,
	.memory_object_synchronize = compressor_memory_object_synchronize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "compressor pager"
};
141 
/* internal data structures */

/*
 * Global activity counters, bumped without locking (best-effort,
 * lost increments under concurrency are acceptable for statistics).
 */
struct {
	uint64_t        data_returns;   /* memory_object_data_return() calls */
	uint64_t        data_requests;  /* memory_object_data_request() calls */
	uint64_t        put;            /* vm_compressor_pager_put() calls */
	uint64_t        get;            /* vm_compressor_pager_get() calls */
	uint64_t        state_clr;      /* vm_compressor_pager_state_clr() calls */
	uint64_t        state_get;      /* presumably state_get() calls -- counterpart not in view */
	uint64_t        transfer;       /* presumably slot transfer ops -- counterpart not in view */
} compressor_pager_stats;
153 
/* a compressed-page handle; 0 means "no compressed copy of this page" */
typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t                       cpgr_lock;      /* protects refcount and slot-chunk install */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references                 cpgr_hdr.mo_ref
#else
	os_ref_atomic_t                 cpgr_references;
#endif
	unsigned int                    cpgr_num_slots;          /* capacity, in pages */
	unsigned int                    cpgr_num_slots_occupied; /* slots currently holding data */
	/*
	 * Slot storage, sized by cpgr_num_slots:
	 *   <= 2 slots                         -> embedded in the pager (eslots)
	 *   <= COMPRESSOR_SLOTS_PER_CHUNK      -> one out-of-line chunk (dslots)
	 *   otherwise                          -> array of chunk pointers (islots)
	 */
	union {
		compressor_slot_t       cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t       *cpgr_dslots;   /* direct slots */
		compressor_slot_t       **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
175 
/*
 * Validate "_mem_obj_" as a compressor pager (by checking its ops vector)
 * and set "_cpgr_" to the pager, or to NULL if it is not one.
 */
#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
	MACRO_BEGIN                                                     \
	if (_mem_obj_ == NULL ||                                        \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {         \
	        _cpgr_ = NULL;                                          \
	} else {                                                        \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;                \
	}                                                               \
	MACRO_END
185 
/* embedded slot pointers in compressor_pager get packed, so VA restricted */
static ZONE_DEFINE_TYPE(compressor_pager_zone, "compressor_pager",
    struct compressor_pager, ZC_NOENCRYPT | ZC_VM_LP64 | ZC_NOTBITAG);

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

/* convenience wrappers around the per-pager mutex */
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
200 
/* size in bytes of one slot chunk; 512 bytes == 128 4-byte slots */
#define COMPRESSOR_SLOTS_CHUNK_SIZE     (512)
#define COMPRESSOR_SLOTS_PER_CHUNK      (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp);
214 
#if     defined(__LP64__)

/* restricted VA zones for slots */

/*
 * On LP64, slot arrays come from a small set of fixed-size zones rather
 * than kalloc, so their addresses stay within a packable VA range
 * (see the ZC_VM_LP64 note on compressor_pager_zone above).
 * An allocation of size N is served by the smallest zone that fits it.
 */
#define NUM_SLOTS_ZONES         3

static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
	16,
	64,
	COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
	"compressor_slots.16",
	"compressor_slots.64",
	"compressor_slots.512"
};

static zone_t
    compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */

static void
zfree_slot_array(compressor_slot_t *slots, size_t size);
static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t);
242 
243 
/*
 * compressor_memory_object_init:
 *	Attach the kernel's memory_object_control to this pager.
 *	Takes a reference on "control" that is released at terminate time.
 *	Panics if the pager is already initialized.
 */
kern_return_t
compressor_memory_object_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t              pager;

	/* only checked on DEBUG/DEVELOPMENT builds, hence __unused above */
	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	/* NOTE(review): pager is not checked for NULL before locking;
	 * presumably mem_obj is always a valid compressor pager here. */
	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/* a pager may only be initialized once between terminations */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
268 
/* memory_object_synchronize is obsolete; any call is a fatal error */
kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}
279 
/* compressor memory objects are never mapped; any call is a fatal error */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
288 
/* compressor memory objects are never mapped; any call is a fatal error */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t        mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
296 
/* not supported for compressor pagers; any call is a fatal error */
kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t        mem_obj,
	__unused boolean_t              reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}
305 
/*
 * compressor_memory_object_terminate:
 *	Detach the memory_object_control from the pager and drop the
 *	reference taken at init time, leaving the pager ready for a
 *	possible re-init.
 */
kern_return_t
compressor_memory_object_terminate(
	memory_object_t         mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t      pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 * (done outside the pager lock)
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}
338 
/*
 * compressor_memory_object_reference:
 *	Take an additional reference on the pager.  Silently ignores
 *	objects that are not compressor pagers.
 */
void
compressor_memory_object_reference(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	/* refcount is manipulated under the pager lock */
	compressor_pager_lock(pager);
	os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
	compressor_pager_unlock(pager);
}
354 
/*
 * compressor_memory_object_deallocate:
 *	Drop a reference on the pager.  On the final release, free every
 *	compressed slot (and the slot storage itself, in whichever of the
 *	three layouts it uses) and destroy the pager.
 */
void
compressor_memory_object_deallocate(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		/* references remain: nothing more to do */
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	/* round up so a partial final chunk is counted */
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks (indirect layout) */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree_type(compressor_slot_t *, num_chunks,
		    pager->cpgr_slots.cpgr_islots);
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		/* single out-of-line chunk (direct layout) */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		/* 1 or 2 slots embedded directly in the pager (embedded layout) */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
443 
/*
 * compressor_memory_object_data_request:
 *	Query whether the compressor holds the page at "offset".
 *	Only a zero-length "query" request is supported; an actual data
 *	request (length != 0) panics.  Returns KERN_SUCCESS if the page
 *	is present in the compressor, KERN_FAILURE otherwise.
 */
kern_return_t
compressor_memory_object_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t     fault_info)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	/* reject offsets whose page number does not fit in 32 bits */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	/* NOTE(review): pager is not checked for NULL before use below;
	 * presumably mem_obj is always a valid compressor pager here. */
	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
494 
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/*  It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case */
/* the kernel writes back one page at a time through this interface */

/*
 * NOTE: for the compressor pager this path is effectively unsupported:
 * any call with size > 0 panics on the first loop iteration below.
 */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            size)
{
	compressor_pager_t      pager;
	memory_object_offset_t  cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
529 
/* not supported for compressor pagers; any call is a fatal error */
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t           size,
	__unused vm_prot_t              desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}
540 
541 
/*ARGSUSED*/
/*
 * Pages enter the compressor via vm_compressor_pager_put(), never via
 * the external data_return path, so any call here is a fatal error.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t                        mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   size,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int            upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
557 
/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *      Handle requests for memory objects from the
 *      kernel.
 * Notes:
 *      Because we only give out the default memory
 *      manager port to the kernel, we don't have to
 *      be so paranoid about the contents.
 *
 *      Slot storage is picked by object size:
 *      1-2 slots embedded, up to one chunk direct, else an
 *      (initially empty) array of chunk pointers filled lazily by
 *      compressor_pager_slot_lookup().
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t    new_size,
	memory_object_t         *new_mem_obj)
{
	compressor_pager_t      pager;
	int                     num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = zalloc_flags(compressor_pager_zone, Z_WAITOK | Z_NOFAIL);

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	/* round up so a partial final chunk is counted */
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* indirect layout: chunks themselves are allocated on demand */
		pager->cpgr_slots.cpgr_islots = kalloc_type(compressor_slot_t *,
		    num_chunks, Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		/* direct layout: one zero-filled out-of-line slot array */
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		/* embedded layout: slots live inside the pager structure */
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
613 
614 
615 unsigned int
compressor_pager_slots_chunk_free(compressor_slot_t * chunk,int num_slots,int flags,int * failures)616 compressor_pager_slots_chunk_free(
617 	compressor_slot_t       *chunk,
618 	int                     num_slots,
619 	int                     flags,
620 	int                     *failures)
621 {
622 	int i;
623 	int retval;
624 	unsigned int num_slots_freed;
625 
626 	if (failures) {
627 		*failures = 0;
628 	}
629 	num_slots_freed = 0;
630 	for (i = 0; i < num_slots; i++) {
631 		if (chunk[i] != 0) {
632 			retval = vm_compressor_free(&chunk[i], flags);
633 
634 			if (retval == 0) {
635 				num_slots_freed++;
636 			} else {
637 				if (retval == -2) {
638 					assert(flags & C_DONT_BLOCK);
639 				}
640 
641 				if (failures) {
642 					*failures += 1;
643 				}
644 			}
645 		}
646 	}
647 	return num_slots_freed;
648 }
649 
/*
 * compressor_pager_slot_lookup:
 *	Translate a byte "offset" into a pointer to its compressor slot,
 *	returned through "slot_pp" (NULL if out of range, or if the slot's
 *	chunk does not exist and "do_alloc" is FALSE).  With "do_alloc"
 *	TRUE, a missing chunk in the indirect layout is allocated and
 *	installed under the pager lock (first writer wins; the loser frees
 *	its chunk).
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp)
{
	int                     num_chunks;
	uint32_t                page_num;
	int                     chunk_idx;
	int                     slot_idx;
	compressor_slot_t       *chunk;
	compressor_slot_t       *t_chunk;        /* tentative new chunk */

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks (indirect layout) */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			/* allocate before taking the lock (may block) */
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			/* re-check: another thread may have installed a chunk */
			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			/* we lost the race: discard our tentative chunk */
			if (t_chunk) {
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		/* direct layout: one flat slot array */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		/* embedded layout: slots inside the pager structure */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
724 
#if defined(__LP64__)
/*
 * Create the restricted-VA zones backing slot-array allocations
 * (see compressor_slots_zones_sizes above).  Runs once at startup.
 */
__startup_func
static void
vm_compressor_slots_init(void)
{
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		compressor_slots_zones[idx] = zone_create(
			compressor_slots_zones_names[idx],
			compressor_slots_zones_sizes[idx],
			ZC_PGZ_USE_GUARDS | ZC_VM_LP64 | ZC_NOTBITAG);
	}
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_compressor_slots_init);
#endif /* defined(__LP64__) */
739 
740 static compressor_slot_t *
zalloc_slot_array(size_t size,zalloc_flags_t flags)741 zalloc_slot_array(size_t size, zalloc_flags_t flags)
742 {
743 #if defined(__LP64__)
744 	compressor_slot_t *slots = NULL;
745 
746 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
747 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
748 		if (size > compressor_slots_zones_sizes[idx]) {
749 			continue;
750 		}
751 		slots = zalloc_flags(compressor_slots_zones[idx], flags);
752 		break;
753 	}
754 	return slots;
755 #else  /* defined(__LP64__) */
756 	return kalloc_data(size, flags);
757 #endif /* !defined(__LP64__) */
758 }
759 
760 static void
zfree_slot_array(compressor_slot_t * slots,size_t size)761 zfree_slot_array(compressor_slot_t *slots, size_t size)
762 {
763 #if defined(__LP64__)
764 	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
765 	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
766 		if (size > compressor_slots_zones_sizes[idx]) {
767 			continue;
768 		}
769 		zfree(compressor_slots_zones[idx], slots);
770 		break;
771 	}
772 #else  /* defined(__LP64__) */
773 	kfree_data(slots, size);
774 #endif /* !defined(__LP64__) */
775 }
776 
/*
 * vm_compressor_pager_put:
 *	Compress the page at physical page "ppnum" into the slot for
 *	"offset", allocating the slot's chunk if necessary.  An already
 *	occupied slot is freed first (its old contents are discarded).
 *	"*compressed_count_delta_p" reports the net change in occupied
 *	slots (-1, 0 or +1).  Returns KERN_RESOURCE_SHORTAGE if the
 *	compressor could not take the page.
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	ppnum_t                         ppnum,
	void                            **current_chead,
	char                            *scratch_buf,
	int                             *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities unless the read activities   */
	/* Therefore the grant of vs lock must be done on a try versus a      */
	/* blocking basis.  The code below relies on the fact that the       */
	/* interface is synchronous.  Should this interface be again async   */
	/* for some type  of pager in the future the pages will have to be   */
	/* returned through a separate, asynchronous path.		     */

	compressor_pager_lookup(mem_obj, pager);

	/* reject offsets whose page number does not fit in 32 bits */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* find (and if needed allocate) the slot for this page */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		/* non-zero return: the compressor could not take the page */
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}
841 
842 
/*
 * vm_compressor_pager_get:
 *	Decompress the page for "offset" into physical page "ppnum".
 *	"*my_fault_type" is set to DBG_COMPRESSOR_FAULT, or to
 *	DBG_COMPRESSOR_SWAPIN_FAULT if the data had to be swapped in.
 *	"*compressed_count_delta_p" reports the net change in occupied
 *	slots (-1 when the slot is emptied, 0 when the original is kept,
 *	e.g. for a copy-on-write fault).  Failures are recorded via
 *	kernel_triage_record() for later diagnosis.
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	ppnum_t                 ppnum,
	int                     *my_fault_type,
	int                     flags,
	int                     *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	/* reject offsets whose page number does not fit in 32 bits */
	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_OUT_OF_RANGE), 0 /* arg */);
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_NO_PAGE), 0 /* arg */);
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int     retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1) {
			/* decompression failed */
			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_DECOMPRESS_FAILED), 0 /* arg */);
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			/* data came from swap, not memory */
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			/* would have blocked, and the caller forbade blocking */
			assert((flags & C_DONT_BLOCK));
			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_BLOCKING_OP_FAILED), 0 /* arg */);
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
917 
918 unsigned int
vm_compressor_pager_state_clr(memory_object_t mem_obj,memory_object_offset_t offset)919 vm_compressor_pager_state_clr(
920 	memory_object_t         mem_obj,
921 	memory_object_offset_t  offset)
922 {
923 	compressor_pager_t      pager;
924 	compressor_slot_t       *slot_p;
925 	unsigned int            num_slots_freed;
926 
927 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
928 
929 	compressor_pager_stats.state_clr++;
930 
931 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
932 		/* overflow */
933 		panic("%s: offset 0x%llx overflow",
934 		    __FUNCTION__, (uint64_t) offset);
935 		return 0;
936 	}
937 
938 	compressor_pager_lookup(mem_obj, pager);
939 
940 	/* find the compressor slot for that page */
941 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
942 
943 	num_slots_freed = 0;
944 	if (slot_p && *slot_p != 0) {
945 		vm_compressor_free(slot_p, 0);
946 		num_slots_freed++;
947 		assert(*slot_p == 0);
948 	}
949 
950 	return num_slots_freed;
951 }
952 
953 vm_external_state_t
vm_compressor_pager_state_get(memory_object_t mem_obj,memory_object_offset_t offset)954 vm_compressor_pager_state_get(
955 	memory_object_t         mem_obj,
956 	memory_object_offset_t  offset)
957 {
958 	compressor_pager_t      pager;
959 	compressor_slot_t       *slot_p;
960 
961 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
962 
963 	compressor_pager_stats.state_get++;
964 
965 	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
966 		/* overflow */
967 		panic("%s: offset 0x%llx overflow",
968 		    __FUNCTION__, (uint64_t) offset);
969 		return VM_EXTERNAL_STATE_ABSENT;
970 	}
971 
972 	compressor_pager_lookup(mem_obj, pager);
973 
974 	/* find the compressor slot for that page */
975 	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
976 
977 	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
978 		/* out of range */
979 		return VM_EXTERNAL_STATE_ABSENT;
980 	} else if (slot_p == NULL || *slot_p == 0) {
981 		/* compressor does not have this page */
982 		return VM_EXTERNAL_STATE_ABSENT;
983 	} else {
984 		/* compressor does have this page */
985 		return VM_EXTERNAL_STATE_EXISTS;
986 	}
987 }
988 
/*
 * Free the compressed data for all of "mem_obj"'s pages.  "flags" is
 * passed through to compressor_pager_slots_chunk_free().  Returns the
 * total number of compressor slots freed.
 *
 * The pager stores its slots in one of three layouts, mirrored by the
 * three branches below:
 *   - more than one chunk: an array of chunk pointers (cpgr_islots)
 *   - one chunk, more than 2 slots: a single allocation (cpgr_dslots)
 *   - 1 or 2 slots: embedded directly in the pager (cpgr_eslots)
 */
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t         mem_obj,
	int                     flags)
{
	compressor_pager_t      pager;
	int                     num_chunks;
	int                     failures;
	int                     i;
	compressor_slot_t       *chunk;
	unsigned int            num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/* hold the pager lock across the whole sweep */
	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					flags,
					&failures);
				/*
				 * Release the chunk itself only when every
				 * slot in it was freed; otherwise keep it
				 * for the slots still occupied.
				 * NOTE(review): "failures" is not
				 * initialized here -- this relies on
				 * compressor_pager_slots_chunk_free()
				 * always storing to *failures; confirm.
				 */
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		/* single directly-allocated chunk; never released here */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	} else {
		/* slots embedded in the pager structure itself */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}
1051 
/*
 * Move the compressed data for the page at "src_offset" in
 * "src_mem_obj" to "dst_offset" in "dst_mem_obj".  The destination
 * slot must be empty and the source slot occupied (both asserted),
 * and both offsets must be in range for their pagers.  The occupancy
 * counts of both pagers are updated atomically.
 *
 * NOTE(review): the (uint32_t) casts below truncate the offsets; the
 * preceding asserts are the only guard and are compiled out on
 * release builds, so callers are expected to pass offsets whose page
 * numbers fit in 32 bits -- confirm against callers.
 */
void
vm_compressor_pager_transfer(
	memory_object_t         dst_mem_obj,
	memory_object_offset_t  dst_offset,
	memory_object_t         src_mem_obj,
	memory_object_offset_t  src_offset)
{
	compressor_pager_t      src_pager, dst_pager;
	compressor_slot_t       *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination (allocating it) */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
	    &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source (must already exist) */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
	    &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
1087 
/*
 * Find the first page at or after "offset" for which the compressor
 * holds data in "mem_obj"'s pager.  Returns that page's offset, or
 * (memory_object_offset_t) -1 when there is no such page, when the
 * page number for "offset" overflows 32 bits, or when "offset" is
 * beyond the pager's range.
 */
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	uint32_t                num_chunks;
	uint32_t                page_num;
	uint32_t                chunk_idx;
	uint32_t                slot_idx;
	compressor_slot_t       *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
	    COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		/*
		 * Single chunk: the slots live either in a direct
		 * allocation (> 2 slots) or embedded in the pager
		 * itself (1 or 2 slots).  Scan it linearly from the
		 * requested page.
		 */
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
				       PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	/*
	 * slot_idx starts at the requested page's position within its
	 * chunk, then restarts at 0 for every subsequent chunk.
	 */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
				       PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
1167 
1168 unsigned int
vm_compressor_pager_get_count(memory_object_t mem_obj)1169 vm_compressor_pager_get_count(
1170 	memory_object_t mem_obj)
1171 {
1172 	compressor_pager_t      pager;
1173 
1174 	compressor_pager_lookup(mem_obj, pager);
1175 	if (pager == NULL) {
1176 		return 0;
1177 	}
1178 
1179 	/*
1180 	 * The caller should have the VM object locked and one
1181 	 * needs that lock to do a page-in or page-out, so no
1182 	 * need to lock the pager here.
1183 	 */
1184 	assert(pager->cpgr_num_slots_occupied >= 0);
1185 
1186 	return pager->cpgr_num_slots_occupied;
1187 }
1188 
1189 void
vm_compressor_pager_count(memory_object_t mem_obj,int compressed_count_delta,boolean_t shared_lock,vm_object_t object __unused)1190 vm_compressor_pager_count(
1191 	memory_object_t mem_obj,
1192 	int             compressed_count_delta,
1193 	boolean_t       shared_lock,
1194 	vm_object_t     object __unused)
1195 {
1196 	compressor_pager_t      pager;
1197 
1198 	if (compressed_count_delta == 0) {
1199 		return;
1200 	}
1201 
1202 	compressor_pager_lookup(mem_obj, pager);
1203 	if (pager == NULL) {
1204 		return;
1205 	}
1206 
1207 	if (compressed_count_delta < 0) {
1208 		assert(pager->cpgr_num_slots_occupied >=
1209 		    (unsigned int) -compressed_count_delta);
1210 	}
1211 
1212 	/*
1213 	 * The caller should have the VM object locked,
1214 	 * shared or exclusive.
1215 	 */
1216 	if (shared_lock) {
1217 		vm_object_lock_assert_shared(object);
1218 		OSAddAtomic(compressed_count_delta,
1219 		    &pager->cpgr_num_slots_occupied);
1220 	} else {
1221 		vm_object_lock_assert_exclusive(object);
1222 		pager->cpgr_num_slots_occupied += compressed_count_delta;
1223 	}
1224 }
1225 
#if CONFIG_FREEZE
/*
 * Hand the compressor slot backing the page at "offset" in "mem_obj"
 * to vm_compressor_relocate() with the given compression head.
 * Returns KERN_FAILURE if the object has no compressor pager,
 * otherwise whatever vm_compressor_relocate() returns.
 */
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	void                    **current_chead)
{
	compressor_pager_t      pager;
	compressor_slot_t       *slot_p;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return KERN_FAILURE;
	}

	/* locate (without allocating) the slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */
1251 
#if DEVELOPMENT || DEBUG

/*
 * Test hook: call vm_compressor_inject_error() on the compressor slot
 * backing the page at "offset" in "mem_obj".  Returns KERN_SUCCESS if
 * an occupied slot was found, KERN_FAILURE otherwise.
 */
kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,
    memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return KERN_FAILURE;
	}

	/* locate (without allocating) the slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (slot_p == NULL || *slot_p == 0) {
		/* no compressed data at this offset */
		return KERN_FAILURE;
	}

	vm_compressor_inject_error(slot_p);
	return KERN_SUCCESS;
}

#endif
1277