xref: /xnu-8792.61.2/bsd/skywalk/mem/skmem_region.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /* BEGIN CSTYLED */
30 /*
31  * A region represents a collection of one or more similarly-sized memory
32  * segments, each of which is a contiguous range of addresses.  A segment
33  * is either allocated or free, and is treated as disjoint from all other
34  * segments.  That is, the contiguity applies only at the segment level,
35  * and a region with multiple segments is not contiguous at the region level.
36  * A segment always belongs to either the segment freelist or the
37  * allocated-address hash chain, as described below.
38  *
39  * The optional SKMEM_REGION_CR_NOREDIRECT flag indicates that the region
40  * stays intact even after a defunct.  Otherwise, the segments belonging
41  * to the region will be freed at defunct time, and the span covered by
42  * the region will be redirected to zero-filled anonymous memory.
43  *
44  * Memory for a region is always created as pageable and purgeable.  It is
45  * the client's responsibility to prepare (wire) it, and optionally insert
46  * it into the IOMMU, at segment construction time.  When the segment is
47  * freed, the client is responsible for removing it from the IOMMU (if
48  * needed) and for completing (unwiring) it.
49  *
50  * When the region is created with SKMEM_REGION_CR_PERSISTENT, the memory
51  * is immediately wired upon allocation (segment removed from freelist).
52  * It gets unwired when memory is discarded (segment inserted to freelist).
53  *
54  * The chronological life cycle of a segment is as such:
55  *
56  *    SKSEG_STATE_DETACHED
57  *        SKSEG_STATE_{MAPPED,MAPPED_WIRED}
58  *            [segment allocated, useable by client]
59  *              ...
60  *            [client frees segment]
61  *        SKSEG_STATE_{MAPPED,MAPPED_WIRED}
62  *	  [reclaim]
63  *    SKSEG_STATE_DETACHED
64  *
65  * The region can also be marked as user-mappable (SKMEM_REGION_CR_MMAPOK);
66  * this allows it to be further marked with SKMEM_REGION_CR_UREADONLY to
67  * prevent modifications by the user task.  Only user-mappable regions will
68  * be considered for inclusion during skmem_arena_mmap().
69  *
70  * Every skmem allocator has a region as its slab supplier.  Each slab is
71  * exactly a segment.  The allocator uses skmem_region_{alloc,free}() to
72  * create and destroy slabs.
73  *
74  * A region may be mirrored by another region; the latter acts as the master
75  * controller for both regions.  Mirrored (slave) regions cannot be used
76  * directly by the skmem allocator.  Region mirroring is used for
77  * managing shadow objects {umd,kmd} and {usd,ksd}, where an object in one
78  * region has the same size and lifetime as its shadow counterpart.
79  *
80  * CREATION/DESTRUCTION:
81  *
82  *   At creation time, all segments are allocated and are immediately inserted
83  *   into the freelist.  Allocating a purgeable segment has very little cost,
84  *   as it is not backed by physical memory until it is accessed.  Immediate
85  *   insertion into the freelist then causes the mapping to be torn down.
86  *
87  *   At destruction time, the freelist is emptied, and each segment is then
88  *   destroyed.  The system will assert if it detects there are outstanding
89  *   segments not yet returned to the region (i.e., not freed by the client).
90  *
91  * ALLOCATION:
92  *
93  *   Allocating involves searching the freelist for a segment; if found, the
94  *   segment is removed from the freelist and is inserted into the allocated-
95  *   address hash chain.  The address of the memory object represented by
96  *   the segment is used as the hash key.  The allocated-address hash chain
97  *   is needed since we return the address of the memory object, and not the
98  *   segment itself, to the client.
99  *
100  * DEALLOCATION:
101  *
102  *   Freeing a memory object causes the chain to be searched for a matching
103  *   segment.  The system will assert if a segment cannot be found, since
104  *   that indicates that the memory object address is invalid.  Once found,
105  *   the segment is removed from the allocated-address hash chain, and is
106  *   inserted into the freelist.
107  *
108  * Segment allocation and deallocation can be expensive.  Because of this,
109  * we expect that most clients will utilize the skmem_cache slab allocator
110  * as the frontend instead.
111  */
112 /* END CSTYLED */
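/*
 * Example (illustrative sketch only; the parameter values are
 * hypothetical, and real callers start from the defaults in the
 * skmem_regions[] array rather than building params by hand):
 *
 *	struct skmem_region_params srp = {
 *		.srp_id = SKMEM_REGION_SCHEMA,
 *		.srp_name = "example",
 *		.srp_r_obj_size = 512,		// requested object size
 *		.srp_r_obj_cnt = 1024,		// requested object count
 *		.srp_cflags = SKMEM_REGION_CR_MMAPOK,
 *	};
 *	struct skmem_region *skr;
 *	struct sksegment *sg = NULL, *sgm = NULL;
 *	void *addr, *maddr = NULL;
 *
 *	skmem_region_params_config(&srp);	// computes srp_c_* values
 *	skr = skmem_region_create("example", &srp, NULL, NULL, NULL);
 *	addr = skmem_region_alloc(skr, &maddr, &sg, &sgm, SKMEM_NOSLEEP);
 *	if (addr != NULL) {
 *		// ... segment backing addr is usable by the client ...
 *		skmem_region_free(skr, addr, maddr);
 *	}
 *	(void) skmem_region_release(skr);	// drop creation reference
 */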
113 
114 #include <skywalk/os_skywalk_private.h>
115 #define _FN_KPRINTF             /* don't redefine kprintf() */
116 #include <pexpert/pexpert.h>    /* for PE_parse_boot_argn */
117 
118 static void skmem_region_destroy(struct skmem_region *skr);
119 static void skmem_region_depopulate(struct skmem_region *);
120 static int sksegment_cmp(const struct sksegment *, const struct sksegment *);
121 static struct sksegment *sksegment_create(struct skmem_region *, uint32_t);
122 static void sksegment_destroy(struct skmem_region *, struct sksegment *);
123 static void sksegment_freelist_insert(struct skmem_region *,
124     struct sksegment *, boolean_t);
125 static struct sksegment *sksegment_freelist_remove(struct skmem_region *,
126     struct sksegment *, uint32_t, boolean_t);
127 static struct sksegment *sksegment_freelist_grow(struct skmem_region *);
128 static struct sksegment *sksegment_alloc_with_idx(struct skmem_region *,
129     uint32_t);
130 static void *skmem_region_alloc_common(struct skmem_region *,
131     struct sksegment *);
132 static void *skmem_region_mirror_alloc(struct skmem_region *,
133     struct sksegment *, struct sksegment **);
134 static void skmem_region_applyall(void (*)(struct skmem_region *));
135 static void skmem_region_update(struct skmem_region *);
136 static void skmem_region_update_func(thread_call_param_t, thread_call_param_t);
137 static inline void skmem_region_retain_locked(struct skmem_region *);
138 static inline boolean_t skmem_region_release_locked(struct skmem_region *);
139 static int skmem_region_mib_get_sysctl SYSCTL_HANDLER_ARGS;
140 
141 RB_PROTOTYPE_PREV(segtfreehead, sksegment, sg_node, sksegment_cmp);
142 RB_GENERATE_PREV(segtfreehead, sksegment, sg_node, sksegment_cmp);
143 
144 SYSCTL_PROC(_kern_skywalk_stats, OID_AUTO, region,
145     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
146     0, 0, skmem_region_mib_get_sysctl, "S,sk_stats_region",
147     "Skywalk region statistics");
148 
149 static LCK_ATTR_DECLARE(skmem_region_lock_attr, 0, 0);
150 static LCK_GRP_DECLARE(skmem_region_lock_grp, "skmem_region");
151 static LCK_MTX_DECLARE_ATTR(skmem_region_lock, &skmem_region_lock_grp,
152     &skmem_region_lock_attr);
153 
154 /* protected by skmem_region_lock */
155 static TAILQ_HEAD(, skmem_region) skmem_region_head;
156 
157 static thread_call_t skmem_region_update_tc;
158 
159 #define SKMEM_REGION_UPDATE_INTERVAL    13      /* 13 seconds */
160 static uint32_t skmem_region_update_interval = SKMEM_REGION_UPDATE_INTERVAL;
161 
162 #define SKMEM_WDT_MAXTIME               30      /* # of secs before watchdog */
163 #define SKMEM_WDT_PURGE                 3       /* retry purge threshold */
164 
165 #if (DEVELOPMENT || DEBUG)
166 /* Mean Time Between Failures (ms) */
167 static volatile uint64_t skmem_region_mtbf;
168 
169 static int skmem_region_mtbf_sysctl(struct sysctl_oid *, void *, int,
170     struct sysctl_req *);
171 
172 SYSCTL_PROC(_kern_skywalk_mem, OID_AUTO, region_mtbf,
173     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
174     skmem_region_mtbf_sysctl, "Q", "Region MTBF (ms)");
175 
176 SYSCTL_UINT(_kern_skywalk_mem, OID_AUTO, region_update_interval,
177     CTLFLAG_RW | CTLFLAG_LOCKED, &skmem_region_update_interval,
178     SKMEM_REGION_UPDATE_INTERVAL, "Region update interval (sec)");
179 #endif /* (DEVELOPMENT || DEBUG) */
180 
181 #define SKMEM_REGION_LOCK()                     \
182 	lck_mtx_lock(&skmem_region_lock)
183 #define SKMEM_REGION_LOCK_ASSERT_HELD()         \
184 	LCK_MTX_ASSERT(&skmem_region_lock, LCK_MTX_ASSERT_OWNED)
185 #define SKMEM_REGION_LOCK_ASSERT_NOTHELD()      \
186 	LCK_MTX_ASSERT(&skmem_region_lock, LCK_MTX_ASSERT_NOTOWNED)
187 #define SKMEM_REGION_UNLOCK()                   \
188 	lck_mtx_unlock(&skmem_region_lock)
189 
190 /*
191  * Hash table bounds.  Start with the initial value, and rescale up to
192  * the specified limit.  Ideally we don't need a limit, but in practice
193  * this helps guard against runaways.  These values should be revisited
194  * in the future and adjusted as needed.
195  */
196 #define SKMEM_REGION_HASH_INITIAL       32      /* initial hash table size */
197 #define SKMEM_REGION_HASH_LIMIT         4096    /* hash table size limit */
198 
199 #define SKMEM_REGION_HASH_INDEX(_a, _s, _m)     \
200 	(((_a) + ((_a) >> (_s)) + ((_a) >> ((_s) << 1))) & (_m))
201 #define SKMEM_REGION_HASH(_skr, _addr)                                     \
202 	(&(_skr)->skr_hash_table[SKMEM_REGION_HASH_INDEX((uintptr_t)_addr, \
203 	    (_skr)->skr_hash_shift, (_skr)->skr_hash_mask)])
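/*
 * Example (illustrative): with a 32KB segment size, skr_hash_shift is
 * flsll(32768) - 1 == 15, and the initial 32-bucket table gives a mask
 * of 31.  The address is folded with copies of itself shifted right by
 * the segment order and by twice the segment order, so addresses that
 * differ only by segment-sized strides spread across buckets:
 *
 *	// addr == 0x8000 (hypothetical segment start)
 *	idx = (0x8000 + (0x8000 >> 15) + (0x8000 >> 30)) & 31;	// == 1
 */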
204 
205 static ZONE_DEFINE(skr_zone, SKMEM_ZONE_PREFIX ".mem.skr",
206     sizeof(struct skmem_region), ZC_ZFREE_CLEARMEM);
207 
208 static unsigned int sg_size;                    /* size of zone element */
209 static struct skmem_cache *skmem_sg_cache;      /* cache for sksegment */
210 
211 static uint32_t skmem_seg_size = SKMEM_SEG_SIZE;
212 static uint32_t skmem_md_seg_size = SKMEM_MD_SEG_SIZE;
213 static uint32_t skmem_drv_buf_seg_size = SKMEM_DRV_BUF_SEG_SIZE;
214 static uint32_t skmem_drv_buf_seg_eff_size = SKMEM_DRV_BUF_SEG_SIZE;
215 uint32_t skmem_usr_buf_seg_size = SKMEM_USR_BUF_SEG_SIZE;
216 
217 #define SKMEM_TAG_SEGMENT_BMAP  "com.apple.skywalk.segment.bmap"
218 static SKMEM_TAG_DEFINE(skmem_tag_segment_bmap, SKMEM_TAG_SEGMENT_BMAP);
219 
220 #define SKMEM_TAG_SEGMENT_HASH  "com.apple.skywalk.segment.hash"
221 static SKMEM_TAG_DEFINE(skmem_tag_segment_hash, SKMEM_TAG_SEGMENT_HASH);
222 
223 #define SKMEM_TAG_REGION_MIB     "com.apple.skywalk.region.mib"
224 static SKMEM_TAG_DEFINE(skmem_tag_region_mib, SKMEM_TAG_REGION_MIB);
225 
226 #define BMAPSZ  64
227 
228 /* 64-bit mask with range */
229 #define BMASK64(_beg, _end)     \
230 	((((uint64_t)-1) >> ((BMAPSZ - 1) - (_end))) & ~((1ULL << (_beg)) - 1))
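/*
 * Example (illustrative): BMASK64(2, 5) selects bits 2..5 inclusive:
 *
 *	((uint64_t)-1) >> (63 - 5)	== 0x3f
 *	~((1ULL << 2) - 1)		== ~0x3
 *	0x3f & ~0x3			== 0x3c		(binary 111100)
 */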
231 
232 static int __skmem_region_inited = 0;
233 
234 void
235 skmem_region_init(void)
236 {
237 	boolean_t randomize_seg_size;
238 
239 	_CASSERT(sizeof(bitmap_t) == sizeof(uint64_t));
240 	_CASSERT(BMAPSZ == (sizeof(bitmap_t) << 3));
241 	_CASSERT((SKMEM_SEG_SIZE % SKMEM_PAGE_SIZE) == 0);
242 	_CASSERT(SKMEM_REGION_HASH_LIMIT >= SKMEM_REGION_HASH_INITIAL);
243 	ASSERT(!__skmem_region_inited);
244 
245 	/* enforce the ordering here */
246 	_CASSERT(SKMEM_REGION_GUARD_HEAD == 0);
247 	_CASSERT(SKMEM_REGION_SCHEMA == 1);
248 	_CASSERT(SKMEM_REGION_RING == 2);
249 	_CASSERT(SKMEM_REGION_BUF_DEF == 3);
250 	_CASSERT(SKMEM_REGION_BUF_LARGE == 4);
251 	_CASSERT(SKMEM_REGION_RXBUF_DEF == 5);
252 	_CASSERT(SKMEM_REGION_RXBUF_LARGE == 6);
253 	_CASSERT(SKMEM_REGION_TXBUF_DEF == 7);
254 	_CASSERT(SKMEM_REGION_TXBUF_LARGE == 8);
255 	_CASSERT(SKMEM_REGION_UMD == 9);
256 	_CASSERT(SKMEM_REGION_TXAUSD == 10);
257 	_CASSERT(SKMEM_REGION_RXFUSD == 11);
258 	_CASSERT(SKMEM_REGION_UBFT == 12);
259 	_CASSERT(SKMEM_REGION_USTATS == 13);
260 	_CASSERT(SKMEM_REGION_FLOWADV == 14);
261 	_CASSERT(SKMEM_REGION_NEXUSADV == 15);
262 	_CASSERT(SKMEM_REGION_SYSCTLS == 16);
263 	_CASSERT(SKMEM_REGION_GUARD_TAIL == 17);
264 	_CASSERT(SKMEM_REGION_KMD == 18);
265 	_CASSERT(SKMEM_REGION_RXKMD == 19);
266 	_CASSERT(SKMEM_REGION_TXKMD == 20);
267 	_CASSERT(SKMEM_REGION_KBFT == 21);
268 	_CASSERT(SKMEM_REGION_RXKBFT == 22);
269 	_CASSERT(SKMEM_REGION_TXKBFT == 23);
270 	_CASSERT(SKMEM_REGION_TXAKSD == 24);
271 	_CASSERT(SKMEM_REGION_RXFKSD == 25);
272 	_CASSERT(SKMEM_REGION_KSTATS == 26);
273 	_CASSERT(SKMEM_REGION_INTRINSIC == 27);
274 
275 	_CASSERT(SREG_GUARD_HEAD == SKMEM_REGION_GUARD_HEAD);
276 	_CASSERT(SREG_SCHEMA == SKMEM_REGION_SCHEMA);
277 	_CASSERT(SREG_RING == SKMEM_REGION_RING);
278 	_CASSERT(SREG_BUF_DEF == SKMEM_REGION_BUF_DEF);
279 	_CASSERT(SREG_BUF_LARGE == SKMEM_REGION_BUF_LARGE);
280 	_CASSERT(SREG_RXBUF_DEF == SKMEM_REGION_RXBUF_DEF);
281 	_CASSERT(SREG_RXBUF_LARGE == SKMEM_REGION_RXBUF_LARGE);
282 	_CASSERT(SREG_TXBUF_DEF == SKMEM_REGION_TXBUF_DEF);
283 	_CASSERT(SREG_TXBUF_LARGE == SKMEM_REGION_TXBUF_LARGE);
284 	_CASSERT(SREG_UMD == SKMEM_REGION_UMD);
285 	_CASSERT(SREG_TXAUSD == SKMEM_REGION_TXAUSD);
286 	_CASSERT(SREG_RXFUSD == SKMEM_REGION_RXFUSD);
287 	_CASSERT(SREG_UBFT == SKMEM_REGION_UBFT);
288 	_CASSERT(SREG_USTATS == SKMEM_REGION_USTATS);
289 	_CASSERT(SREG_FLOWADV == SKMEM_REGION_FLOWADV);
290 	_CASSERT(SREG_NEXUSADV == SKMEM_REGION_NEXUSADV);
291 	_CASSERT(SREG_SYSCTLS == SKMEM_REGION_SYSCTLS);
292 	_CASSERT(SREG_GUARD_TAIL == SKMEM_REGION_GUARD_TAIL);
293 	_CASSERT(SREG_KMD == SKMEM_REGION_KMD);
294 	_CASSERT(SREG_RXKMD == SKMEM_REGION_RXKMD);
295 	_CASSERT(SREG_TXKMD == SKMEM_REGION_TXKMD);
296 	_CASSERT(SREG_KBFT == SKMEM_REGION_KBFT);
297 	_CASSERT(SREG_RXKBFT == SKMEM_REGION_RXKBFT);
298 	_CASSERT(SREG_TXKBFT == SKMEM_REGION_TXKBFT);
299 	_CASSERT(SREG_TXAKSD == SKMEM_REGION_TXAKSD);
300 	_CASSERT(SREG_RXFKSD == SKMEM_REGION_RXFKSD);
301 	_CASSERT(SREG_KSTATS == SKMEM_REGION_KSTATS);
302 
303 	_CASSERT(SKR_MODE_NOREDIRECT == SREG_MODE_NOREDIRECT);
304 	_CASSERT(SKR_MODE_MMAPOK == SREG_MODE_MMAPOK);
305 	_CASSERT(SKR_MODE_UREADONLY == SREG_MODE_UREADONLY);
306 	_CASSERT(SKR_MODE_KREADONLY == SREG_MODE_KREADONLY);
307 	_CASSERT(SKR_MODE_PERSISTENT == SREG_MODE_PERSISTENT);
308 	_CASSERT(SKR_MODE_MONOLITHIC == SREG_MODE_MONOLITHIC);
309 	_CASSERT(SKR_MODE_NOMAGAZINES == SREG_MODE_NOMAGAZINES);
310 	_CASSERT(SKR_MODE_NOCACHE == SREG_MODE_NOCACHE);
311 	_CASSERT(SKR_MODE_IODIR_IN == SREG_MODE_IODIR_IN);
312 	_CASSERT(SKR_MODE_IODIR_OUT == SREG_MODE_IODIR_OUT);
313 	_CASSERT(SKR_MODE_GUARD == SREG_MODE_GUARD);
314 	_CASSERT(SKR_MODE_SEGPHYSCONTIG == SREG_MODE_SEGPHYSCONTIG);
315 	_CASSERT(SKR_MODE_SHAREOK == SREG_MODE_SHAREOK);
316 	_CASSERT(SKR_MODE_PUREDATA == SREG_MODE_PUREDATA);
317 	_CASSERT(SKR_MODE_PSEUDO == SREG_MODE_PSEUDO);
318 	_CASSERT(SKR_MODE_THREADSAFE == SREG_MODE_THREADSAFE);
319 	_CASSERT(SKR_MODE_SLAB == SREG_MODE_SLAB);
320 	_CASSERT(SKR_MODE_MIRRORED == SREG_MODE_MIRRORED);
321 
322 	(void) PE_parse_boot_argn("skmem_seg_size", &skmem_seg_size,
323 	    sizeof(skmem_seg_size));
324 	if (skmem_seg_size < SKMEM_MIN_SEG_SIZE) {
325 		skmem_seg_size = SKMEM_MIN_SEG_SIZE;
326 	}
327 	skmem_seg_size = (uint32_t)P2ROUNDUP(skmem_seg_size,
328 	    SKMEM_MIN_SEG_SIZE);
329 	VERIFY(skmem_seg_size != 0 && (skmem_seg_size % SKMEM_PAGE_SIZE) == 0);
330 
331 	(void) PE_parse_boot_argn("skmem_md_seg_size", &skmem_md_seg_size,
332 	    sizeof(skmem_md_seg_size));
333 	if (skmem_md_seg_size < skmem_seg_size) {
334 		skmem_md_seg_size = skmem_seg_size;
335 	}
336 	skmem_md_seg_size = (uint32_t)P2ROUNDUP(skmem_md_seg_size,
337 	    SKMEM_MIN_SEG_SIZE);
338 	VERIFY((skmem_md_seg_size % SKMEM_PAGE_SIZE) == 0);
339 
340 	/*
341 	 * If set via boot-args, honor it and don't randomize.
342 	 */
343 	randomize_seg_size = !PE_parse_boot_argn("skmem_drv_buf_seg_size",
344 	    &skmem_drv_buf_seg_size, sizeof(skmem_drv_buf_seg_size));
345 	if (skmem_drv_buf_seg_size < skmem_seg_size) {
346 		skmem_drv_buf_seg_size = skmem_seg_size;
347 	}
348 	skmem_drv_buf_seg_size = skmem_drv_buf_seg_eff_size =
349 	    (uint32_t)P2ROUNDUP(skmem_drv_buf_seg_size, SKMEM_MIN_SEG_SIZE);
350 	VERIFY((skmem_drv_buf_seg_size % SKMEM_PAGE_SIZE) == 0);
351 
352 	/*
353 	 * Randomize the driver buffer segment size; here we choose
354 	 * a SKMEM_MIN_SEG_SIZE multiplier to bump up the value to.
355 	 * Set this as the effective driver buffer segment size.
356 	 */
357 	if (randomize_seg_size) {
358 		uint32_t sm;
359 		read_frandom(&sm, sizeof(sm));
360 		skmem_drv_buf_seg_eff_size +=
361 		    (SKMEM_MIN_SEG_SIZE * (sm % SKMEM_DRV_BUF_SEG_MULTIPLIER));
362 		VERIFY((skmem_drv_buf_seg_eff_size % SKMEM_MIN_SEG_SIZE) == 0);
363 	}
364 	VERIFY(skmem_drv_buf_seg_eff_size >= skmem_drv_buf_seg_size);
365 
366 	(void) PE_parse_boot_argn("skmem_usr_buf_seg_size",
367 	    &skmem_usr_buf_seg_size, sizeof(skmem_usr_buf_seg_size));
368 	if (skmem_usr_buf_seg_size < skmem_seg_size) {
369 		skmem_usr_buf_seg_size = skmem_seg_size;
370 	}
371 	skmem_usr_buf_seg_size = (uint32_t)P2ROUNDUP(skmem_usr_buf_seg_size,
372 	    SKMEM_MIN_SEG_SIZE);
373 	VERIFY((skmem_usr_buf_seg_size % SKMEM_PAGE_SIZE) == 0);
374 
375 	SK_ERR("seg_size %u, md_seg_size %u, drv_buf_seg_size %u [eff %u], "
376 	    "usr_buf_seg_size %u", skmem_seg_size, skmem_md_seg_size,
377 	    skmem_drv_buf_seg_size, skmem_drv_buf_seg_eff_size,
378 	    skmem_usr_buf_seg_size);
379 
380 	TAILQ_INIT(&skmem_region_head);
381 
382 	skmem_region_update_tc =
383 	    thread_call_allocate_with_options(skmem_region_update_func,
384 	    NULL, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
385 	if (skmem_region_update_tc == NULL) {
386 		panic("%s: thread_call_allocate failed", __func__);
387 		/* NOTREACHED */
388 		__builtin_unreachable();
389 	}
390 
391 	sg_size = sizeof(struct sksegment);
392 	skmem_sg_cache = skmem_cache_create("sg", sg_size,
393 	    sizeof(uint64_t), NULL, NULL, NULL, NULL, NULL, 0);
394 
395 	/* and start the periodic region update machinery */
396 	skmem_dispatch(skmem_region_update_tc, NULL,
397 	    (skmem_region_update_interval * NSEC_PER_SEC));
398 
399 	__skmem_region_inited = 1;
400 }
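/*
 * The segment sizes parsed above can be tuned via boot-args, e.g.
 * (values hypothetical; each is rounded up to a SKMEM_MIN_SEG_SIZE
 * multiple as needed):
 *
 *	skmem_seg_size=65536 skmem_drv_buf_seg_size=131072
 *
 * Explicitly setting skmem_drv_buf_seg_size also disables the
 * randomization of the effective driver buffer segment size.
 */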
401 
402 void
403 skmem_region_fini(void)
404 {
405 	if (__skmem_region_inited) {
406 		ASSERT(TAILQ_EMPTY(&skmem_region_head));
407 
408 		if (skmem_region_update_tc != NULL) {
409 			(void) thread_call_cancel_wait(skmem_region_update_tc);
410 			(void) thread_call_free(skmem_region_update_tc);
411 			skmem_region_update_tc = NULL;
412 		}
413 
414 		if (skmem_sg_cache != NULL) {
415 			skmem_cache_destroy(skmem_sg_cache);
416 			skmem_sg_cache = NULL;
417 		}
418 
419 		__skmem_region_inited = 0;
420 	}
421 }
422 
423 /*
424  * Reap internal caches.
425  */
426 void
427 skmem_region_reap_caches(boolean_t purge)
428 {
429 	skmem_cache_reap_now(skmem_sg_cache, purge);
430 }
431 
432 /*
433  * Configure and compute the parameters of a region.
434  */
435 void
436 skmem_region_params_config(struct skmem_region_params *srp)
437 {
438 	uint32_t cache_line_size = skmem_cpu_cache_line_size();
439 	size_t seglim, segsize, segcnt;
440 	size_t objsize, objcnt;
441 
442 	ASSERT(srp->srp_id < SKMEM_REGIONS);
443 
444 	/*
445 	 * If the magazines layer is disabled system-wide, override
446 	 * the region parameter here.  This will effectively reduce
447 	 * the number of requested objects computed below.  Note that
448 	 * the region may have already been configured to exclude
449 	 * magazines in the default skmem_regions[] array.
450 	 */
451 	if (!skmem_allow_magazines()) {
452 		srp->srp_cflags |= SKMEM_REGION_CR_NOMAGAZINES;
453 	}
454 
455 	objsize = srp->srp_r_obj_size;
456 	ASSERT(objsize != 0);
457 	objcnt = srp->srp_r_obj_cnt;
458 	ASSERT(objcnt != 0);
459 
460 	if (srp->srp_cflags & SKMEM_REGION_CR_PSEUDO) {
461 		size_t align = srp->srp_align;
462 
463 		VERIFY(align != 0 && (align % SKMEM_CACHE_ALIGN) == 0);
464 		VERIFY(powerof2(align));
465 		objsize = MAX(objsize, sizeof(uint64_t));
466 #if KASAN
467 		/*
468 		 * When KASAN is enabled, the zone allocator adjusts the
469 		 * element size to include the redzone regions, in which
470 		 * case we assume that the elements won't start on the
471 		 * alignment boundary and thus need to do some fix-ups.
472 		 * These include increasing the effective object size
473 		 * which adds at least 16 bytes to the original size.
474 		 */
475 		objsize += sizeof(uint64_t) + align;
476 #endif /* KASAN */
477 		objsize = P2ROUNDUP(objsize, align);
478 
479 		segsize = objsize;
480 		srp->srp_r_seg_size = (uint32_t)segsize;
481 		segcnt = objcnt;
482 		goto done;
483 	} else {
484 		/* objects are always aligned at CPU cache line size */
485 		srp->srp_align = cache_line_size;
486 	}
487 
488 	/*
489 	 * Start with default segment size for the region, and compute the
490 	 * effective segment size (to nearest SKMEM_MIN_SEG_SIZE).  If the
491 	 * object size is greater, then we adjust the segment size to next
492 	 * multiple of the effective size larger than the object size.
493 	 */
494 	if (srp->srp_r_seg_size == 0) {
495 		switch (srp->srp_id) {
496 		case SKMEM_REGION_UMD:
497 		case SKMEM_REGION_KMD:
498 		case SKMEM_REGION_RXKMD:
499 		case SKMEM_REGION_TXKMD:
500 			srp->srp_r_seg_size = skmem_md_seg_size;
501 			break;
502 
503 		case SKMEM_REGION_BUF_DEF:
504 		case SKMEM_REGION_RXBUF_DEF:
505 		case SKMEM_REGION_TXBUF_DEF:
506 			/*
507 			 * Use the effective driver buffer segment size,
508 			 * since it reflects any randomization done at
509 			 * skmem_region_init() time.
510 			 */
511 			srp->srp_r_seg_size = skmem_drv_buf_seg_eff_size;
512 			break;
513 
514 		default:
515 			srp->srp_r_seg_size = skmem_seg_size;
516 			break;
517 		}
518 	} else {
519 		srp->srp_r_seg_size = (uint32_t)P2ROUNDUP(srp->srp_r_seg_size,
520 		    SKMEM_MIN_SEG_SIZE);
521 	}
522 
523 	seglim = srp->srp_r_seg_size;
524 	VERIFY(seglim != 0 && (seglim % SKMEM_PAGE_SIZE) == 0);
525 
526 	SK_DF(SK_VERB_MEM, "%s: seglim %zu objsize %zu objcnt %zu",
527 	    srp->srp_name, seglim, objsize, objcnt);
528 
529 	/*
530 	 * Make sure object size is multiple of CPU cache line
531 	 * size, and that we can evenly divide the segment size.
532 	 */
533 	if (!((objsize < cache_line_size) && (objsize < seglim) &&
534 	    ((cache_line_size % objsize) == 0) && ((seglim % objsize) == 0))) {
535 		objsize = P2ROUNDUP(objsize, cache_line_size);
536 		while (objsize < seglim && (seglim % objsize) != 0) {
537 			SK_DF(SK_VERB_MEM, "%s: objsize %zu -> %zu",
538 			    srp->srp_name, objsize, objsize + cache_line_size);
539 			objsize += cache_line_size;
540 		}
541 	}
542 
543 	/* segment must be at least as large as the object */
544 	while (objsize > seglim) {
545 		SK_DF(SK_VERB_MEM, "%s: seglim %zu -> %zu", srp->srp_name,
546 		    seglim, seglim + SKMEM_MIN_SEG_SIZE);
547 		seglim += SKMEM_MIN_SEG_SIZE;
548 	}
549 
550 	/*
551 	 * Take into account worst-case per-CPU cached
552 	 * objects if this region is configured for it.
553 	 */
554 	if (!(srp->srp_cflags & SKMEM_REGION_CR_NOMAGAZINES)) {
555 		uint32_t magazine_max_objs =
556 		    skmem_cache_magazine_max((uint32_t)objsize);
557 		SK_DF(SK_VERB_MEM, "%s: objcnt %zu -> %zu", srp->srp_name,
558 		    objcnt, objcnt + magazine_max_objs);
559 		objcnt += magazine_max_objs;
560 	}
561 
562 	SK_DF(SK_VERB_MEM, "%s: seglim %zu objsize %zu "
563 	    "objcnt %zu", srp->srp_name, seglim, objsize, objcnt);
564 
565 	segsize = P2ROUNDUP(objsize * objcnt, SKMEM_MIN_SEG_SIZE);
566 	if (seglim > segsize) {
567 		/*
568 		 * If the segment limit is larger than what we need,
569 		 * avoid memory wastage by shrinking it.
570 		 */
571 		while (seglim > segsize && seglim > SKMEM_MIN_SEG_SIZE) {
572 			VERIFY(seglim >= SKMEM_MIN_SEG_SIZE);
573 			SK_DF(SK_VERB_MEM,
574 			    "%s: segsize %zu (%zu*%zu) seglim [-] %zu -> %zu",
575 			    srp->srp_name, segsize, objsize, objcnt, seglim,
576 			    P2ROUNDUP(seglim - SKMEM_MIN_SEG_SIZE,
577 			    SKMEM_MIN_SEG_SIZE));
578 			seglim = P2ROUNDUP(seglim - SKMEM_MIN_SEG_SIZE,
579 			    SKMEM_MIN_SEG_SIZE);
580 		}
581 
582 		/* adjust segment size */
583 		segsize = seglim;
584 	} else if (seglim < segsize) {
585 		size_t oseglim = seglim;
586 		/*
587 		 * If the segment limit is less than the segment size,
588 	 * see if increasing it slightly (up to 1.5x the original
589 	 * segment limit) would allow us to avoid allocating too many
590 		 * extra objects (due to excessive segment count).
591 		 */
592 		while (seglim < segsize && (segsize % seglim) != 0) {
593 			SK_DF(SK_VERB_MEM,
594 			    "%s: segsize %zu (%zu*%zu) seglim [+] %zu -> %zu",
595 			    srp->srp_name, segsize, objsize, objcnt, seglim,
596 			    (seglim + SKMEM_MIN_SEG_SIZE));
597 			seglim += SKMEM_MIN_SEG_SIZE;
598 			if (seglim >= (oseglim + (oseglim >> 1))) {
599 				break;
600 			}
601 		}
602 
603 	/* can't use P2ROUNDUP since seglim may not be a power of 2 */
604 		segsize = SK_ROUNDUP(segsize, seglim);
605 	}
606 	ASSERT(segsize != 0 && (segsize % seglim) == 0);
607 
608 	SK_DF(SK_VERB_MEM, "%s: segsize %zu seglim %zu",
609 	    srp->srp_name, segsize, seglim);
610 
611 	/* compute segment count, and recompute segment size */
612 	if (srp->srp_cflags & SKMEM_REGION_CR_MONOLITHIC) {
613 		segcnt = 1;
614 	} else {
615 		/*
616 		 * The adjustments above were done in increments of
617 		 * SKMEM_MIN_SEG_SIZE.  If the object size is greater
618 		 * than that, ensure that the segment size is a multiple
619 		 * of the object size.
620 		 */
621 		if (objsize > SKMEM_MIN_SEG_SIZE) {
622 			ASSERT(seglim >= objsize);
623 			if ((seglim % objsize) != 0) {
624 				seglim += (seglim - objsize);
625 			}
626 			/* recompute segsize; see SK_ROUNDUP comment above */
627 			segsize = SK_ROUNDUP(segsize, seglim);
628 		}
629 
630 		segcnt = MAX(1, (segsize / seglim));
631 		segsize /= segcnt;
632 	}
633 
634 	SK_DF(SK_VERB_MEM, "%s: segcnt %zu segsize %zu",
635 	    srp->srp_name, segcnt, segsize);
636 
637 	/* recompute object count to avoid wastage */
638 	objcnt = (segsize * segcnt) / objsize;
639 	ASSERT(objcnt != 0);
640 done:
641 	srp->srp_c_obj_size = (uint32_t)objsize;
642 	srp->srp_c_obj_cnt = (uint32_t)objcnt;
643 	srp->srp_c_seg_size = (uint32_t)segsize;
644 	srp->srp_seg_cnt = (uint32_t)segcnt;
645 
646 	SK_DF(SK_VERB_MEM, "%s: objsize %zu objcnt %zu segcnt %zu segsize %zu",
647 	    srp->srp_name, objsize, objcnt, segcnt, segsize);
648 
649 #if SK_LOG
650 	if (__improbable(sk_verbose != 0)) {
651 		char label[32];
652 		(void) snprintf(label, sizeof(label), "REGION_%s:",
653 		    skmem_region_id2name(srp->srp_id));
654 		SK_D("%-16s o:[%4u x %6u -> %4u x %6u]", label,
655 		    (uint32_t)srp->srp_r_obj_cnt,
656 		    (uint32_t)srp->srp_r_obj_size,
657 		    (uint32_t)srp->srp_c_obj_cnt,
658 		    (uint32_t)srp->srp_c_obj_size);
659 	}
660 #endif /* SK_LOG */
661 }
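/*
 * Worked example (illustrative; assumes a 64-byte CPU cache line, a
 * 16KB SKMEM_MIN_SEG_SIZE, a 32KB default segment size, and
 * SKMEM_REGION_CR_NOMAGAZINES set):
 *
 *	requested:	objsize 512, objcnt 100
 *	objsize:	already a cache-line multiple that divides 32KB
 *			evenly, so it stays 512
 *	segsize:	P2ROUNDUP(512 * 100, 16KB) = 64KB
 *	segcnt:		64KB / 32KB = 2; segsize recomputed to 32KB
 *	computed:	objcnt = (32KB * 2) / 512 = 128
 *
 * i.e. the request is rounded up to what packs evenly into whole
 * segments.
 */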
662 
663 /*
664  * Create a region.
665  */
666 struct skmem_region *
667 skmem_region_create(const char *name, struct skmem_region_params *srp,
668     sksegment_ctor_fn_t ctor, sksegment_dtor_fn_t dtor, void *private)
669 {
670 	boolean_t pseudo = (srp->srp_cflags & SKMEM_REGION_CR_PSEUDO);
671 	uint32_t cflags = srp->srp_cflags;
672 	struct skmem_region *skr;
673 	uint32_t i;
674 
675 	ASSERT(srp->srp_id < SKMEM_REGIONS);
676 	ASSERT(srp->srp_c_seg_size != 0 &&
677 	    (pseudo || (srp->srp_c_seg_size % SKMEM_PAGE_SIZE) == 0));
678 	ASSERT(srp->srp_seg_cnt != 0);
679 	ASSERT(srp->srp_c_obj_cnt == 1 ||
680 	    (srp->srp_c_seg_size % srp->srp_c_obj_size) == 0);
681 	ASSERT(srp->srp_c_obj_size <= srp->srp_c_seg_size);
682 
683 	skr = zalloc_flags(skr_zone, Z_WAITOK | Z_ZERO);
684 	skr->skr_params.srp_r_seg_size = srp->srp_r_seg_size;
685 	skr->skr_seg_size = srp->srp_c_seg_size;
686 	skr->skr_size = (srp->srp_c_seg_size * srp->srp_seg_cnt);
687 	skr->skr_seg_objs = (srp->srp_c_seg_size / srp->srp_c_obj_size);
688 
689 	if (!pseudo) {
690 		skr->skr_seg_max_cnt = srp->srp_seg_cnt;
691 
692 		/* set alignment to CPU cache line size */
693 		skr->skr_params.srp_align = skmem_cpu_cache_line_size();
694 
695 		/* allocate the allocated-address hash chain */
696 		skr->skr_hash_initial = SKMEM_REGION_HASH_INITIAL;
697 		skr->skr_hash_limit = SKMEM_REGION_HASH_LIMIT;
698 		skr->skr_hash_table = sk_alloc_type_array(struct sksegment_bkt,
699 		    skr->skr_hash_initial, Z_WAITOK | Z_NOFAIL,
700 		    skmem_tag_segment_hash);
701 		skr->skr_hash_mask = (skr->skr_hash_initial - 1);
702 		skr->skr_hash_shift = flsll(srp->srp_c_seg_size) - 1;
703 
704 		for (i = 0; i < (skr->skr_hash_mask + 1); i++) {
705 			TAILQ_INIT(&skr->skr_hash_table[i].sgb_head);
706 		}
707 	} else {
708 		/* this upper bound doesn't apply */
709 		skr->skr_seg_max_cnt = 0;
710 
711 		/* pick up value set by skmem_regions_params_config() */
712 		skr->skr_params.srp_align = srp->srp_align;
713 	}
714 
715 	skr->skr_r_obj_size = srp->srp_r_obj_size;
716 	skr->skr_r_obj_cnt = srp->srp_r_obj_cnt;
717 	skr->skr_c_obj_size = srp->srp_c_obj_size;
718 	skr->skr_c_obj_cnt = srp->srp_c_obj_cnt;
719 
720 	skr->skr_params.srp_md_type = srp->srp_md_type;
721 	skr->skr_params.srp_md_subtype = srp->srp_md_subtype;
722 	skr->skr_params.srp_max_frags = srp->srp_max_frags;
723 
724 	skr->skr_seg_ctor = ctor;
725 	skr->skr_seg_dtor = dtor;
726 	skr->skr_private = private;
727 
728 	lck_mtx_init(&skr->skr_lock, &skmem_region_lock_grp,
729 	    &skmem_region_lock_attr);
730 
731 	TAILQ_INIT(&skr->skr_seg_free);
732 	RB_INIT(&skr->skr_seg_tfree);
733 
734 	skr->skr_id = srp->srp_id;
735 	uuid_generate_random(skr->skr_uuid);
736 	(void) snprintf(skr->skr_name, sizeof(skr->skr_name),
737 	    "%s.%s.%s", SKMEM_REGION_PREFIX, srp->srp_name, name);
738 
739 	SK_DF(SK_VERB_MEM_REGION, "\"%s\": skr 0x%llx ",
740 	    skr->skr_name, SK_KVA(skr));
741 
742 	/* sanity check */
743 	ASSERT(!(cflags & SKMEM_REGION_CR_GUARD) ||
744 	    !(cflags & (SKMEM_REGION_CR_KREADONLY | SKMEM_REGION_CR_UREADONLY |
745 	    SKMEM_REGION_CR_PERSISTENT | SKMEM_REGION_CR_SHAREOK |
746 	    SKMEM_REGION_CR_IODIR_IN | SKMEM_REGION_CR_IODIR_OUT |
747 	    SKMEM_REGION_CR_PUREDATA)));
748 
749 	skr->skr_cflags = cflags;
750 	if (cflags & SKMEM_REGION_CR_NOREDIRECT) {
751 		skr->skr_mode |= SKR_MODE_NOREDIRECT;
752 	}
753 	if (cflags & SKMEM_REGION_CR_MMAPOK) {
754 		skr->skr_mode |= SKR_MODE_MMAPOK;
755 	}
756 	if ((cflags & SKMEM_REGION_CR_MMAPOK) &&
757 	    (cflags & SKMEM_REGION_CR_UREADONLY)) {
758 		skr->skr_mode |= SKR_MODE_UREADONLY;
759 	}
760 	if (cflags & SKMEM_REGION_CR_KREADONLY) {
761 		skr->skr_mode |= SKR_MODE_KREADONLY;
762 	}
763 	if (cflags & SKMEM_REGION_CR_PERSISTENT) {
764 		skr->skr_mode |= SKR_MODE_PERSISTENT;
765 	}
766 	if (cflags & SKMEM_REGION_CR_MONOLITHIC) {
767 		skr->skr_mode |= SKR_MODE_MONOLITHIC;
768 	}
769 	if (cflags & SKMEM_REGION_CR_NOMAGAZINES) {
770 		skr->skr_mode |= SKR_MODE_NOMAGAZINES;
771 	}
772 	if (cflags & SKMEM_REGION_CR_NOCACHE) {
773 		skr->skr_mode |= SKR_MODE_NOCACHE;
774 	}
775 	if (cflags & SKMEM_REGION_CR_SEGPHYSCONTIG) {
776 		skr->skr_mode |= SKR_MODE_SEGPHYSCONTIG;
777 	}
778 	if (cflags & SKMEM_REGION_CR_SHAREOK) {
779 		skr->skr_mode |= SKR_MODE_SHAREOK;
780 	}
781 	if (cflags & SKMEM_REGION_CR_IODIR_IN) {
782 		skr->skr_mode |= SKR_MODE_IODIR_IN;
783 	}
784 	if (cflags & SKMEM_REGION_CR_IODIR_OUT) {
785 		skr->skr_mode |= SKR_MODE_IODIR_OUT;
786 	}
787 	if (cflags & SKMEM_REGION_CR_GUARD) {
788 		skr->skr_mode |= SKR_MODE_GUARD;
789 	}
790 	if (cflags & SKMEM_REGION_CR_PUREDATA) {
791 		skr->skr_mode |= SKR_MODE_PUREDATA;
792 	}
793 	if (cflags & SKMEM_REGION_CR_PSEUDO) {
794 		skr->skr_mode |= SKR_MODE_PSEUDO;
795 	}
796 	if (cflags & SKMEM_REGION_CR_THREADSAFE) {
797 		skr->skr_mode |= SKR_MODE_THREADSAFE;
798 	}
799 
800 #if XNU_TARGET_OS_OSX
801 	/*
802 	 * Mark all regions as persistent except for the guard and pseudo
803 	 * (e.g. intrinsic) regions.
804 	 * This is to ensure that kernel threads won't be faulting-in while
805 	 * accessing these memory regions. We have observed various kinds of
806 	 * kernel panics due to kernel threads faulting on non-wired memory
807 	 * access when the VM subsystem is not in a state to swap-in the page.
808 	 */
809 	if (!((skr->skr_mode & SKR_MODE_PSEUDO) ||
810 	    (skr->skr_mode & SKR_MODE_GUARD))) {
811 		skr->skr_mode |= SKR_MODE_PERSISTENT;
812 	}
813 #endif /* XNU_TARGET_OS_OSX */
814 
815 	/* SKR_MODE_UREADONLY only takes effect for user task mapping */
816 	skr->skr_bufspec.user_writable = !(skr->skr_mode & SKR_MODE_UREADONLY);
817 	skr->skr_bufspec.kernel_writable = !(skr->skr_mode & SKR_MODE_KREADONLY);
818 	skr->skr_bufspec.purgeable = TRUE;
819 	skr->skr_bufspec.inhibitCache = !!(skr->skr_mode & SKR_MODE_NOCACHE);
820 	skr->skr_bufspec.physcontig = (skr->skr_mode & SKR_MODE_SEGPHYSCONTIG);
821 	skr->skr_bufspec.iodir_in = !!(skr->skr_mode & SKR_MODE_IODIR_IN);
822 	skr->skr_bufspec.iodir_out = !!(skr->skr_mode & SKR_MODE_IODIR_OUT);
823 	skr->skr_bufspec.puredata = !!(skr->skr_mode & SKR_MODE_PUREDATA);
824 	skr->skr_bufspec.threadSafe = !!(skr->skr_mode & SKR_MODE_THREADSAFE);
825 	skr->skr_regspec.noRedirect = !!(skr->skr_mode & SKR_MODE_NOREDIRECT);
826 
827 	/* allocate segment bitmaps */
828 	if (!(skr->skr_mode & SKR_MODE_PSEUDO)) {
829 		ASSERT(skr->skr_seg_max_cnt != 0);
830 		skr->skr_seg_bmap_len = BITMAP_LEN(skr->skr_seg_max_cnt);
831 		skr->skr_seg_bmap = sk_alloc_data(BITMAP_SIZE(skr->skr_seg_max_cnt),
832 		    Z_WAITOK | Z_NOFAIL, skmem_tag_segment_bmap);
833 		ASSERT(BITMAP_SIZE(skr->skr_seg_max_cnt) ==
834 		    (skr->skr_seg_bmap_len * sizeof(*skr->skr_seg_bmap)));
835 
836 		/* mark all bitmaps as free (bit set) */
837 		bitmap_full(skr->skr_seg_bmap, skr->skr_seg_max_cnt);
838 	}
839 
840 	/*
841 	 * Populate the freelist by allocating all segments for the
842 	 * region, which will be mapped but not faulted-in, and then
843 	 * immediately insert each to the freelist.  That will in
844 	 * turn unmap the segment's memory object.
845 	 */
846 	SKR_LOCK(skr);
847 	if (skr->skr_mode & SKR_MODE_PSEUDO) {
848 		char zone_name[64];
849 		(void) snprintf(zone_name, sizeof(zone_name), "%s.reg.%s",
850 		    SKMEM_ZONE_PREFIX, name);
851 		skr->skr_zreg = zone_create(zone_name, skr->skr_c_obj_size,
852 		    ZC_ZFREE_CLEARMEM | ZC_DESTRUCTIBLE);
853 	} else {
854 		/* create a backing IOSKRegion object */
855 		if ((skr->skr_reg = IOSKRegionCreate(&skr->skr_regspec,
856 		    (IOSKSize)skr->skr_seg_size,
857 		    (IOSKCount)skr->skr_seg_max_cnt)) == NULL) {
858 			SK_ERR("\"%s\": [%u * %u] cflags 0x%b skr_reg failed",
859 			    skr->skr_name, (uint32_t)skr->skr_seg_size,
860 			    (uint32_t)skr->skr_seg_max_cnt, skr->skr_cflags,
861 			    SKMEM_REGION_CR_BITS);
862 			goto failed;
863 		}
864 	}
865 
866 	ASSERT(skr->skr_seg_objs != 0);
867 
868 	++skr->skr_refcnt;      /* for caller */
869 	SKR_UNLOCK(skr);
870 
871 	SKMEM_REGION_LOCK();
872 	TAILQ_INSERT_TAIL(&skmem_region_head, skr, skr_link);
873 	SKMEM_REGION_UNLOCK();
874 
875 	SK_DF(SK_VERB_MEM_REGION,
876 	    "  [TOTAL] seg (%u*%u) obj (%u*%u) cflags 0x%b",
877 	    (uint32_t)skr->skr_seg_size, (uint32_t)skr->skr_seg_max_cnt,
878 	    (uint32_t)skr->skr_c_obj_size, (uint32_t)skr->skr_c_obj_cnt,
879 	    skr->skr_cflags, SKMEM_REGION_CR_BITS);
880 
881 	return skr;
882 
883 failed:
884 	SKR_LOCK_ASSERT_HELD(skr);
885 	skmem_region_destroy(skr);
886 
887 	return NULL;
888 }
889 
890 /*
891  * Destroy a region.
892  */
893 static void
894 skmem_region_destroy(struct skmem_region *skr)
895 {
896 	struct skmem_region *mskr;
897 
898 	SKR_LOCK_ASSERT_HELD(skr);
899 
900 	SK_DF(SK_VERB_MEM_REGION, "\"%s\": skr 0x%llx",
901 	    skr->skr_name, SK_KVA(skr));
902 
903 	/*
904 	 * Panic if we detect there are unfreed segments; the caller
905 	 * destroying this region is responsible for ensuring that all
906 	 * allocated segments have been freed prior to getting here.
907 	 */
908 	ASSERT(skr->skr_refcnt == 0);
909 	if (skr->skr_seginuse != 0) {
910 		panic("%s: '%s' (%p) not empty (%u unfreed)",
911 		    __func__, skr->skr_name, (void *)skr, skr->skr_seginuse);
912 		/* NOTREACHED */
913 		__builtin_unreachable();
914 	}
915 
916 	if (skr->skr_link.tqe_next != NULL || skr->skr_link.tqe_prev != NULL) {
917 		SKR_UNLOCK(skr);
918 		SKMEM_REGION_LOCK();
919 		TAILQ_REMOVE(&skmem_region_head, skr, skr_link);
920 		SKMEM_REGION_UNLOCK();
921 		SKR_LOCK(skr);
922 		ASSERT(skr->skr_refcnt == 0);
923 	}
924 
925 	/*
926 	 * Undo what's done earlier at region creation time.
927 	 */
928 	skmem_region_depopulate(skr);
929 	ASSERT(TAILQ_EMPTY(&skr->skr_seg_free));
930 	ASSERT(RB_EMPTY(&skr->skr_seg_tfree));
931 	ASSERT(skr->skr_seg_free_cnt == 0);
932 
933 	if (skr->skr_reg != NULL) {
934 		ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
935 		IOSKRegionDestroy(skr->skr_reg);
936 		skr->skr_reg = NULL;
937 	}
938 
939 	if (skr->skr_zreg != NULL) {
940 		ASSERT(skr->skr_mode & SKR_MODE_PSEUDO);
941 		zdestroy(skr->skr_zreg);
942 		skr->skr_zreg = NULL;
943 	}
944 
945 	if (skr->skr_seg_bmap != NULL) {
946 		ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
947 #if (DEBUG || DEVELOPMENT)
948 		ASSERT(skr->skr_seg_bmap_len != 0);
949 		/* must have been set to vacant (bit set) by now */
950 		assert(bitmap_is_full(skr->skr_seg_bmap, skr->skr_seg_max_cnt));
951 #endif /* DEBUG || DEVELOPMENT */
952 
953 		sk_free_data(skr->skr_seg_bmap, BITMAP_SIZE(skr->skr_seg_max_cnt));
954 		skr->skr_seg_bmap = NULL;
955 		skr->skr_seg_bmap_len = 0;
956 	}
957 	ASSERT(skr->skr_seg_bmap_len == 0);
958 
959 	if (skr->skr_hash_table != NULL) {
960 		ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
961 #if (DEBUG || DEVELOPMENT)
962 		for (uint32_t i = 0; i < (skr->skr_hash_mask + 1); i++) {
963 			ASSERT(TAILQ_EMPTY(&skr->skr_hash_table[i].sgb_head));
964 		}
965 #endif /* DEBUG || DEVELOPMENT */
966 
967 		sk_free_type_array(struct sksegment_bkt, skr->skr_hash_mask + 1,
968 		    skr->skr_hash_table);
969 		skr->skr_hash_table = NULL;
970 	}
971 	if ((mskr = skr->skr_mirror) != NULL) {
972 		ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
973 		skr->skr_mirror = NULL;
974 		mskr->skr_mode &= ~SKR_MODE_MIRRORED;
975 	}
976 	SKR_UNLOCK(skr);
977 
978 	if (mskr != NULL) {
979 		skmem_region_release(mskr);
980 	}
981 
982 	lck_mtx_destroy(&skr->skr_lock, &skmem_region_lock_grp);
983 
984 	zfree(skr_zone, skr);
985 }
986 
987 /*
988  * Mirror mskr (slave) to skr (master).
989  */
990 void
991 skmem_region_mirror(struct skmem_region *skr, struct skmem_region *mskr)
992 {
993 	SK_DF(SK_VERB_MEM_REGION, "skr master 0x%llx, slave 0x%llx ",
994 	    SK_KVA(skr), SK_KVA(mskr));
995 
996 	SKR_LOCK(skr);
997 	ASSERT(!(skr->skr_mode & SKR_MODE_MIRRORED));
998 	ASSERT(!(mskr->skr_mode & SKR_MODE_MIRRORED));
999 	ASSERT(skr->skr_mirror == NULL);
1000 
1001 	/* both regions must share identical parameters */
1002 	ASSERT(skr->skr_size == mskr->skr_size);
1003 	ASSERT(skr->skr_seg_size == mskr->skr_seg_size);
1004 	ASSERT(skr->skr_seg_free_cnt == mskr->skr_seg_free_cnt);
1005 
1006 	skr->skr_mirror = mskr;
1007 	skmem_region_retain(mskr);
1008 	mskr->skr_mode |= SKR_MODE_MIRRORED;
1009 	SKR_UNLOCK(skr);
1010 }
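/*
 * Illustrative sketch: pairing a kernel metadata region with its user
 * shadow, as with {umd,kmd}.  Names and params here are hypothetical;
 * both regions must share identical segment geometry.
 *
 *	kmd = skmem_region_create("kmd", &ksrp, NULL, NULL, NULL);
 *	umd = skmem_region_create("umd", &usrp, NULL, NULL, NULL);
 *	skmem_region_mirror(kmd, umd);	// kmd is master, umd is slave
 *
 * Thereafter allocations go through the master only; each
 * skmem_region_alloc(kmd, &maddr, ...) also allocates the shadow
 * segment at the same index in umd and returns its address in maddr.
 */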
1011 
1012 void
1013 skmem_region_slab_config(struct skmem_region *skr, struct skmem_cache *skm,
1014     bool attach)
1015 {
1016 	int i;
1017 
1018 	SKR_LOCK(skr);
1019 	if (attach) {
1020 		for (i = 0; i < SKR_MAX_CACHES && skr->skr_cache[i] != NULL;
1021 		    i++) {
1022 			;
1023 		}
1024 		VERIFY(i < SKR_MAX_CACHES);
1025 		ASSERT(skr->skr_cache[i] == NULL);
1026 		skr->skr_mode |= SKR_MODE_SLAB;
1027 		skr->skr_cache[i] = skm;
1028 		skmem_region_retain_locked(skr);
1029 		SKR_UNLOCK(skr);
1030 	} else {
1031 		ASSERT(skr->skr_mode & SKR_MODE_SLAB);
1032 		for (i = 0; i < SKR_MAX_CACHES && skr->skr_cache[i] != skm;
1033 		    i++) {
1034 			;
1035 		}
1036 		VERIFY(i < SKR_MAX_CACHES);
1037 		ASSERT(skr->skr_cache[i] == skm);
1038 		skr->skr_cache[i] = NULL;
1039 		for (i = 0; i < SKR_MAX_CACHES && skr->skr_cache[i] == NULL;
1040 		    i++) {
1041 			;
1042 		}
1043 		if (i == SKR_MAX_CACHES) {
1044 			skr->skr_mode &= ~SKR_MODE_SLAB;
1045 		}
1046 		if (!skmem_region_release_locked(skr)) {
1047 			SKR_UNLOCK(skr);
1048 		}
1049 	}
1050 }
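/*
 * Illustrative usage (hypothetical cache skm): a slab-backed cache
 * attaches to its backing region when created and detaches when
 * destroyed; the region holds a reference while any cache is attached,
 * and SKR_MODE_SLAB is cleared once the last one detaches.
 *
 *	skmem_region_slab_config(skr, skm, true);	// attach
 *	...
 *	skmem_region_slab_config(skr, skm, false);	// detach
 */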
1051 
1052 /*
1053  * Common routines for skmem_region_{alloc,mirror_alloc}.
1054  */
1055 static void *
1056 skmem_region_alloc_common(struct skmem_region *skr, struct sksegment *sg)
1057 {
1058 	struct sksegment_bkt *sgb;
1059 	void *addr;
1060 
1061 	SKR_LOCK_ASSERT_HELD(skr);
1062 
1063 	ASSERT(sg->sg_md != NULL);
1064 	ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1065 	addr = (void *)sg->sg_start;
1066 	sgb = SKMEM_REGION_HASH(skr, addr);
1067 	ASSERT(sg->sg_link.tqe_next == NULL);
1068 	ASSERT(sg->sg_link.tqe_prev == NULL);
1069 	TAILQ_INSERT_HEAD(&sgb->sgb_head, sg, sg_link);
1070 
1071 	skr->skr_seginuse++;
1072 	skr->skr_meminuse += skr->skr_seg_size;
1073 	if (sg->sg_state == SKSEG_STATE_MAPPED_WIRED) {
1074 		skr->skr_w_meminuse += skr->skr_seg_size;
1075 	}
1076 	skr->skr_alloc++;
1077 
1078 	return addr;
1079 }
1080 
1081 /*
1082  * Allocate a segment from the region.
1083  */
1084 void *
1085 skmem_region_alloc(struct skmem_region *skr, void **maddr,
1086     struct sksegment **retsg, struct sksegment **retsgm, uint32_t skmflag)
1087 {
1088 	struct sksegment *sg = NULL;
1089 	struct sksegment *sg1 = NULL;
1090 	void *addr = NULL, *addr1 = NULL;
1091 	uint32_t retries = 0;
1092 
1093 	VERIFY(!(skr->skr_mode & SKR_MODE_GUARD));
1094 
1095 	if (retsg != NULL) {
1096 		*retsg = NULL;
1097 	}
1098 	if (retsgm != NULL) {
1099 		*retsgm = NULL;
1100 	}
1101 
1102 	/* SKMEM_NOSLEEP and SKMEM_FAILOK are mutually exclusive */
1103 	VERIFY((skmflag & (SKMEM_NOSLEEP | SKMEM_FAILOK)) !=
1104 	    (SKMEM_NOSLEEP | SKMEM_FAILOK));
1105 
1106 	SKR_LOCK(skr);
1107 	while (sg == NULL) {
1108 		/* see if there's a segment in the freelist */
1109 		sg = TAILQ_FIRST(&skr->skr_seg_free);
1110 		if (sg == NULL) {
1111 			/* see if we can grow the freelist */
1112 			sg = sksegment_freelist_grow(skr);
1113 			if (sg != NULL) {
1114 				break;
1115 			}
1116 
1117 			if (skr->skr_mode & SKR_MODE_SLAB) {
1118 				SKR_UNLOCK(skr);
1119 				/*
1120 				 * None found; it's possible that the slab
1121 				 * layer is caching extra objects, so ask
1122 				 * skmem_cache to reap/purge its caches.
1123 				 */
1124 				for (int i = 0; i < SKR_MAX_CACHES; i++) {
1125 					if (skr->skr_cache[i] == NULL) {
1126 						continue;
1127 					}
1128 					skmem_cache_reap_now(skr->skr_cache[i],
1129 					    TRUE);
1130 				}
1131 				SKR_LOCK(skr);
1132 				/*
1133 				 * If we manage to get some freed, try again.
1134 				 */
1135 				if (TAILQ_FIRST(&skr->skr_seg_free) != NULL) {
1136 					continue;
1137 				}
1138 			}
1139 
1140 			/*
1141 			 * Give up if this is a non-blocking allocation,
1142 			 * or if this is a blocking allocation but the
1143 			 * caller is willing to handle failure (SKMEM_FAILOK).
1144 			 */
1145 			if (skmflag & (SKMEM_NOSLEEP | SKMEM_FAILOK)) {
1146 				break;
1147 			}
1148 
1149 			/* otherwise we wait until one is available */
1150 			++skr->skr_seg_waiters;
1151 			(void) msleep(&skr->skr_seg_free, &skr->skr_lock,
1152 			    (PZERO - 1), skr->skr_name, NULL);
1153 		}
1154 	}
1155 
1156 	SKR_LOCK_ASSERT_HELD(skr);
1157 
1158 	if (sg != NULL) {
1159 retry:
1160 		/*
1161 		 * We have a segment; remove it from the freelist and
1162 		 * insert it into the allocated-address hash chain.
1163 		 * Note that this may return NULL if we can't allocate
1164 		 * the memory descriptor.
1165 		 */
1166 		if (sksegment_freelist_remove(skr, sg, skmflag,
1167 		    FALSE) == NULL) {
1168 			ASSERT(sg->sg_state == SKSEG_STATE_DETACHED);
1169 			ASSERT(sg->sg_md == NULL);
1170 			ASSERT(sg->sg_start == 0 && sg->sg_end == 0);
1171 
1172 			/*
1173 			 * If it's non-blocking allocation, simply just give
1174 			 * up and let the caller decide when to retry.  Else,
1175 			 * it gets a bit complicated due to the contract we
1176 			 * have for blocking allocations with the client; the
1177 			 * most sensible thing to do here is to retry the
1178 			 * allocation ourselves.  Note that we keep using the
1179 			 * same segment we originally got, since we only need
1180 			 * the memory descriptor to be allocated for it; thus
1181 			 * we make sure we don't release the region lock when
1182 			 * retrying allocation.  Doing so is crucial when the
1183 			 * region is mirrored, since the segment indices on
1184 			 * both regions need to match.
1185 			 */
1186 			if (skmflag & SKMEM_NOSLEEP) {
1187 				SK_ERR("\"%s\": failed to allocate segment "
1188 				    "(non-sleeping mode)", skr->skr_name);
1189 				sg = NULL;
1190 			} else {
1191 				if (++retries > SKMEM_WDT_MAXTIME) {
1192 					panic_plain("\"%s\": failed to "
1193 					    "allocate segment (sleeping mode) "
1194 					    "after %u retries\n\n%s",
1195 					    skr->skr_name, SKMEM_WDT_MAXTIME,
1196 					    skmem_dump(skr));
1197 					/* NOTREACHED */
1198 					__builtin_unreachable();
1199 				} else {
1200 					SK_ERR("\"%s\": failed to allocate "
1201 					    "segment (sleeping mode): %u "
1202 					    "retries", skr->skr_name, retries);
1203 				}
1204 				if (skr->skr_mode & SKR_MODE_SLAB) {
1205 					/*
1206 					 * We can't get any memory descriptor
1207 					 * for this segment; reap extra cached
1208 					 * objects from the slab layer and hope
1209 					 * that we get lucky next time around.
1210 					 *
1211 					 * XXX [email protected]: perhaps also
1212 					 * trigger the zone allocator to do
1213 					 * its garbage collection here?
1214 					 */
1215 					skmem_cache_reap();
1216 				}
1217 				delay(1 * USEC_PER_SEC);        /* 1 sec */
1218 				goto retry;
1219 			}
1220 		}
1221 
1222 		if (sg != NULL) {
1223 			/* insert to allocated-address hash chain */
1224 			addr = skmem_region_alloc_common(skr, sg);
1225 		}
1226 	}
1227 
1228 	if (sg == NULL) {
1229 		VERIFY(skmflag & (SKMEM_NOSLEEP | SKMEM_FAILOK));
1230 		if (skmflag & SKMEM_PANIC) {
1231 			VERIFY((skmflag & (SKMEM_NOSLEEP | SKMEM_FAILOK)) ==
1232 			    SKMEM_NOSLEEP);
1233 			/*
1234 			 * If this is a failed non-blocking alloc and the caller
1235 			 * insists that it must be successful, then panic.
1236 			 */
1237 			panic_plain("\"%s\": skr 0x%p unable to satisfy "
1238 			    "mandatory allocation\n", skr->skr_name, skr);
1239 			/* NOTREACHED */
1240 			__builtin_unreachable();
1241 		} else {
1242 			/*
1243 			 * Give up if this is a non-blocking allocation,
1244 			 * or one where the caller is willing to handle
1245 			 * allocation failures.
1246 			 */
1247 			goto done;
1248 		}
1249 	}
1250 
1251 	ASSERT((mach_vm_address_t)addr == sg->sg_start);
1252 
1253 #if SK_LOG
1254 	SK_DF(SK_VERB_MEM_REGION, "skr 0x%llx sg 0x%llx",
1255 	    SK_KVA(skr), SK_KVA(sg));
1256 	if (skr->skr_mirror == NULL ||
1257 	    !(skr->skr_mirror->skr_mode & SKR_MODE_MIRRORED)) {
1258 		SK_DF(SK_VERB_MEM_REGION, "  [%u] [0x%llx-0x%llx)",
1259 		    sg->sg_index, SK_KVA(sg->sg_start), SK_KVA(sg->sg_end));
1260 	} else {
1261 		SK_DF(SK_VERB_MEM_REGION, "  [%u] sg 0x%llx [0x%llx-0x%llx) "
1262 		    "mirrored", sg->sg_index, SK_KVA(sg), SK_KVA(sg->sg_start),
1263 		    SK_KVA(sg->sg_end));
1264 	}
1265 #endif /* SK_LOG */
1266 
1267 	/*
1268 	 * If mirroring, allocate shadow object from slave region.
1269 	 */
1270 	if (skr->skr_mirror != NULL) {
1271 		ASSERT(skr->skr_mirror != skr);
1272 		ASSERT(!(skr->skr_mode & SKR_MODE_MIRRORED));
1273 		ASSERT(skr->skr_mirror->skr_mode & SKR_MODE_MIRRORED);
1274 		addr1 = skmem_region_mirror_alloc(skr->skr_mirror, sg, &sg1);
1275 		ASSERT(addr1 != NULL);
1276 		ASSERT(sg1 != NULL && sg1 != sg);
1277 		ASSERT(sg1->sg_index == sg->sg_index);
1278 	}
1279 
1280 done:
1281 	SKR_UNLOCK(skr);
1282 
1283 	/* return segment metadata to caller if asked (reference not needed) */
1284 	if (addr != NULL) {
1285 		if (retsg != NULL) {
1286 			*retsg = sg;
1287 		}
1288 		if (retsgm != NULL) {
1289 			*retsgm = sg1;
1290 		}
1291 	}
1292 
1293 	if (maddr != NULL) {
1294 		*maddr = addr1;
1295 	}
1296 
1297 	return addr;
1298 }
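/*
 * Summary of the skmflag contract enforced above (see the VERIFYs and
 * retry logic in skmem_region_alloc):
 *
 *	SKMEM_SLEEP			block until a segment is available
 *	SKMEM_NOSLEEP			may return NULL; caller retries later
 *	SKMEM_FAILOK			sleeping, but give up rather than wait
 *	SKMEM_NOSLEEP|SKMEM_PANIC	must succeed, else panic
 *
 * e.g. a caller that cannot block but must not fail would pass
 * SKMEM_NOSLEEP | SKMEM_PANIC as skmflag.
 */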
1299 
1300 /*
1301  * Allocate a segment from a mirror region at the same index.  While it
1302  * is a somewhat simplified variant of skmem_region_alloc, keeping it
1303  * separate allows us to avoid further convoluting that routine.
1304  */
1305 static void *
1306 skmem_region_mirror_alloc(struct skmem_region *skr, struct sksegment *sg0,
1307     struct sksegment **retsg)
1308 {
1309 	struct sksegment sg_key = { .sg_index = sg0->sg_index };
1310 	struct sksegment *sg = NULL;
1311 	void *addr = NULL;
1312 
1313 	ASSERT(skr->skr_mode & SKR_MODE_MIRRORED);
1314 	ASSERT(skr->skr_mirror == NULL);
1315 	ASSERT(sg0->sg_type == SKSEG_TYPE_ALLOC);
1316 
1317 	if (retsg != NULL) {
1318 		*retsg = NULL;
1319 	}
1320 
1321 	SKR_LOCK(skr);
1322 
1323 	/*
1324 	 * See if we can find one in the freelist first.  Otherwise,
1325 	 * create a new segment of the same index and add that to the
1326 	 * freelist.  We would always get a segment since both regions
1327 	 * are synchronized when it comes to the indices of allocated
1328 	 * segments.
1329 	 */
1330 	sg = RB_FIND(segtfreehead, &skr->skr_seg_tfree, &sg_key);
1331 	if (sg == NULL) {
1332 		sg = sksegment_alloc_with_idx(skr, sg0->sg_index);
1333 		VERIFY(sg != NULL);
1334 	}
1335 	VERIFY(sg->sg_index == sg0->sg_index);
1336 
1337 	/*
1338 	 * We have a segment; remove it from the freelist and insert
1339 	 * it into the allocated-address hash chain.  This either
1340 	 * succeeds or panics (SKMEM_PANIC) when a memory descriptor
1341 	 * can't be allocated.
1342 	 *
1343 	 * TODO: consider retrying IOBMD allocation attempts if needed.
1344 	 */
1345 	sg = sksegment_freelist_remove(skr, sg, SKMEM_PANIC, FALSE);
1346 	VERIFY(sg != NULL);
1347 
1348 	/* insert to allocated-address hash chain */
1349 	addr = skmem_region_alloc_common(skr, sg);
1350 
1351 #if SK_LOG
1352 	SK_DF(SK_VERB_MEM_REGION, "skr 0x%llx sg 0x%llx",
1353 	    SK_KVA(skr), SK_KVA(sg));
1354 	SK_DF(SK_VERB_MEM_REGION, "  [%u] [0x%llx-0x%llx)",
1355 	    sg->sg_index, SK_KVA(sg->sg_start), SK_KVA(sg->sg_end));
1356 #endif /* SK_LOG */
1357 
1358 	SKR_UNLOCK(skr);
1359 
1360 	/* return segment metadata to caller if asked (reference not needed) */
1361 	if (retsg != NULL) {
1362 		*retsg = sg;
1363 	}
1364 
1365 	return addr;
1366 }
1367 
1368 /*
1369  * Free a segment to the region.
1370  */
1371 void
1372 skmem_region_free(struct skmem_region *skr, void *addr, void *maddr)
1373 {
1374 	struct sksegment_bkt *sgb;
1375 	struct sksegment *sg, *tsg;
1376 
1377 	VERIFY(!(skr->skr_mode & SKR_MODE_GUARD));
1378 
1379 	/*
1380 	 * Search the hash chain to find a matching segment for the
1381 	 * given address.  If found, remove the segment from the
1382 	 * hash chain and insert it into the freelist.  Otherwise,
1383 	 * we panic since the caller has given us a bogus address.
1384 	 */
1385 	SKR_LOCK(skr);
1386 	sgb = SKMEM_REGION_HASH(skr, addr);
1387 	TAILQ_FOREACH_SAFE(sg, &sgb->sgb_head, sg_link, tsg) {
1388 		ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1389 		if (sg->sg_start == (mach_vm_address_t)addr) {
1390 			TAILQ_REMOVE(&sgb->sgb_head, sg, sg_link);
1391 			sg->sg_link.tqe_next = NULL;
1392 			sg->sg_link.tqe_prev = NULL;
1393 			break;
1394 		}
1395 	}
1396 
1397 	ASSERT(sg != NULL);
1398 	if (sg->sg_state == SKSEG_STATE_MAPPED_WIRED) {
1399 		ASSERT(skr->skr_w_meminuse >= skr->skr_seg_size);
1400 		skr->skr_w_meminuse -= skr->skr_seg_size;
1401 	}
1402 	sksegment_freelist_insert(skr, sg, FALSE);
1403 
1404 	ASSERT(skr->skr_seginuse != 0);
1405 	skr->skr_seginuse--;
1406 	skr->skr_meminuse -= skr->skr_seg_size;
1407 	skr->skr_free++;
1408 
1409 #if SK_LOG
1410 	SK_DF(SK_VERB_MEM_REGION, "skr 0x%llx sg 0x%llx",
1411 	    SK_KVA(skr), SK_KVA(sg));
1412 	if (skr->skr_mirror == NULL ||
1413 	    !(skr->skr_mirror->skr_mode & SKR_MODE_MIRRORED)) {
1414 		SK_DF(SK_VERB_MEM_REGION, "  [%u] [0x%llx-0x%llx)",
1415 		    sg->sg_index, SK_KVA(addr),
1416 		    SK_KVA((uintptr_t)addr + skr->skr_seg_size));
1417 	} else {
1418 		SK_DF(SK_VERB_MEM_REGION, "  [%u] sg 0x%llx [0x%llx-0x%llx) "
1419 		    "mirrored", sg->sg_index, SK_KVA(sg), SK_KVA(addr),
1420 		    SK_KVA((uintptr_t)addr + skr->skr_seg_size));
1421 	}
1422 #endif /* SK_LOG */
1423 
1424 	/*
1425 	 * If mirroring, also free shadow object in slave region.
1426 	 */
1427 	if (skr->skr_mirror != NULL) {
1428 		ASSERT(maddr != NULL);
1429 		ASSERT(skr->skr_mirror != skr);
1430 		ASSERT(!(skr->skr_mode & SKR_MODE_MIRRORED));
1431 		ASSERT(skr->skr_mirror->skr_mode & SKR_MODE_MIRRORED);
1432 		skmem_region_free(skr->skr_mirror, maddr, NULL);
1433 	}
1434 
1435 	/* wake up any blocked threads waiting for a segment */
1436 	if (skr->skr_seg_waiters != 0) {
1437 		SK_DF(SK_VERB_MEM_REGION,
1438 		    "sg 0x%llx waking up %u waiters", SK_KVA(sg),
1439 		    skr->skr_seg_waiters);
1440 		skr->skr_seg_waiters = 0;
1441 		wakeup(&skr->skr_seg_free);
1442 	}
1443 	SKR_UNLOCK(skr);
1444 }
1445 
1446 __attribute__((always_inline))
1447 static inline void
1448 skmem_region_retain_locked(struct skmem_region *skr)
1449 {
1450 	SKR_LOCK_ASSERT_HELD(skr);
1451 	skr->skr_refcnt++;
1452 	ASSERT(skr->skr_refcnt != 0);
1453 }
1454 
1455 /*
1456  * Retain a region.
1457  */
1458 void
1459 skmem_region_retain(struct skmem_region *skr)
1460 {
1461 	SKR_LOCK(skr);
1462 	skmem_region_retain_locked(skr);
1463 	SKR_UNLOCK(skr);
1464 }
1465 
1466 __attribute__((always_inline))
1467 static inline boolean_t
1468 skmem_region_release_locked(struct skmem_region *skr)
1469 {
1470 	SKR_LOCK_ASSERT_HELD(skr);
1471 	ASSERT(skr->skr_refcnt != 0);
1472 	if (--skr->skr_refcnt == 0) {
1473 		skmem_region_destroy(skr);
1474 		return TRUE;
1475 	}
1476 	return FALSE;
1477 }
1478 
1479 /*
1480  * Release (and potentially destroy) a segment.
1481  */
1482 boolean_t
1483 skmem_region_release(struct skmem_region *skr)
1484 {
1485 	boolean_t lastref;
1486 
1487 	SKR_LOCK(skr);
1488 	if (!(lastref = skmem_region_release_locked(skr))) {
1489 		SKR_UNLOCK(skr);
1490 	}
1491 
1492 	return lastref;
1493 }
1494 
1495 /*
1496  * Depopulate the segment freelist.
1497  */
1498 static void
1499 skmem_region_depopulate(struct skmem_region *skr)
1500 {
1501 	struct sksegment *sg, *tsg;
1502 
1503 	SK_DF(SK_VERB_MEM_REGION, "\"%s\": skr 0x%llx ",
1504 	    skr->skr_name, SK_KVA(skr));
1505 
1506 	SKR_LOCK_ASSERT_HELD(skr);
1507 	ASSERT(skr->skr_seg_bmap_len != 0 || (skr->skr_mode & SKR_MODE_PSEUDO));
1508 
1509 	TAILQ_FOREACH_SAFE(sg, &skr->skr_seg_free, sg_link, tsg) {
1510 		struct sksegment *sg0;
1511 		uint32_t i;
1512 
1513 		i = sg->sg_index;
1514 		sg0 = sksegment_freelist_remove(skr, sg, 0, TRUE);
1515 		VERIFY(sg0 == sg);
1516 
1517 		sksegment_destroy(skr, sg);
1518 		ASSERT(bit_test(skr->skr_seg_bmap[i / BMAPSZ], i % BMAPSZ));
1519 	}
1520 }
1521 
1522 /*
1523  * Free tree segment compare routine.
1524  */
1525 static int
1526 sksegment_cmp(const struct sksegment *sg1, const struct sksegment *sg2)
1527 {
1528 	return sg1->sg_index - sg2->sg_index;
1529 }
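
/*
 * Illustrative sketch: sksegment_cmp() keys the free tree strictly by
 * sg_index, so a lookup only needs a stack "key" segment with that
 * field filled in, exactly as sksegment_alloc_with_idx() does below.
 * Hypothetical helper; assumes the region lock is held.
 */
#if 0 /* example only */
static struct sksegment *
sksegment_tfree_find_example(struct skmem_region *skr, uint32_t idx)
{
	struct sksegment sg_key = { .sg_index = idx };

	SKR_LOCK_ASSERT_HELD(skr);
	return RB_FIND(segtfreehead, &skr->skr_seg_tfree, &sg_key);
}
#endif /* example only */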
1530 
1531 /*
1532  * Create a segment.
1533  *
1534  * Upon success, clear the segment index's bit in the skr_seg_bmap bitmap.
1535  */
1536 static struct sksegment *
1537 sksegment_create(struct skmem_region *skr, uint32_t i)
1538 {
1539 	struct sksegment *sg = NULL;
1540 	bitmap_t *bmap;
1541 
1542 	SKR_LOCK_ASSERT_HELD(skr);
1543 
1544 	ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
1545 	ASSERT(i < skr->skr_seg_max_cnt);
1546 	ASSERT(skr->skr_reg != NULL);
1547 	ASSERT(skr->skr_seg_size == round_page(skr->skr_seg_size));
1548 
1549 	bmap = &skr->skr_seg_bmap[i / BMAPSZ];
1550 	ASSERT(bit_test(*bmap, i % BMAPSZ));
1551 
1552 	sg = skmem_cache_alloc(skmem_sg_cache, SKMEM_SLEEP);
1553 	bzero(sg, sg_size);
1554 
1555 	sg->sg_region = skr;
1556 	sg->sg_index = i;
1557 	sg->sg_state = SKSEG_STATE_DETACHED;
1558 
1559 	/* claim it (clear bit) */
1560 	bit_clear(*bmap, i % BMAPSZ);
1561 
1562 	SK_DF(SK_VERB_MEM_REGION, "  [%u] [0x%llx-0x%llx) 0x%b", i,
1563 	    SK_KVA(sg->sg_start), SK_KVA(sg->sg_end), skr->skr_mode,
1564 	    SKR_MODE_BITS);
1565 
1566 	return sg;
1567 }
1568 
1569 /*
1570  * Destroy a segment.
1571  *
1572  * Set the bit for the segment's index in the skr_seg_bmap bitmap,
1573  * indicating that it is now vacant.
1574  */
1575 static void
1576 sksegment_destroy(struct skmem_region *skr, struct sksegment *sg)
1577 {
1578 	uint32_t i = sg->sg_index;
1579 	bitmap_t *bmap;
1580 
1581 	SKR_LOCK_ASSERT_HELD(skr);
1582 
1583 	ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
1584 	ASSERT(skr == sg->sg_region);
1585 	ASSERT(skr->skr_reg != NULL);
1586 	ASSERT(sg->sg_type == SKSEG_TYPE_DESTROYED);
1587 	ASSERT(i < skr->skr_seg_max_cnt);
1588 
1589 	bmap = &skr->skr_seg_bmap[i / BMAPSZ];
1590 	ASSERT(!bit_test(*bmap, i % BMAPSZ));
1591 
1592 	SK_DF(SK_VERB_MEM_REGION, "  [%u] [0x%llx-0x%llx) 0x%b",
1593 	    i, SK_KVA(sg->sg_start), SK_KVA(sg->sg_end),
1594 	    skr->skr_mode, SKR_MODE_BITS);
1595 
1596 	/*
1597 	 * Undo what's done earlier at segment creation time.
1598 	 */
1599 
1600 	ASSERT(sg->sg_md == NULL);
1601 	ASSERT(sg->sg_start == 0 && sg->sg_end == 0);
1602 	ASSERT(sg->sg_state == SKSEG_STATE_DETACHED);
1603 
1604 	/* release it (set bit) */
1605 	bit_set(*bmap, i % BMAPSZ);
1606 
1607 	skmem_cache_free(skmem_sg_cache, sg);
1608 }
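
/*
 * Illustrative sketch: segment index i maps to word (i / BMAPSZ), bit
 * (i % BMAPSZ) of skr_seg_bmap, and a set bit means "vacant"; hence
 * sksegment_create() clears the bit to claim an index and
 * sksegment_destroy() sets it again.  Hypothetical helper.
 */
#if 0 /* example only */
static boolean_t
sksegment_index_vacant_example(struct skmem_region *skr, uint32_t i)
{
	ASSERT(i < skr->skr_seg_max_cnt);
	return bit_test(skr->skr_seg_bmap[i / BMAPSZ], i % BMAPSZ) ?
	    TRUE : FALSE;
}
#endif /* example only */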
1609 
1610 /*
1611  * Insert a segment into freelist (freeing the segment).
1612  */
1613 static void
1614 sksegment_freelist_insert(struct skmem_region *skr, struct sksegment *sg,
1615     boolean_t populating)
1616 {
1617 	SKR_LOCK_ASSERT_HELD(skr);
1618 
1619 	ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
1620 	ASSERT(sg->sg_type != SKSEG_TYPE_FREE);
1621 	ASSERT(skr == sg->sg_region);
1622 	ASSERT(skr->skr_reg != NULL);
1623 	ASSERT(sg->sg_index < skr->skr_seg_max_cnt);
1624 
1625 	/*
1626 	 * If the region is being populated, there's nothing to tear down yet.
1627 	 */
1628 	if (__improbable(populating)) {
1629 		ASSERT(sg->sg_md == NULL);
1630 		ASSERT(sg->sg_start == 0 && sg->sg_end == 0);
1631 		ASSERT(sg->sg_state == SKSEG_STATE_DETACHED);
1632 	} else {
1633 		IOSKMemoryBufferRef md;
1634 		IOReturn err;
1635 
1636 		ASSERT(sg->sg_md != NULL);
1637 		ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1638 
1639 		/*
1640 		 * Let the client remove the memory from the IOMMU, and unwire it.
1641 		 */
1642 		if (skr->skr_seg_dtor != NULL) {
1643 			skr->skr_seg_dtor(sg, sg->sg_md, skr->skr_private);
1644 		}
1645 
1646 		ASSERT(sg->sg_state == SKSEG_STATE_MAPPED ||
1647 		    sg->sg_state == SKSEG_STATE_MAPPED_WIRED);
1648 
1649 		IOSKRegionClearBufferDebug(skr->skr_reg, sg->sg_index, &md);
1650 		VERIFY(sg->sg_md == md);
1651 
1652 		/* if persistent, unwire this memory now */
1653 		if (skr->skr_mode & SKR_MODE_PERSISTENT) {
1654 			err = IOSKMemoryUnwire(md);
1655 			if (err != kIOReturnSuccess) {
1656 				panic("Fail to unwire md %p, err %d", md, err);
1657 			}
1658 		}
1659 
1660 		/* mark memory as empty/discarded for consistency */
1661 		err = IOSKMemoryDiscard(md);
1662 		if (err != kIOReturnSuccess) {
1663 			panic("Fail to discard md %p, err %d", md, err);
1664 		}
1665 
1666 		IOSKMemoryDestroy(md);
1667 		sg->sg_md = NULL;
1668 		sg->sg_start = sg->sg_end = 0;
1669 		sg->sg_state = SKSEG_STATE_DETACHED;
1670 
1671 		ASSERT(skr->skr_memtotal >= skr->skr_seg_size);
1672 		skr->skr_memtotal -= skr->skr_seg_size;
1673 	}
1674 
1675 	sg->sg_type = SKSEG_TYPE_FREE;
1676 	ASSERT(sg->sg_link.tqe_next == NULL);
1677 	ASSERT(sg->sg_link.tqe_prev == NULL);
1678 	TAILQ_INSERT_TAIL(&skr->skr_seg_free, sg, sg_link);
1679 	ASSERT(sg->sg_node.rbe_left == NULL);
1680 	ASSERT(sg->sg_node.rbe_right == NULL);
1681 	ASSERT(sg->sg_node.rbe_parent == NULL);
1682 	RB_INSERT(segtfreehead, &skr->skr_seg_tfree, sg);
1683 	++skr->skr_seg_free_cnt;
1684 	ASSERT(skr->skr_seg_free_cnt <= skr->skr_seg_max_cnt);
1685 }
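
/*
 * Illustrative sketch: a free segment always sits on both free
 * structures at once -- the TAILQ (reuse order) and the red-black
 * tree (index-ordered lookup) -- with skr_seg_free_cnt counting the
 * members of either.  A hypothetical audit, assuming the region lock
 * is held:
 */
#if 0 /* example only */
static void
skr_freelist_audit_example(struct skmem_region *skr)
{
	struct sksegment *sg;
	uint32_t n = 0;

	SKR_LOCK_ASSERT_HELD(skr);
	TAILQ_FOREACH(sg, &skr->skr_seg_free, sg_link) {
		struct sksegment sg_key = { .sg_index = sg->sg_index };
		ASSERT(sg->sg_type == SKSEG_TYPE_FREE);
		ASSERT(RB_FIND(segtfreehead, &skr->skr_seg_tfree,
		    &sg_key) == sg);
		n++;
	}
	ASSERT(n == skr->skr_seg_free_cnt);
}
#endif /* example only */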
1686 
1687 /*
1688  * Remove a segment from the freelist (allocating the segment).
1689  */
1690 static struct sksegment *
1691 sksegment_freelist_remove(struct skmem_region *skr, struct sksegment *sg,
1692     uint32_t skmflag, boolean_t purging)
1693 {
1694 #pragma unused(skmflag)
1695 	mach_vm_address_t segstart;
1696 	IOReturn err;
1697 
1698 	SKR_LOCK_ASSERT_HELD(skr);
1699 
1700 	ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
1701 	ASSERT(sg != NULL);
1702 	ASSERT(skr == sg->sg_region);
1703 	ASSERT(skr->skr_reg != NULL);
1704 	ASSERT(sg->sg_type == SKSEG_TYPE_FREE);
1705 	ASSERT(sg->sg_index < skr->skr_seg_max_cnt);
1706 
1707 #if (DEVELOPMENT || DEBUG)
1708 	uint64_t mtbf = skmem_region_get_mtbf();
1709 	/*
1710 	 * MTBF doesn't apply when SKMEM_PANIC is set, as the caller would assert.
1711 	 */
1712 	if (__improbable(mtbf != 0 && !purging &&
1713 	    (net_uptime_ms() % mtbf) == 0 &&
1714 	    !(skmflag & SKMEM_PANIC))) {
1715 		SK_ERR("skr \"%s\" 0x%llx sg 0x%llx MTBF failure",
1716 		    skr->skr_name, SK_KVA(skr), SK_KVA(sg));
1717 		net_update_uptime();
1718 		return NULL;
1719 	}
1720 #endif /* (DEVELOPMENT || DEBUG) */
1721 
1722 	TAILQ_REMOVE(&skr->skr_seg_free, sg, sg_link);
1723 	sg->sg_link.tqe_next = NULL;
1724 	sg->sg_link.tqe_prev = NULL;
1725 	RB_REMOVE(segtfreehead, &skr->skr_seg_tfree, sg);
1726 	sg->sg_node.rbe_left = NULL;
1727 	sg->sg_node.rbe_right = NULL;
1728 	sg->sg_node.rbe_parent = NULL;
1729 
1730 	ASSERT(skr->skr_seg_free_cnt != 0);
1731 	--skr->skr_seg_free_cnt;
1732 
1733 	/*
1734 	 * If the region is being depopulated, then we're done.
1735 	 */
1736 	if (__improbable(purging)) {
1737 		ASSERT(sg->sg_md == NULL);
1738 		ASSERT(sg->sg_start == 0 && sg->sg_end == 0);
1739 		ASSERT(sg->sg_state == SKSEG_STATE_DETACHED);
1740 		sg->sg_type = SKSEG_TYPE_DESTROYED;
1741 		return sg;
1742 	}
1743 
1744 	ASSERT(sg->sg_md == NULL);
1745 	ASSERT(sg->sg_start == 0 && sg->sg_end == 0);
1746 	ASSERT(sg->sg_state == SKSEG_STATE_DETACHED);
1747 
1748 	/* created as non-volatile (mapped) upon success */
1749 	if ((sg->sg_md = IOSKMemoryBufferCreate(skr->skr_seg_size,
1750 	    &skr->skr_bufspec, &segstart)) == NULL) {
1751 		ASSERT(sg->sg_type == SKSEG_TYPE_FREE);
1752 		if (skmflag & SKMEM_PANIC) {
1753 			/* if the caller insists on success, then panic */
1754 			panic_plain("\"%s\": skr 0x%p sg 0x%p (idx %u) unable "
1755 			    "to satisfy mandatory allocation\n", skr->skr_name,
1756 			    skr, sg, sg->sg_index);
1757 			/* NOTREACHED */
1758 			__builtin_unreachable();
1759 		}
1760 		/* reinsert this segment to freelist */
1761 		ASSERT(sg->sg_link.tqe_next == NULL);
1762 		ASSERT(sg->sg_link.tqe_prev == NULL);
1763 		TAILQ_INSERT_HEAD(&skr->skr_seg_free, sg, sg_link);
1764 		ASSERT(sg->sg_node.rbe_left == NULL);
1765 		ASSERT(sg->sg_node.rbe_right == NULL);
1766 		ASSERT(sg->sg_node.rbe_parent == NULL);
1767 		RB_INSERT(segtfreehead, &skr->skr_seg_tfree, sg);
1768 		++skr->skr_seg_free_cnt;
1769 		return NULL;
1770 	}
1771 
1772 	sg->sg_start = segstart;
1773 	sg->sg_end = (segstart + skr->skr_seg_size);
1774 	ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1775 
1776 	/* mark memory as non-volatile just to be consistent */
1777 	err = IOSKMemoryReclaim(sg->sg_md);
1778 	if (err != kIOReturnSuccess) {
1779 		panic("Fail to reclaim md %p, err %d", sg->sg_md, err);
1780 	}
1781 
1782 	/* if persistent, wire down its memory now */
1783 	if (skr->skr_mode & SKR_MODE_PERSISTENT) {
1784 		err = IOSKMemoryWire(sg->sg_md);
1785 		if (err != kIOReturnSuccess) {
1786 			panic("Fail to wire md %p, err %d", sg->sg_md, err);
1787 		}
1788 	}
1789 
1790 	err = IOSKRegionSetBuffer(skr->skr_reg, sg->sg_index, sg->sg_md);
1791 	if (err != kIOReturnSuccess) {
1792 		panic("Fail to set md %p, err %d", sg->sg_md, err);
1793 	}
1794 
1795 	/*
1796 	 * Let the client wire it and insert it into the IOMMU, if applicable.
1797 	 * Then determine whether it ended up wired and set the right state.
1798 	 */
1799 	if (skr->skr_seg_ctor != NULL) {
1800 		skr->skr_seg_ctor(sg, sg->sg_md, skr->skr_private);
1801 	}
1802 
1803 	sg->sg_state = IOSKBufferIsWired(sg->sg_md) ?
1804 	    SKSEG_STATE_MAPPED_WIRED : SKSEG_STATE_MAPPED;
1805 
1806 	skr->skr_memtotal += skr->skr_seg_size;
1807 
1808 	ASSERT(sg->sg_md != NULL);
1809 	ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1810 
1811 	sg->sg_type = SKSEG_TYPE_ALLOC;
1812 	return sg;
1813 }
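
/*
 * Summary of the buffer calls above and their inverses on the free
 * path in sksegment_freelist_insert():
 *
 *    allocate (freelist remove)        free (freelist insert)
 *    --------------------------        ----------------------
 *    IOSKMemoryBufferCreate()          skr_seg_dtor()       [if set]
 *    IOSKMemoryReclaim()               IOSKRegionClearBufferDebug()
 *    IOSKMemoryWire()    [persistent]  IOSKMemoryUnwire()   [persistent]
 *    IOSKRegionSetBuffer()             IOSKMemoryDiscard()
 *    skr_seg_ctor()      [if set]      IOSKMemoryDestroy()
 */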
1814 
1815 /*
1816  * Find the first available index and allocate a segment at that index.
1817  */
1818 static struct sksegment *
1819 sksegment_freelist_grow(struct skmem_region *skr)
1820 {
1821 	struct sksegment *sg = NULL;
1822 	uint32_t i, j, idx;
1823 
1824 	SKR_LOCK_ASSERT_HELD(skr);
1825 
1826 	ASSERT(!(skr->skr_mode & SKR_MODE_PSEUDO));
1827 	ASSERT(skr->skr_seg_bmap_len != 0);
1828 	ASSERT(skr->skr_seg_max_cnt != 0);
1829 
1830 	for (i = 0; i < skr->skr_seg_bmap_len; i++) {
1831 		bitmap_t *bmap, mask;
1832 		uint32_t end = (BMAPSZ - 1);
1833 
1834 		if (i == (skr->skr_seg_bmap_len - 1)) {
1835 			end = (skr->skr_seg_max_cnt - 1) % BMAPSZ;
1836 		}
1837 
1838 		bmap = &skr->skr_seg_bmap[i];
1839 		mask = BMASK64(0, end);
1840 
1841 		j = ffsll((*bmap) & mask);
1842 		if (j == 0) {
1843 			continue;
1844 		}
1845 
1846 		--j;
1847 		idx = (i * BMAPSZ) + j;
1848 
1849 		sg = sksegment_alloc_with_idx(skr, idx);
1850 
1851 		/* we're done */
1852 		break;
1853 	}
1854 
1855 	ASSERT((sg != NULL) || (skr->skr_seginuse == skr->skr_seg_max_cnt));
1856 	return sg;
1857 }
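
/*
 * Illustrative sketch of the first-fit scan above: ffsll() returns
 * the 1-based position of the least-significant set ("vacant") bit,
 * or 0 when nothing is set under the mask, so the global index is
 * (word * BMAPSZ) + (ffsll(bits & mask) - 1).  Hypothetical helper
 * returning the first vacant index, or -1 when fully populated.
 */
#if 0 /* example only */
static int64_t
skr_first_vacant_index_example(struct skmem_region *skr)
{
	uint32_t i;

	for (i = 0; i < skr->skr_seg_bmap_len; i++) {
		uint32_t end = (i == (skr->skr_seg_bmap_len - 1)) ?
		    ((skr->skr_seg_max_cnt - 1) % BMAPSZ) : (BMAPSZ - 1);
		int j = ffsll(skr->skr_seg_bmap[i] & BMASK64(0, end));

		if (j != 0) {
			return (int64_t)(i * BMAPSZ) + (j - 1);
		}
	}
	return -1;	/* no vacant index */
}
#endif /* example only */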
1858 
1859 /*
1860  * Create a single segment at a specific index and add it to the freelist.
1861  */
1862 static struct sksegment *
1863 sksegment_alloc_with_idx(struct skmem_region *skr, uint32_t idx)
1864 {
1865 	struct sksegment *sg;
1866 
1867 	SKR_LOCK_ASSERT_HELD(skr);
1868 
1869 	if (!bit_test(skr->skr_seg_bmap[idx / BMAPSZ], idx % BMAPSZ)) {
1870 		panic("%s: '%s' (%p) idx %u (out of %u) is already allocated",
1871 		    __func__, skr->skr_name, (void *)skr, idx,
1872 		    (skr->skr_seg_max_cnt - 1));
1873 		/* NOTREACHED */
1874 		__builtin_unreachable();
1875 	}
1876 
1877 	/* must not fail, blocking alloc */
1878 	sg = sksegment_create(skr, idx);
1879 	VERIFY(sg != NULL);
1880 	VERIFY(!bit_test(skr->skr_seg_bmap[idx / BMAPSZ], idx % BMAPSZ));
1881 
1882 	/* populate the freelist */
1883 	sksegment_freelist_insert(skr, sg, TRUE);
1884 	ASSERT(sg == TAILQ_LAST(&skr->skr_seg_free, segfreehead));
1885 #if (DEVELOPMENT || DEBUG)
1886 	struct sksegment sg_key = { .sg_index = sg->sg_index };
1887 	ASSERT(sg == RB_FIND(segtfreehead, &skr->skr_seg_tfree, &sg_key));
1888 #endif /* (DEVELOPMENT || DEBUG) */
1889 
1890 	SK_DF(SK_VERB_MEM_REGION, "sg %u/%u", (idx + 1), skr->skr_seg_max_cnt);
1891 
1892 	return sg;
1893 }
1894 
1895 /*
1896  * Rescale the region's allocated-address hash table.
1897  */
1898 static void
1899 skmem_region_hash_rescale(struct skmem_region *skr)
1900 {
1901 	struct sksegment_bkt *old_table, *new_table;
1902 	size_t old_size, new_size;
1903 	uint32_t i, moved = 0;
1904 
1905 	if (skr->skr_mode & SKR_MODE_PSEUDO) {
1906 		ASSERT(skr->skr_hash_table == NULL);
1907 		/* this is a no-op for a pseudo region */
1908 		return;
1909 	}
1910 
1911 	ASSERT(skr->skr_hash_table != NULL);
1912 	/* insist that we are executing in the update thread call context */
1913 	ASSERT(sk_is_region_update_protected());
1914 
1915 	/*
1916 	 * To keep the average lookup time small (lookup depth near 1.0), the
1917 	 * hash table size should be roughly the same as (though not
1918 	 * necessarily equal to) the number of segments in use.
1919 	 */
1920 	new_size = MAX(skr->skr_hash_initial,
1921 	    (1 << (flsll(3 * skr->skr_seginuse + 4) - 2)));
1922 	new_size = MIN(skr->skr_hash_limit, new_size);
1923 	old_size = (skr->skr_hash_mask + 1);
1924 
1925 	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) {
1926 		return;
1927 	}
1928 
1929 	new_table = sk_alloc_type_array(struct sksegment_bkt, new_size,
1930 	    Z_NOWAIT, skmem_tag_segment_hash);
1931 	if (__improbable(new_table == NULL)) {
1932 		return;
1933 	}
1934 
1935 	for (i = 0; i < new_size; i++) {
1936 		TAILQ_INIT(&new_table[i].sgb_head);
1937 	}
1938 
1939 	SKR_LOCK(skr);
1940 
1941 	old_size = (skr->skr_hash_mask + 1);
1942 	old_table = skr->skr_hash_table;
1943 
1944 	skr->skr_hash_mask = (uint32_t)(new_size - 1);
1945 	skr->skr_hash_table = new_table;
1946 	skr->skr_rescale++;
1947 
1948 	for (i = 0; i < old_size; i++) {
1949 		struct sksegment_bkt *sgb = &old_table[i];
1950 		struct sksegment_bkt *new_sgb;
1951 		struct sksegment *sg;
1952 
1953 		while ((sg = TAILQ_FIRST(&sgb->sgb_head)) != NULL) {
1954 			TAILQ_REMOVE(&sgb->sgb_head, sg, sg_link);
1955 			ASSERT(sg->sg_start != 0 && sg->sg_end != 0);
1956 			new_sgb = SKMEM_REGION_HASH(skr, sg->sg_start);
1957 			TAILQ_INSERT_TAIL(&new_sgb->sgb_head, sg, sg_link);
1958 			++moved;
1959 		}
1960 		ASSERT(TAILQ_EMPTY(&sgb->sgb_head));
1961 	}
1962 
1963 	SK_DF(SK_VERB_MEM_REGION,
1964 	    "skr 0x%llx old_size %u new_size %u [%u moved]", SK_KVA(skr),
1965 	    (uint32_t)old_size, (uint32_t)new_size, moved);
1966 
1967 	SKR_UNLOCK(skr);
1968 
1969 	sk_free_type_array(struct sksegment_bkt, old_size, old_table);
1970 }
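
/*
 * Worked example of the sizing heuristic above (before clamping to
 * [skr_hash_initial, skr_hash_limit]): with x = 3 * seginuse + 4,
 * new_size = 2^(flsll(x) - 2), i.e. roughly 1.5 * seginuse rounded
 * down to a power of two:
 *
 *    seginuse      x    flsll(x)   new_size
 *           0      4        3          2
 *          10     34        6         16
 *         100    304        9        128
 *        1000   3004       12       1024
 *
 * The resize is skipped while the current size is within a factor of
 * two of the target, which damps oscillation around a boundary.
 */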
1971 
1972 /*
1973  * Apply a function to operate on all regions.
1974  */
1975 static void
1976 skmem_region_applyall(void (*func)(struct skmem_region *))
1977 {
1978 	struct skmem_region *skr;
1979 
1980 	net_update_uptime();
1981 
1982 	SKMEM_REGION_LOCK();
1983 	TAILQ_FOREACH(skr, &skmem_region_head, skr_link) {
1984 		func(skr);
1985 	}
1986 	SKMEM_REGION_UNLOCK();
1987 }
1988 
1989 static void
1990 skmem_region_update(struct skmem_region *skr)
1991 {
1992 	SKMEM_REGION_LOCK_ASSERT_HELD();
1993 
1994 	/* insist that we are executing in the update thread call context */
1995 	ASSERT(sk_is_region_update_protected());
1996 
1997 	SKR_LOCK(skr);
1998 	/*
1999 	 * If there are threads blocked waiting for an available
2000 	 * segment, wake them up periodically so they can issue
2001 	 * another skmem_cache_reap() to reclaim resources cached
2002 	 * by skmem_cache.
2003 	 */
2004 	if (skr->skr_seg_waiters != 0) {
2005 		SK_DF(SK_VERB_MEM_REGION,
2006 		    "waking up %u waiters to reclaim", skr->skr_seg_waiters);
2007 		skr->skr_seg_waiters = 0;
2008 		wakeup(&skr->skr_seg_free);
2009 	}
2010 	SKR_UNLOCK(skr);
2011 
2012 	/*
2013 	 * Rescale the hash table if needed.
2014 	 */
2015 	skmem_region_hash_rescale(skr);
2016 }
2017 
2018 /*
2019  * Thread call callback for update.
2020  */
2021 static void
2022 skmem_region_update_func(thread_call_param_t dummy, thread_call_param_t arg)
2023 {
2024 #pragma unused(dummy, arg)
2025 	sk_protect_t protect;
2026 
2027 	protect = sk_region_update_protect();
2028 	skmem_region_applyall(skmem_region_update);
2029 	sk_region_update_unprotect(protect);
2030 
2031 	skmem_dispatch(skmem_region_update_tc, NULL,
2032 	    (skmem_region_update_interval * NSEC_PER_SEC));
2033 }
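
/*
 * Note: the callback re-arms itself via skmem_dispatch() each time it
 * runs, so region updates recur every skmem_region_update_interval
 * seconds (converted to nanoseconds above) without needing a
 * dedicated thread.
 */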
2034 
2035 boolean_t
2036 skmem_region_for_pp(skmem_region_id_t id)
2037 {
2038 	int i;
2039 
2040 	for (i = 0; i < SKMEM_PP_REGIONS; i++) {
2041 		if (id == skmem_pp_region_ids[i]) {
2042 			return TRUE;
2043 		}
2044 	}
2045 	return FALSE;
2046 }
2047 
2048 void
2049 skmem_region_get_stats(struct skmem_region *skr, struct sk_stats_region *sreg)
2050 {
2051 	bzero(sreg, sizeof(*sreg));
2052 
2053 	(void) snprintf(sreg->sreg_name, sizeof(sreg->sreg_name),
2054 	    "%s", skr->skr_name);
2055 	uuid_copy(sreg->sreg_uuid, skr->skr_uuid);
2056 	sreg->sreg_id = (sk_stats_region_id_t)skr->skr_id;
2057 	sreg->sreg_mode = skr->skr_mode;
2058 
2059 	sreg->sreg_r_seg_size = skr->skr_params.srp_r_seg_size;
2060 	sreg->sreg_c_seg_size = skr->skr_seg_size;
2061 	sreg->sreg_seg_cnt = skr->skr_seg_max_cnt;
2062 	sreg->sreg_seg_objs = skr->skr_seg_objs;
2063 	sreg->sreg_r_obj_size = skr->skr_r_obj_size;
2064 	sreg->sreg_r_obj_cnt = skr->skr_r_obj_cnt;
2065 	sreg->sreg_c_obj_size = skr->skr_c_obj_size;
2066 	sreg->sreg_c_obj_cnt = skr->skr_c_obj_cnt;
2067 	sreg->sreg_align = skr->skr_align;
2068 	sreg->sreg_max_frags = skr->skr_max_frags;
2069 
2070 	sreg->sreg_meminuse = skr->skr_meminuse;
2071 	sreg->sreg_w_meminuse = skr->skr_w_meminuse;
2072 	sreg->sreg_memtotal = skr->skr_memtotal;
2073 	sreg->sreg_seginuse = skr->skr_seginuse;
2074 	sreg->sreg_rescale = skr->skr_rescale;
2075 	sreg->sreg_hash_size = (skr->skr_hash_mask + 1);
2076 	sreg->sreg_alloc = skr->skr_alloc;
2077 	sreg->sreg_free = skr->skr_free;
2078 }
2079 
2080 static size_t
2081 skmem_region_mib_get_stats(struct skmem_region *skr, void *out, size_t len)
2082 {
2083 	size_t actual_space = sizeof(struct sk_stats_region);
2084 	struct sk_stats_region *sreg = out;
2085 
2086 	if (out == NULL || len < actual_space) {
2087 		goto done;
2088 	}
2089 
2090 	skmem_region_get_stats(skr, sreg);
2091 
2092 done:
2093 	return actual_space;
2094 }
2095 
2096 static int
2097 skmem_region_mib_get_sysctl SYSCTL_HANDLER_ARGS
2098 {
2099 #pragma unused(arg1, arg2, oidp)
2100 	struct skmem_region *skr;
2101 	size_t actual_space;
2102 	size_t buffer_space;
2103 	size_t allocated_space;
2104 	caddr_t buffer = NULL;
2105 	caddr_t scan;
2106 	int error = 0;
2107 
2108 	if (!kauth_cred_issuser(kauth_cred_get())) {
2109 		return EPERM;
2110 	}
2111 
2112 	net_update_uptime();
2113 	buffer_space = req->oldlen;
2114 	if (req->oldptr != USER_ADDR_NULL && buffer_space != 0) {
2115 		if (buffer_space > SK_SYSCTL_ALLOC_MAX) {
2116 			buffer_space = SK_SYSCTL_ALLOC_MAX;
2117 		}
2118 		allocated_space = buffer_space;
2119 		buffer = sk_alloc_data(allocated_space, Z_WAITOK, skmem_tag_region_mib);
2120 		if (__improbable(buffer == NULL)) {
2121 			return ENOBUFS;
2122 		}
2123 	} else if (req->oldptr == USER_ADDR_NULL) {
2124 		buffer_space = 0;
2125 	}
2126 	actual_space = 0;
2127 	scan = buffer;
2128 
2129 	SKMEM_REGION_LOCK();
2130 	TAILQ_FOREACH(skr, &skmem_region_head, skr_link) {
2131 		size_t size = skmem_region_mib_get_stats(skr, scan, buffer_space);
2132 		if (scan != NULL) {
2133 			if (buffer_space < size) {
2134 				/* supplied buffer too small, stop copying */
2135 				error = ENOMEM;
2136 				break;
2137 			}
2138 			scan += size;
2139 			buffer_space -= size;
2140 		}
2141 		actual_space += size;
2142 	}
2143 	SKMEM_REGION_UNLOCK();
2144 
2145 	if (actual_space != 0) {
2146 		int out_error = SYSCTL_OUT(req, buffer, actual_space);
2147 		if (out_error != 0) {
2148 			error = out_error;
2149 		}
2150 	}
2151 	if (buffer != NULL) {
2152 		sk_free_data(buffer, allocated_space);
2153 	}
2154 
2155 	return error;
2156 }
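
/*
 * Illustrative userland sketch (hypothetical, not part of this file):
 * the handler above implements the usual two-pass sysctl protocol --
 * a NULL buffer yields the required size (actual_space), after which
 * the caller allocates and fetches.  The MIB name
 * "kern.skywalk.stats.region" is assumed here for illustration.
 */
#if 0 /* example only */
#include <stdlib.h>
#include <sys/sysctl.h>

static void *
fetch_region_stats_example(size_t *lenp)
{
	size_t len = 0;
	void *buf;

	/* pass 1: NULL oldp => kernel reports total bytes needed */
	if (sysctlbyname("kern.skywalk.stats.region", NULL, &len,
	    NULL, 0) != 0 || len == 0) {
		return NULL;
	}
	if ((buf = malloc(len)) == NULL) {
		return NULL;
	}
	/* pass 2: fetch the array of struct sk_stats_region */
	if (sysctlbyname("kern.skywalk.stats.region", buf, &len,
	    NULL, 0) != 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}
#endif /* example only */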
2157 
2158 #if SK_LOG
2159 const char *
2160 skmem_region_id2name(skmem_region_id_t id)
2161 {
2162 	const char *name;
2163 	switch (id) {
2164 	case SKMEM_REGION_SCHEMA:
2165 		name = "SCHEMA";
2166 		break;
2167 
2168 	case SKMEM_REGION_RING:
2169 		name = "RING";
2170 		break;
2171 
2172 	case SKMEM_REGION_BUF_DEF:
2173 		name = "BUF_DEF";
2174 		break;
2175 
2176 	case SKMEM_REGION_BUF_LARGE:
2177 		name = "BUF_LARGE";
2178 		break;
2179 
2180 	case SKMEM_REGION_RXBUF_DEF:
2181 		name = "RXBUF_DEF";
2182 		break;
2183 
2184 	case SKMEM_REGION_RXBUF_LARGE:
2185 		name = "RXBUF_LARGE";
2186 		break;
2187 
2188 	case SKMEM_REGION_TXBUF_DEF:
2189 		name = "TXBUF_DEF";
2190 		break;
2191 
2192 	case SKMEM_REGION_TXBUF_LARGE:
2193 		name = "TXBUF_LARGE";
2194 		break;
2195 
2196 	case SKMEM_REGION_UMD:
2197 		name = "UMD";
2198 		break;
2199 
2200 	case SKMEM_REGION_TXAUSD:
2201 		name = "TXAUSD";
2202 		break;
2203 
2204 	case SKMEM_REGION_RXFUSD:
2205 		name = "RXFUSD";
2206 		break;
2207 
2208 	case SKMEM_REGION_USTATS:
2209 		name = "USTATS";
2210 		break;
2211 
2212 	case SKMEM_REGION_FLOWADV:
2213 		name = "FLOWADV";
2214 		break;
2215 
2216 	case SKMEM_REGION_NEXUSADV:
2217 		name = "NEXUSADV";
2218 		break;
2219 
2220 	case SKMEM_REGION_SYSCTLS:
2221 		name = "SYSCTLS";
2222 		break;
2223 
2224 	case SKMEM_REGION_GUARD_HEAD:
2225 		name = "HEADGUARD";
2226 		break;
2227 
2228 	case SKMEM_REGION_GUARD_TAIL:
2229 		name = "TAILGUARD";
2230 		break;
2231 
2232 	case SKMEM_REGION_KMD:
2233 		name = "KMD";
2234 		break;
2235 
2236 	case SKMEM_REGION_RXKMD:
2237 		name = "RXKMD";
2238 		break;
2239 
2240 	case SKMEM_REGION_TXKMD:
2241 		name = "TXKMD";
2242 		break;
2243 
2244 	case SKMEM_REGION_TXAKSD:
2245 		name = "TXAKSD";
2246 		break;
2247 
2248 	case SKMEM_REGION_RXFKSD:
2249 		name = "RXFKSD";
2250 		break;
2251 
2252 	case SKMEM_REGION_KSTATS:
2253 		name = "KSTATS";
2254 		break;
2255 
2256 	case SKMEM_REGION_KBFT:
2257 		name = "KBFT";
2258 		break;
2259 
2260 	case SKMEM_REGION_UBFT:
2261 		name = "UBFT";
2262 		break;
2263 
2264 	case SKMEM_REGION_RXKBFT:
2265 		name = "RXKBFT";
2266 		break;
2267 
2268 	case SKMEM_REGION_TXKBFT:
2269 		name = "TXKBFT";
2270 		break;
2271 
2272 	case SKMEM_REGION_INTRINSIC:
2273 		name = "INTRINSIC";
2274 		break;
2275 
2276 	default:
2277 		name = "UNKNOWN";
2278 		break;
2279 	}
2280 
2281 	return name;
2282 }
2283 #endif /* SK_LOG */
2284 
2285 #if (DEVELOPMENT || DEBUG)
2286 uint64_t
2287 skmem_region_get_mtbf(void)
2288 {
2289 	return skmem_region_mtbf;
2290 }
2291 
2292 void
2293 skmem_region_set_mtbf(uint64_t newval)
2294 {
2295 	if (newval < SKMEM_REGION_MTBF_MIN) {
2296 		if (newval != 0) {
2297 			newval = SKMEM_REGION_MTBF_MIN;
2298 		}
2299 	} else if (newval > SKMEM_REGION_MTBF_MAX) {
2300 		newval = SKMEM_REGION_MTBF_MAX;
2301 	}
2302 
2303 	if (skmem_region_mtbf != newval) {
2304 		atomic_set_64(&skmem_region_mtbf, newval);
2305 		SK_ERR("MTBF set to %llu msec", skmem_region_mtbf);
2306 	}
2307 }
2308 
2309 static int
2310 skmem_region_mtbf_sysctl(struct sysctl_oid *oidp, void *arg1, int arg2,
2311     struct sysctl_req *req)
2312 {
2313 #pragma unused(oidp, arg1, arg2)
2314 	int changed, error;
2315 	uint64_t newval;
2316 
2317 	_CASSERT(sizeof(skmem_region_mtbf) == sizeof(uint64_t));
2318 	if ((error = sysctl_io_number(req, skmem_region_mtbf,
2319 	    sizeof(uint64_t), &newval, &changed)) == 0) {
2320 		if (changed) {
2321 			skmem_region_set_mtbf(newval);
2322 		}
2323 	}
2324 	return error;
2325 }
2326 #endif /* (DEVELOPMENT || DEBUG) */
2327