/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/ltable.h>
#include <kern/zalloc.h>
#include <libkern/OSAtomic.h>
#include <pexpert/pexpert.h>
#include <vm/vm_kern.h>


#define P2ROUNDUP(x, align) (-(-((uintptr_t)(x)) & -((uintptr_t)align)))
#define ROUNDDOWN(x, y)  (((x)/(y))*(y))
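
/*
 * Illustrative only (not part of the original source): P2ROUNDUP rounds up
 * to a power-of-two alignment using the two's-complement identity
 * -(-x & -align), while ROUNDDOWN truncates to any multiple. For example,
 * with a hypothetical 4K page size:
 *
 *	P2ROUNDUP(5000, 4096) == 8192
 *	ROUNDDOWN(5000, 4096) == 4096
 */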

/* ----------------------------------------------------------------------
 *
 * Lockless Link Table Interface
 *
 * ---------------------------------------------------------------------- */

/* default VA space for link tables (zone allocated) */
#define DEFAULT_MAX_TABLE_SIZE  P2ROUNDUP(8 * 1024 * 1024, PAGE_SIZE)

TUNABLE(vm_size_t, g_lt_max_tbl_size, "lt_tbl_size", 0);
LCK_GRP_DECLARE(g_lt_lck_grp, "link_table_locks");

#if DEVELOPMENT || DEBUG
/* global for lldb macros */
uint64_t g_lt_idx_max = LT_IDX_MAX;
#endif

__startup_func
static void
ltable_startup_tunables_init(void)
{
	// make sure that if a boot-arg was passed, g_lt_max_tbl_size
	// is a PAGE_SIZE multiple.
	//
	// Also set the default for platforms where PAGE_SIZE
	// isn't a compile time constant.
	if (g_lt_max_tbl_size == 0) {
		g_lt_max_tbl_size = (typeof(g_lt_max_tbl_size))DEFAULT_MAX_TABLE_SIZE;
	} else {
		g_lt_max_tbl_size = round_page(g_lt_max_tbl_size);
	}
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, ltable_startup_tunables_init);
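
/*
 * Illustrative only: the "lt_tbl_size" boot-arg caps the VA space used for
 * link tables. Booting with a hypothetical lt_tbl_size=0x400000 would limit
 * link tables to 4MB (rounded up to a PAGE_SIZE multiple by the startup code
 * above); leaving it unset (or 0) falls back to DEFAULT_MAX_TABLE_SIZE.
 */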


/* construct a link table element from an offset and mask into a slab */
#define lt_elem_ofst_slab(slab, slab_msk, ofst) \
	/* cast through 'void *' to avoid compiler alignment warning messages */ \
	((struct lt_elem *)((void *)((uintptr_t)(slab) + ((ofst) & (slab_msk)))))
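
/*
 * Illustrative only: with a hypothetical 4K slab (slab_msk == 0x0fff) and
 * 64-byte elements, the element at index 3 of a slab would be addressed as:
 *
 *	lt_elem_ofst_slab(slab, 0x0fff, 3 * 64)
 *	    == (struct lt_elem *)((uintptr_t)slab + 0xc0)
 *
 * The mask keeps the byte offset within the slab regardless of how the
 * caller derived it.
 */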

#if CONFIG_LTABLE_STATS
/* version that makes no assumption on waste within a slab */
static inline struct lt_elem *
lt_elem_idx(struct link_table *table, uint32_t idx)
{
	int slab_idx = idx / table->slab_elem;
	struct lt_elem *slab = table->table[slab_idx];
	if (!slab) {
		panic("Invalid index:%d slab:%d (NULL) for table:%p",
		    idx, slab_idx, table);
	}
	assert(slab->lt_id.idx <= idx && (slab->lt_id.idx + table->slab_elem) > idx);
	return lt_elem_ofst_slab(slab, table->slab_msk, (idx - slab->lt_id.idx) * table->elem_sz);
}
#else /* !CONFIG_LTABLE_STATS */
/* version that assumes 100% utilization of slabs (no waste) */
static inline struct lt_elem *
lt_elem_idx(struct link_table *table, uint32_t idx)
{
	uint32_t ofst = idx * table->elem_sz;
	struct lt_elem *slab = table->table[ofst >> table->slab_shift];
	if (!slab) {
		panic("Invalid index:%d slab:%d (NULL) for table:%p",
		    idx, (ofst >> table->slab_shift), table);
	}
	assert(slab->lt_id.idx <= idx && (slab->lt_id.idx + table->slab_elem) > idx);
	return lt_elem_ofst_slab(slab, table->slab_msk, ofst);
}
#endif /* CONFIG_LTABLE_STATS */

static int __assert_only
lt_elem_in_range(struct lt_elem *elem, struct link_table *table)
{
	struct lt_elem **base = table->table;
	uintptr_t e = (uintptr_t)elem;
	assert(base != NULL);
	while (*base != NULL) {
		uintptr_t b = (uintptr_t)(*base);
		if (e >= b && e < b + table->slab_sz) {
			return 1;
		}
		base++;
		if ((uintptr_t)base >= (uintptr_t)table->table + PAGE_SIZE) {
			return 0;
		}
	}
	return 0;
}


/**
 * lt_elem_invalidate: mark 'elem' as invalid
 *
 * NOTE: this does _not_ get or put a reference on 'elem'
 */
void
lt_elem_invalidate(struct lt_elem *elem)
{
	uint32_t __assert_only old = OSBitAndAtomic(~LT_BITS_VALID, &elem->lt_bits);
	OSMemoryBarrier();
	assert(((lt_bits_type(old) != LT_RESERVED) && (old & LT_BITS_VALID)) ||
	    ((lt_bits_type(old) == LT_RESERVED) && !(old & LT_BITS_VALID)));
}

/**
 * lt_elem_mkvalid: mark 'elem' as valid
 *
 * NOTE: this does _not_ get or put a reference on 'elem'
 */
void
lt_elem_mkvalid(struct lt_elem *elem)
{
	uint32_t __assert_only old = OSBitOrAtomic(LT_BITS_VALID, &elem->lt_bits);
	OSMemoryBarrier();
	assert(!(old & LT_BITS_VALID));
}

static void
lt_elem_set_type(struct lt_elem *elem, int type)
{
	uint32_t old_bits, new_bits;
	do {
		old_bits = elem->lt_bits;
		new_bits = (old_bits & ~LT_BITS_TYPE) |
		    ((type & LT_BITS_TYPE_MASK) << LT_BITS_TYPE_SHIFT);
	} while (OSCompareAndSwap(old_bits, new_bits, &elem->lt_bits) == FALSE);
	OSMemoryBarrier();
}


/**
 * ltable_init: initialize a link table with given parameters
 *
 */
void
ltable_init(struct link_table *table, const char *name,
    uint32_t max_tbl_elem, uint32_t elem_sz,
    ltable_poison_func poison)
{
	kern_return_t kr;
	uint32_t slab_sz, slab_shift, slab_msk, slab_elem;
	zone_t slab_zone;
	size_t max_tbl_sz;
	struct lt_elem *e, **base;

#ifndef CONFIG_LTABLE_STATS
	/* the element size _must_ be a power of two! */
	if ((elem_sz & (elem_sz - 1)) != 0) {
		panic("elem_sz:%d for table:'%s' must be a power of two!",
		    elem_sz, name);
	}
#endif

	/*
	 * First, allocate a single page of memory to act as the base
	 * for the table's element slabs
	 */
	kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&base,
	    PAGE_SIZE, 0, KMA_NOPAGEWAIT | KMA_ZERO, VM_KERN_MEMORY_LTABLE);
	if (kr != KERN_SUCCESS) {
		panic("Cannot initialize %s table: "
		    "kernel_memory_allocate failed:%d\n", name, kr);
	}

	/*
	 * Based on the maximum table size, calculate the slab size:
	 * we allocate one page of slab pointers for the table, and we need
	 * to index elements of 'elem_sz'; this gives us the slab size based
	 * on the maximum size to which the table should grow.
	 */
	max_tbl_sz = (max_tbl_elem * elem_sz);
	max_tbl_sz = P2ROUNDUP(max_tbl_sz, PAGE_SIZE);

	/* system maximum table size divided by number of slots in a page */
	slab_sz = (uint32_t)(max_tbl_sz / (PAGE_SIZE / (sizeof(void *))));
	if (slab_sz < PAGE_SIZE) {
		slab_sz = PAGE_SIZE;
	}

	/* make sure the slab size is a power of two */
	slab_shift = 0;
	slab_msk = ~0;
	for (uint32_t i = 0; i < 31; i++) {
		uint32_t bit = (1 << i);
		if ((slab_sz & bit) == slab_sz) {
			slab_shift = i;
			slab_msk = 0;
			for (uint32_t j = 0; j < i; j++) {
				slab_msk |= (1 << j);
			}
			break;
		}
		slab_sz &= ~bit;
	}
	slab_elem = slab_sz / elem_sz;

	/* initialize the table's slab zone (for table growth) */
	ltdbg("Initializing %s zone: slab:%d (%d,0x%x) max:%ld",
	    name, slab_sz, slab_shift, slab_msk, max_tbl_sz);
	slab_zone = zone_create(name, slab_sz, ZC_NONE);
	assert(slab_zone != ZONE_NULL);

	/* allocate the first slab and populate it */
	base[0] = zalloc_flags(slab_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* setup the initial freelist */
	ltdbg("initializing %d links (%d bytes each)...", slab_elem, elem_sz);
	for (unsigned l = 0; l < slab_elem; l++) {
		e = lt_elem_ofst_slab(base[0], slab_msk, l * elem_sz);
		e->lt_id.idx = l;
		/*
		 * Generations are never 0, and always have the low bit set.
		 * It means the `0` ltable_id is never valid.
		 */
		e->lt_id.generation = 1;
		e->lt_next_idx = l + 1;
	}

	/* make sure the last free element points to a never-valid idx */
	e = lt_elem_ofst_slab(base[0], slab_msk, (slab_elem - 1) * elem_sz);
	e->lt_next_idx = LT_IDX_MAX;

	lck_mtx_init(&table->lock, &g_lt_lck_grp, LCK_ATTR_NULL);

	table->slab_sz = slab_sz;
	table->slab_shift = slab_shift;
	table->slab_msk = slab_msk;
	table->slab_elem = slab_elem;
	table->slab_zone = slab_zone;

	table->elem_sz = elem_sz;
	table->nelem = slab_elem;
	table->used_elem = 0;
	table->poison = poison;

	table->table = base;
	table->next_free_slab = &base[1];
	table->free_list.id = base[0]->lt_id.id;

#if CONFIG_LTABLE_STATS
	table->nslabs = 1;
	table->nallocs = 0;
	table->nreallocs = 0;
	table->npreposts = 0;
	table->nreservations = 0;
	table->nreserved_releases = 0;

	table->max_used = 0;
	table->avg_used = 0;
	table->max_reservations = 0;
	table->avg_reservations = 0;
#endif
}

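/*
 * A minimal usage sketch, illustrative only ('my_link_t', 'my_table' and
 * 'my_poison' are hypothetical and not part of this file). A client type
 * typically embeds 'struct lt_elem' as its first member, and on non-stats
 * kernels its size must be a power of two (pad if necessary):
 *
 *	struct my_link_t {
 *		struct lt_elem lte;     // table linkage: keep it first
 *		uint64_t       payload; // plus padding to a power-of-two size
 *	};
 *
 *	static struct link_table my_table;
 *
 *	static void
 *	my_poison(struct link_table *t, struct lt_elem *e)
 *	{
 *		((struct my_link_t *)(void *)e)->payload = 0;
 *	}
 *
 *	// size the table off the global VA budget, e.g. at startup:
 *	ltable_init(&my_table, "tests.my_table",
 *	    (uint32_t)(g_lt_max_tbl_size / sizeof(struct my_link_t)),
 *	    (uint32_t)sizeof(struct my_link_t), my_poison);
 */
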
static inline bool
ltable_has_space(uint32_t nelem, uint32_t used_elem, uint32_t ask)
{
	return used_elem + ask <= nelem;
}

/**
 * ltable_grow: grow a link table by adding another 'slab' of table elements
 *
 * Conditions:
 *	table mutex is unlocked
 *	calling thread can block
 */
void
ltable_grow(struct link_table *table, uint32_t min_free)
{
	struct lt_elem *slab, **slot;
	struct lt_elem *e = NULL, *first_new_elem, *last_new_elem;
	struct ltable_id free_id;
	struct ltable_id next_id;

	assert(get_preemption_level() == 0);
	assert(table && table->slab_zone);

	lck_mtx_lock(&table->lock);

	/*
	 * If the caller just wanted to ensure a minimum number of elements,
	 * do that (and don't just blindly grow the table). Also, don't grow
	 * the table unnecessarily - we could have been beaten by a higher
	 * priority thread who acquired the lock and grew the table before we
	 * got here.
	 *
	 * Add some slop (MAX_CPUS) to that check: if we keep bumping into
	 * the limit, allocating threads will keep taking the mutex and
	 * falling into the slowpath, which we want to avoid.
	 */
	if (ltable_has_space(table->nelem, table->used_elem, min_free + MAX_CPUS)) {
		lck_mtx_unlock(&table->lock);
		return;
	}

	/* we are now committed to table growth */
	ltdbg_v("BEGIN");

	if (table->next_free_slab == NULL) {
		panic("No more room to grow table: %p (nelem: %d, used: %d)",
		    table, table->nelem, table->used_elem);
	}
	slot = table->next_free_slab;
	table->next_free_slab++;
	if ((uintptr_t)table->next_free_slab >= (uintptr_t)table->table + PAGE_SIZE) {
		table->next_free_slab = NULL;
	}

	assert(*slot == NULL);

	/* allocate another slab */
	slab = zalloc_flags(table->slab_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* put the new elements into a freelist */
	ltdbg_v("    init %d new links...", table->slab_elem);
	for (unsigned l = 0; l < table->slab_elem; l++) {
		uint32_t idx = l + table->nelem;
		if (idx >= (LT_IDX_MAX - 1)) {
			break; /* the last element of the last slab */
		}
		e = lt_elem_ofst_slab(slab, table->slab_msk, l * table->elem_sz);
		e->lt_id.idx = idx;
		e->lt_id.generation = 1;
		e->lt_next_idx = idx + 1;
	}
	last_new_elem = e;
	assert(last_new_elem != NULL);

	first_new_elem = lt_elem_ofst_slab(slab, table->slab_msk, 0);

	disable_preemption();

	/* update table bookkeeping, and atomically swap the freelist head */
	*slot = slab;
	if (table->nelem + table->slab_elem >= LT_IDX_MAX) {
		table->nelem = LT_IDX_MAX - 1;
	} else {
		table->nelem += table->slab_elem;
	}

#if CONFIG_LTABLE_STATS
	table->nslabs += 1;
#endif

	/*
	 * The atomic swap of the free list head marks the end of table
	 * growth. Incoming requests may now use the newly allocated slab
	 * of table elements
	 */
	free_id = table->free_list;
	next_id.idx = first_new_elem->lt_id.idx;
	do {
		last_new_elem->lt_next_idx = free_id.idx;
		next_id.generation = free_id.generation + 2;
	} while (!os_atomic_cmpxchgv(&table->free_list.id, free_id.id,
	    next_id.id, &free_id.id, seq_cst));

	enable_preemption();

	lck_mtx_unlock(&table->lock);

	return;
}

#if DEVELOPMENT || DEBUG

int
ltable_nelem(struct link_table *table)
{
	int nelem = 0;

	lck_mtx_lock(&table->lock);

	nelem = table->used_elem;

	lck_mtx_unlock(&table->lock);

	return nelem;
}
#endif

/**
 * ltable_alloc_elem: allocate one or more elements from a given table
 *
 * The returned element(s) will be of type 'type', but will remain invalid.
 *
 * If the caller has disabled preemption, then this function may (rarely)
 * spin, waiting for another thread to either release 'nelem' table elements
 * or grow the table.
 *
 * If the caller can block, then this function may (rarely) block while
 * the table grows to meet the demand for 'nelem' element(s).
 */
__attribute__((noinline))
struct lt_elem *
ltable_alloc_elem(struct link_table *table, int type,
    int nelem, int nattempts)
{
	int nspins = 0, ntries = 0, nalloc = 0;
	uint32_t table_size, used_elem;
	struct lt_elem *elem = NULL, *last = NULL;
	struct ltable_id free_id, next_id;

	static const int max_retries = 500;

	if (type != LT_ELEM && type != LT_LINK && type != LT_RESERVED) {
		panic("link_table_alloc of invalid elem type:%d from table @%p",
		    type, table);
	}

	assert(nelem > 0);

	/*
	 * If the caller only wants to try a certain number of times, make it
	 * look like we've already made (MAX - nattempts) tries at allocation
	 */
	if (nattempts > 0 && nattempts <= max_retries) {
		ntries = max_retries - nattempts;
	}

try_again:
	elem = NULL;
	nalloc = 0;

	/* get fresh values */
	table_size = os_atomic_load(&table->nelem, relaxed);
	used_elem = os_atomic_load(&table->used_elem, relaxed);

	if (ntries++ > max_retries) {
		struct lt_elem *tmp;
		if (nattempts > 0) {
			/*
			 * The caller specified a particular number of
			 * attempts before failure, so it's expected that
			 * they're prepared to handle a NULL return.
			 */
			return NULL;
		}

		if (!ltable_has_space(table_size, used_elem, nelem)) {
			panic("No more room to grow table: 0x%p size:%d, used:%d, requested elem:%d",
			    table, table_size, used_elem, nelem);
		}
		if (nelem == 1) {
			panic("Too many alloc retries: %d, table:%p, type:%d, nelem:%d",
			    ntries, table, type, nelem);
		}
		/* don't panic: try allocating one-at-a-time */
		while (nelem > 0) {
			tmp = ltable_alloc_elem(table, type, 1, nattempts);
			if (elem) {
				lt_elem_list_link(table, tmp, elem);
			}
			elem = tmp;
			--nelem;
		}
		assert(elem != NULL);
		return elem;
	}

	if (get_preemption_level() != 0) {
		if (!ltable_has_space(table_size, used_elem, nelem)) {
#if CONFIG_LTABLE_STATS
			table->nspins += 1;
#endif
			/*
			 * We may have just raced with table growth: check
			 * again to make sure there really isn't any space.
			 */
			if (++nspins > 4) {
				panic("Can't grow table %p with preemption"
				    " disabled!", table);
			}
			delay(1);
			goto try_again;
		}
	} else {
		/*
		 * If we're close to being out of elements, just grow the
		 * table; don't wait until only one element is left, because
		 * the thread that needs it is likely to have preemption
		 * disabled and won't be allowed to grow the table.
		 */
		if (!ltable_has_space(table_size, used_elem, nelem + MAX_CPUS)) {
			ltable_grow(table, nelem + MAX_CPUS);
			goto try_again;
		}
	}

	/* read this value only once before the CAS */
	free_id = table->free_list;
	if (free_id.idx >= table_size) {
		goto try_again;
	}

	/*
	 * Find the item on the free list which will become the new free list
	 * head, but be careful not to modify any memory (read only)!  Other
	 * threads can alter table state at any time up until the CAS.  We
	 * don't modify any memory until we've successfully swapped out the
	 * free list head with the one we've investigated.
	 */
	last = elem = lt_elem_idx(table, free_id.idx);
	while (++nalloc < nelem) {
		uint32_t idx = last->lt_next_idx;
		if (idx == LT_IDX_MAX) {
			goto try_again;
		}
		last = lt_elem_idx(table, idx);
	}
	/* 'last' points to the last element being allocated */

	/*
	 * Disable preemption while we update the count and perform the CAS,
	 * in order to minimize the inconsistency window: other allocators
	 * can get _really_ confused if you are preempted in the wrong spot.
	 */
	disable_preemption();

	/*
	 * Use the generation as an anti ABA.
	 * last->lt_next_idx might be LT_IDX_MAX and this is OK.
	 */
	next_id.generation = free_id.generation + 2;
	next_id.idx = last->lt_next_idx;

	if (!os_atomic_cmpxchg(&table->free_list.id,
	    free_id.id, next_id.id, seq_cst)) {
		enable_preemption();
		goto try_again;
	}
	os_atomic_add(&table->used_elem, nelem, relaxed);

	enable_preemption();

	/*
	 * After the CAS, we know that we own free_id, and that it points to
	 * a valid table entry (checked above). We can now safely initialize
	 * the allocated elements.
	 */

	/* end the list of allocated elements */
	last->lt_next_idx = LT_IDX_MAX;

	/*
	 * Update the generation count, and return the element(s)
	 * with a single reference (and no valid bit). If the
	 * caller immediately calls _put() on any element, then
	 * it will be released back to the free list. If the caller
	 * subsequently marks the element as valid, then the put
	 * will simply drop the reference.
	 */
	for (struct lt_elem *tmp = elem;;) {
		assert(!lt_bits_valid(tmp->lt_bits) &&
		    (lt_bits_refcnt(tmp->lt_bits) == 0));
		--nalloc;
		tmp->lt_id.generation += 2;
		tmp->lt_bits = 1;
		lt_elem_set_type(tmp, type);
		if (tmp->lt_next_idx == LT_IDX_MAX) {
			break;
		}
		assert(tmp->lt_next_idx != LT_IDX_MAX);
		tmp = lt_elem_idx(table, tmp->lt_next_idx);
	}
	assert(nalloc == 0);

#if CONFIG_LTABLE_STATS
	uint64_t nreservations;
	table->nallocs += nelem;
	if (type == LT_RESERVED) {
		OSIncrementAtomic64(&table->nreservations);
	}
	nreservations = table->nreservations;
	if (table->used_elem > table->max_used) {
		table->max_used = table->used_elem;
	}
	if (nreservations > table->max_reservations) {
		table->max_reservations = nreservations;
	}
	table->avg_used = (table->avg_used + table->used_elem) / 2;
	table->avg_reservations = (table->avg_reservations + nreservations) / 2;
#endif

	return elem;
}
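
/*
 * Typical allocation lifecycle, sketched for illustration only ('my_table'
 * is hypothetical):
 *
 *	struct lt_elem *e;
 *
 *	// allocate one LT_ELEM; may block (or spin briefly if preemption
 *	// is disabled) until the table has room; nattempts == 0 retries
 *	// until it succeeds
 *	e = ltable_alloc_elem(&my_table, LT_ELEM, 1, 0);
 *
 *	// ... initialize the caller's payload around 'e' ...
 *
 *	lt_elem_mkvalid(e);               // publish: lookups by ID now succeed
 *	...
 *	lt_elem_invalidate(e);            // hide from new lookups
 *	ltable_put_elem(&my_table, e);    // drop the allocation reference;
 *	                                  // frees once the refcount hits 0
 */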


/**
 * ltable_realloc_elem: convert a reserved element to a particular type
 *
 * This function is used to convert reserved elements (not yet marked valid)
 * to the given 'type'. The generation of 'elem' is incremented, the element
 * is disconnected from any list to which it belongs, and its type is set to
 * 'type'.
 */
void
ltable_realloc_elem(struct link_table *table, struct lt_elem *elem, int type)
{
	(void)table;
	assert(lt_elem_in_range(elem, table) &&
	    !lt_bits_valid(elem->lt_bits));

#if CONFIG_LTABLE_STATS
	table->nreallocs += 1;
	if (lt_bits_type(elem->lt_bits) == LT_RESERVED && type != LT_RESERVED) {
		/*
		 * This isn't under any lock, so we'll clamp it.
		 * The stats are meant to be informative, not perfectly
		 * accurate.
		 */
		OSDecrementAtomic64(&table->nreservations);
	}
	table->avg_reservations = (table->avg_reservations + table->nreservations) / 2;
#endif

	/*
	 * Return the same element with a new generation count, and a
	 * (potentially) new type. Don't touch the refcount: the caller
	 * is responsible for getting that (and the valid bit) correct.
	 */
	elem->lt_id.generation += 2;
	elem->lt_next_idx = LT_IDX_MAX;
	lt_elem_set_type(elem, type);

	return;
}

static void
ltable_free_list(struct link_table *table,
    struct lt_elem *head, struct lt_elem *tail, uint32_t nelem)
{
	struct ltable_id free_id;
	struct ltable_id next_id;

	/*
	 * Disable preemption while we update the count and perform the CAS,
	 * in order to minimize the inconsistency window: other allocators
	 * can get _really_ confused if you are preempted in the wrong spot.
	 */
	disable_preemption();

	free_id = table->free_list;
	next_id.idx = head->lt_id.idx;
	do {
		tail->lt_next_idx = free_id.idx;
		next_id.generation = free_id.generation + 2;
	} while (!os_atomic_cmpxchgv(&table->free_list.id, free_id.id,
	    next_id.id, &free_id.id, seq_cst));

	os_atomic_sub(&table->used_elem, nelem, relaxed);

	enable_preemption();
}

/**
 * ltable_free_elem: release an element back to a link table
 *
 * Do not call this function directly: use ltable_[get|put]_elem!
 *
 * Conditions:
 *     'elem' was originally allocated from 'table'
 *     'elem' is _not_ marked valid
 *     'elem' has a reference count of 0
 */
static void
ltable_free_elem(struct link_table *table, struct lt_elem *elem)
{
	assert(lt_elem_in_range(elem, table) &&
	    !lt_bits_valid(elem->lt_bits) &&
	    (lt_bits_refcnt(elem->lt_bits) == 0));

#if CONFIG_LTABLE_STATS
	table->avg_used = (table->avg_used + table->used_elem) / 2;
	if (lt_bits_type(elem->lt_bits) == LT_RESERVED) {
		OSDecrementAtomic64(&table->nreservations);
	}
	table->avg_reservations = (table->avg_reservations + table->nreservations) / 2;
#endif

	elem->lt_bits = 0;

	if (table->poison) {
		(table->poison)(table, elem);
	}

	ltable_free_list(table, elem, elem, 1);
}


/**
 * ltable_get_elem: get a reference to a table element identified by 'id'
 *
 * Returns a reference to the table element associated with the given 'id', or
 * NULL if the 'id' was invalid or does not exist in 'table'. The caller is
 * responsible for releasing the reference using ltable_put_elem().
 *
 * NOTE: if the table element pointed to by 'id' is marked as invalid,
 *       this function will return NULL.
 */
struct lt_elem *
ltable_get_elem(struct link_table *table, uint64_t id)
{
	struct lt_elem *elem;
	uint32_t idx, bits, new_bits;

	/*
	 * Here we have a reference to the table which is guaranteed to remain
	 * valid until we drop the reference
	 */

	idx = ((struct ltable_id *)&id)->idx;

	if (idx >= table->nelem) {
		panic("id:0x%llx : idx:%d > %d", id, idx, table->nelem);
	}

	elem = lt_elem_idx(table, idx);

	/* verify the validity by taking a reference on the table object */
	bits = elem->lt_bits;
	if (!lt_bits_valid(bits)) {
		return NULL;
	}

	/*
	 * do a pre-verify on the element ID to potentially
	 * avoid 2 compare-and-swaps
	 */
	if (elem->lt_id.id != id) {
		return NULL;
	}

	new_bits = bits + 1;

	/* check for overflow */
	assert(lt_bits_refcnt(new_bits) > 0);

	while (OSCompareAndSwap(bits, new_bits, &elem->lt_bits) == FALSE) {
		/*
		 * either the element became invalid,
		 * or someone else grabbed/removed a reference.
		 */
		bits = elem->lt_bits;
		if (!lt_bits_valid(bits)) {
			/* don't return invalid elements */
			return NULL;
		}
		new_bits = bits + 1;
		assert(lt_bits_refcnt(new_bits) > 0);
	}

	/* load barrier */
	OSMemoryBarrier();

	/* check to see that our reference is to the same generation! */
	if (elem->lt_id.id != id) {
		/*
		 *  ltdbg("ID:0x%llx table generation (%d) != %d",
		 *     id, elem->lt_id.generation,
		 *     ((struct ltable_id *)&id)->generation);
		 */
		ltable_put_elem(table, elem);
		return NULL;
	}

	/* We now have a reference on a valid object */
	return elem;
}

/**
 * ltable_elem_valid: returns whether an element ID looks valid.
 */
extern bool
ltable_elem_valid(struct link_table *table, uint64_t id)
{
	struct lt_elem *elem;
	uint32_t idx;

	idx = ((struct ltable_id *)&id)->idx;
	if (idx >= table->nelem) {
		panic("id:0x%llx : idx:%d > %d", id, idx, table->nelem);
	}

	elem = lt_elem_idx(table, idx);
	return elem->lt_id.id == id && lt_bits_valid(elem->lt_bits);
}

/**
 * ltable_put_elem: release a reference to table element
 *
 * This function releases a reference taken on a table element via
 * ltable_get_elem(). This function will release the element back to 'table'
 * when the reference count goes to 0 AND the element has been marked as
 * invalid.
 */
void
ltable_put_elem(struct link_table *table, struct lt_elem *elem)
{
	uint32_t old_bits, new_bits;

	assert(lt_elem_in_range(elem, table));

	old_bits = os_atomic_dec_orig(&elem->lt_bits, release);
	assert(lt_bits_refcnt(old_bits) > 0);
	new_bits = old_bits - 1;

	/*
	 * if this was the last reference, and it was marked as invalid,
	 * then we can add this link object back to the free list
	 */
	if (!lt_bits_valid(new_bits) && (lt_bits_refcnt(new_bits) == 0)) {
		os_atomic_thread_fence(acquire);
		ltable_free_elem(table, elem);
	}
}
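
/*
 * Lookup sketch, illustrative only ('my_table' and 'my_id' are hypothetical).
 * IDs embed a generation count, so a stale 64-bit ID naming a freed or
 * reallocated element simply fails the lookup instead of aliasing a new
 * element:
 *
 *	struct lt_elem *e = ltable_get_elem(&my_table, my_id);
 *	if (e != NULL) {
 *		// 'e' is valid and we hold a reference on it
 *		// ... use the element ...
 *		ltable_put_elem(&my_table, e);   // drop our reference
 *	}
 */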


/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 *
 * API: lt_elem_list_...
 *
 * Reuse the free list linkage member, 'lt_next_idx', of a table element
 * in a slightly more generic singly-linked list. All members of this
 * list have been allocated from a table, but have not been made valid.
 *
 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

/**
 * lt_elem_list_link: link a child onto a parent
 *
 * Note that if 'parent' is the head of a list, this function will follow that
 * list and attach 'child' to the end of it. In the simplest case, this
 * results in: parent->child
 * however this could also result in: parent->...->child
 */
int
lt_elem_list_link(struct link_table *table, struct lt_elem *parent, struct lt_elem *child)
{
	int nelem = 1;

	assert(lt_elem_in_range(parent, table));

	/* find the end of the parent's list */
	while (parent->lt_next_idx != LT_IDX_MAX) {
		assert(parent->lt_next_idx < table->nelem);
		parent = lt_elem_idx(table, parent->lt_next_idx);
		nelem++;
	}

	if (child) {
		assert(lt_elem_in_range(child, table));
		parent->lt_next_idx = child->lt_id.idx;
	}

	return nelem;
}


/**
 * lt_elem_list_first: obtain a pointer to the first element of a list.
 *
 * This function converts the head of a singly-linked list, 'id', into a real
 * lt_elem object and returns a pointer to the object.
 *
 * It does _not_ take an extra reference on the object: the list implicitly
 * holds that reference.
 */
struct lt_elem *
lt_elem_list_first(struct link_table *table, uint64_t id)
{
	uint32_t idx;
	struct lt_elem *elem = NULL;

	if (id == 0) {
		return NULL;
	}

	idx = ((struct ltable_id *)&id)->idx;

	if (idx >= table->nelem) {
		panic("Invalid element for id:0x%llx", id);
	}
	elem = lt_elem_idx(table, idx);

	/* invalid element: reserved ID was probably already reallocated */
	if (elem->lt_id.id != id) {
		return NULL;
	}

	/* the returned element should _not_ be marked valid! */
	if (lt_bits_valid(elem->lt_bits) ||
	    lt_bits_type(elem->lt_bits) != LT_RESERVED ||
	    lt_bits_refcnt(elem->lt_bits) != 1) {
		panic("Valid/unreserved element %p (0x%x) in reserved list",
		    elem, elem->lt_bits);
	}

	return elem;
}


/**
 * lt_elem_list_next: return the item subsequent to 'head' in a list
 *
 * Note that this will return NULL if 'head' is actually the end of the list.
 */
struct lt_elem *
lt_elem_list_next(struct link_table *table, struct lt_elem *head)
{
	struct lt_elem *elem;

	if (!head) {
		return NULL;
	}
	if (head->lt_next_idx >= table->nelem) {
		return NULL;
	}

	elem = lt_elem_idx(table, head->lt_next_idx);
	assert(lt_elem_in_range(elem, table));

	return elem;
}


/**
 * lt_elem_list_pop: pop an item off the head of a list
 *
 * The list head is pointed to by '*id', the element corresponding to '*id' is
 * returned by this function, and the new list head is returned in the in/out
 * parameter, '*id'.  The caller is responsible for the reference on the
 * returned object.  A realloc is done to reset the type of the object, but it
 * is still left invalid.
 */
struct lt_elem *
lt_elem_list_pop(struct link_table *table, uint64_t *id, int type)
{
	struct lt_elem *first, *next;

	if (!id || *id == 0) {
		return NULL;
	}

	/* pop an item off the reserved stack */

	first = lt_elem_list_first(table, *id);
	if (!first) {
		*id = 0;
		return NULL;
	}

	next = lt_elem_list_next(table, first);
	if (next) {
		*id = next->lt_id.id;
	} else {
		*id = 0;
	}

	ltable_realloc_elem(table, first, type);

	return first;
}

/**
 * lt_elem_list_release: free an entire list of reserved elements
 *
 * All elements in the list whose first member is 'head' will be released back
 * to 'table' as free elements. The 'type' parameter is used in development
 * kernels to assert that all elements on the list are of the given type.
 */
int
lt_elem_list_release(struct link_table *table, struct lt_elem *head,
    int __assert_only type)
{
	struct lt_elem *elem;
	int nelem = 0;

	if (!head) {
		return 0;
	}

	for (elem = head;;) {
		assert(lt_elem_in_range(elem, table));
		assert(!lt_bits_valid(elem->lt_bits) && (lt_bits_refcnt(elem->lt_bits) == 1));
		assert(lt_bits_type(elem->lt_bits) == type);

		nelem++;
		elem->lt_bits = 0;
		if (table->poison) {
			(table->poison)(table, elem);
		}

		if (elem->lt_next_idx == LT_IDX_MAX) {
			break;
		}
		assert(elem->lt_next_idx < table->nelem);
		elem = lt_elem_idx(table, elem->lt_next_idx);
	}

	/*
	 * 'elem' now points to the end of our list, and 'head' points to the
	 * beginning. We want to atomically swap the free list pointer with
	 * the 'head' and ensure that 'elem' points to the previous free list
	 * head.
	 */

	ltable_free_list(table, head, elem, nelem);
	return nelem;
}
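
/*
 * Reservation-list sketch, illustrative only ('my_table' is hypothetical).
 * A caller can pre-reserve a batch of LT_RESERVED elements, pass around the
 * 64-bit ID of the list head, and later either consume the elements one at a
 * time or release the remainder in bulk:
 *
 *	// reserve 4 linked elements; the head's ID names the whole list
 *	struct lt_elem *head = ltable_alloc_elem(&my_table, LT_RESERVED, 4, 0);
 *	uint64_t reserved_id = head->lt_id.id;
 *
 *	// consume one reserved element, converting it to LT_LINK;
 *	// 'reserved_id' is updated to the new list head (or 0 if empty)
 *	struct lt_elem *e = lt_elem_list_pop(&my_table, &reserved_id, LT_LINK);
 *
 *	// give back whatever is still on the reserved list
 *	lt_elem_list_release(&my_table,
 *	    lt_elem_list_first(&my_table, reserved_id), LT_RESERVED);
 */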