/*
 * Copyright (c) 2020-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdbool.h>
#include <stdint.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <os/atomic_private.h>

#include "log_mem.h"

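/*
 * A simple buddy allocator for log memory. The backing buffer of
 * 2^lm_cap_order bytes is managed as an implicit binary tree stored in the
 * lm_mem_map bitmap: the root (block 0) covers the whole buffer and each
 * level splits its parent blocks in half, down to blocks of 2^lm_min_order
 * bytes. BLOCK_LEVEL_BASE(l) is the index of the first block of level l,
 * BLOCK_PARENT/BLOCK_LCHILD/BLOCK_BUDDY navigate the tree, and BLOCK_INDEX
 * maps an address and block size back to the respective block index. A set
 * bit means the respective block overlaps an existing allocation.
 */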
#define BLOCK_INVALID ((size_t)-1)
#define BLOCK_LEVEL_BASE(level) ((1 << (level)) - 1)
#define BLOCK_SIZE(level) (1 << (level))
#define BLOCK_PARENT(b) (((b) % 2 == 0) ? ((b) >> 1) - 1 : ((b) >> 1))
#define BLOCK_LCHILD(b) (((b) << 1) + 1)
#define BLOCK_BUDDY(b) (((b) & 0x1) ? (b) + 1 : (b) - 1)
#define BLOCK_INDEX(lm, l, a, s) \
	(BLOCK_LEVEL_BASE(l) + ((uintptr_t)(a) - (uintptr_t)(lm)->lm_mem) / (s))

#define MAP_SIZE(size_order, min_order) \
	MAX(1, (1 << ((size_order) - (min_order) + 1)) / 8)

#define BITMAP_BUCKET_SIZE (8 * sizeof(((logmem_t *)0)->lm_mem_map[0]))
#define BITMAP_BUCKET(i) ((i) / BITMAP_BUCKET_SIZE)
#define BITMAP_BIT(i) (uint8_t)(1 << (BITMAP_BUCKET_SIZE - ((i) % BITMAP_BUCKET_SIZE) - 1))

#define logmem_lock(lock, lm) if ((lock)) lck_spin_lock(&(lm)->lm_lock)
#define logmem_unlock(lock, lm) if ((lock)) lck_spin_unlock(&(lm)->lm_lock)

static LCK_GRP_DECLARE(logmem_lck_grp, "logmem_lck_grp");

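/*
 * Accessors for the allocation bitmap. Each tree block is tracked by a
 * single bit in lm_mem_map, stored most-significant bit first within each
 * bitmap bucket.
 */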
static bool
bitmap_get(logmem_t *lm, size_t block)
{
	return lm->lm_mem_map[BITMAP_BUCKET(block)] & BITMAP_BIT(block);
}

static void
bitmap_set(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] |= BITMAP_BIT(block);
}

static void
bitmap_clear(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] &= ~BITMAP_BIT(block);
}

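/*
 * Marks the path from a newly reserved block up to the topmost allocatable
 * level as allocated, so that no enclosing (larger) block can be handed out
 * while this block is in use.
 */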
static void
bitmap_reserve_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);

	for (ssize_t next = BLOCK_PARENT(block); next >= top_block; next = BLOCK_PARENT(next)) {
		/*
		 * If the rest of the root path is already marked as
		 * allocated, we are done.
		 */
		if (bitmap_get(lm, next)) {
			break;
		}
		bitmap_set(lm, next);
	}
}

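/*
 * Walks the path from a freed block towards the topmost allocatable level,
 * clearing each parent until a buddy subtree that still holds an allocation
 * is found.
 */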
static void
bitmap_release_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);
	int buddy_allocated = 0;

	while (block > top_block) {
		buddy_allocated = bitmap_get(lm, BLOCK_BUDDY(block));
		block = BLOCK_PARENT(block);
		/*
		 * If another allocation is still in place within the parent
		 * subtree, we cannot mark the rest of the root path as free.
		 */
		if (buddy_allocated) {
			break;
		}
		bitmap_clear(lm, block);
	}
}

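/*
 * Applies fun (bitmap_set or bitmap_clear) to a block at the given level
 * and to all of its descendants down to the smallest block level.
 */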
static void
bitmap_update_subtree(logmem_t *lm, size_t level, size_t block, void (*fun)(logmem_t *, size_t))
{
	const size_t lcount = lm->lm_cap_order - lm->lm_min_order - level + 1;

	for (size_t l = 0, n = 1; l < lcount; l++, n <<= 1) {
		for (int i = 0; i < n; i++) {
			fun(lm, block + i);
		}
		block = BLOCK_LCHILD(block);
	}
}

static void
bitmap_release_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_clear);
}

static void
bitmap_reserve_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_set);
}

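/*
 * Returns the tree level of the smallest supported block size that can hold
 * the requested amount, or BLOCK_INVALID if the amount does not fit any
 * allowed block size.
 */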
static size_t
block_size_level(logmem_t *lm, size_t amount)
{
	for (size_t l = lm->lm_min_order; l <= lm->lm_max_order; l++) {
		if (amount <= BLOCK_SIZE(l)) {
			return lm->lm_cap_order - l;
		}
	}
	return BLOCK_INVALID;
}

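/*
 * Translates an address and a (power of two) allocation size into the index
 * of the respective tree block. Returns the block level, or BLOCK_INVALID if
 * the size is not supported.
 */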
static size_t
block_locate(logmem_t *lm, void *addr, size_t amount, size_t *block)
{
	size_t level = block_size_level(lm, amount);
	if (level != BLOCK_INVALID) {
		*block = BLOCK_INDEX(lm, level, addr, amount);
	}
	return level;
}

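/*
 * Scans the given level for a free block, marks its root path and subtree
 * as allocated and returns its index relative to the level base. Returns
 * BLOCK_INVALID when the level has no free block left.
 */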
static size_t
block_reserve(logmem_t *lm, size_t level)
{
	assert(level != BLOCK_INVALID);

	const size_t base = BLOCK_LEVEL_BASE(level);
	const size_t end = base + BLOCK_SIZE(level);

	for (size_t block = base; block < end; block++) {
		if (!bitmap_get(lm, block)) {
			bitmap_reserve_root(lm, block);
			bitmap_reserve_subtree(lm, level, block);
			return block - base;
		}
	}

	return BLOCK_INVALID;
}

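/*
 * Allocates the smallest block capable of holding *amount bytes and rounds
 * *amount up to the actual block size. Failure reasons (logmem not ready,
 * unsupported size, no free block) are tracked in the respective counters.
 * Returns NULL on failure.
 */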
static void *
logmem_alloc_impl(logmem_t *lm, size_t *amount, bool lock_lm)
{
	assert(amount);

	os_atomic_inc(&lm->lm_cnt_allocations, relaxed);

	if (!lm->lm_mem) {
		os_atomic_inc(&lm->lm_cnt_failed_lmoff, relaxed);
		return NULL;
	}

	if (*amount == 0 || *amount > BLOCK_SIZE(lm->lm_max_order)) {
		os_atomic_inc(&lm->lm_cnt_failed_size, relaxed);
		return NULL;
	}

	size_t level = block_size_level(lm, *amount);
	logmem_lock(lock_lm, lm);
	size_t block = block_reserve(lm, level);
	logmem_unlock(lock_lm, lm);

	if (block == BLOCK_INVALID) {
		os_atomic_inc(&lm->lm_cnt_failed_full, relaxed);
		return NULL;
	}

	*amount = BLOCK_SIZE(lm->lm_cap_order - level);
	os_atomic_sub(&lm->lm_cnt_free, (uint32_t)*amount, relaxed);

	return &lm->lm_mem[block * *amount];
}

void *
logmem_alloc_locked(logmem_t *lm, size_t *amount)
{
	return logmem_alloc_impl(lm, amount, true);
}

void *
logmem_alloc(logmem_t *lm, size_t *amount)
{
	return logmem_alloc_impl(lm, amount, false);
}

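/*
 * Returns a previously allocated block to the allocator. The amount must be
 * the exact (power of two) size returned by the matching allocation.
 */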
static void
logmem_free_impl(logmem_t *lm, void *addr, size_t amount, bool lock_lm)
{
	assert(addr);
	assert(amount > 0 && ((amount & (amount - 1)) == 0));

	size_t block = BLOCK_INVALID;
	size_t level = block_locate(lm, addr, amount, &block);
	assert(level != BLOCK_INVALID);
	assert(block != BLOCK_INVALID);

	assert(lm->lm_mem);

	logmem_lock(lock_lm, lm);
	bitmap_release_root(lm, block);
	bitmap_release_subtree(lm, level, block);
	logmem_unlock(lock_lm, lm);

	os_atomic_add(&lm->lm_cnt_free, (uint32_t)amount, relaxed);
}

void
logmem_free_locked(logmem_t *lm, void *addr, size_t amount)
{
	logmem_free_impl(lm, addr, amount, true);
}

void
logmem_free(logmem_t *lm, void *addr, size_t amount)
{
	logmem_free_impl(lm, addr, amount, false);
}

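/*
 * Returns the buffer size a caller has to provide to logmem_init() for a
 * logmem of 2^size_order bytes: the (page rounded) payload area plus the
 * (page rounded) allocation bitmap.
 */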
size_t
logmem_required_size(size_t size_order, size_t min_order)
{
	return round_page(BLOCK_SIZE(size_order)) + round_page(MAP_SIZE(size_order, min_order));
}

size_t
logmem_max_size(const logmem_t *lm)
{
	return BLOCK_SIZE(lm->lm_max_order);
}

bool
logmem_empty(const logmem_t *lm)
{
	return lm->lm_cnt_free == BLOCK_SIZE(lm->lm_cap_order);
}

bool
logmem_ready(const logmem_t *lm)
{
	return lm->lm_mem != NULL;
}

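/*
 * Sets up a logmem on top of a caller-provided buffer of at least
 * logmem_required_size(size_order, min_order) bytes. The buffer holds the
 * payload area followed by the allocation bitmap. Allocations are served in
 * blocks between 2^min_order and 2^max_order bytes.
 */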
void
logmem_init(logmem_t *lm, void *lm_mem, size_t lm_mem_size, size_t size_order, size_t min_order, size_t max_order)
{
	assert(lm_mem_size >= logmem_required_size(size_order, min_order));
	assert(size_order >= max_order);
	assert(max_order >= min_order);

	bzero(lm, sizeof(*lm));
	bzero(lm_mem, lm_mem_size);

	lck_spin_init(&lm->lm_lock, &logmem_lck_grp, LCK_ATTR_NULL);
	lm->lm_mem = lm_mem;
	lm->lm_mem_map = (uint8_t *)((uintptr_t)lm_mem + round_page(BLOCK_SIZE(size_order)));
	lm->lm_mem_size = lm_mem_size;
	lm->lm_cap_order = size_order;
	lm->lm_min_order = min_order;
	lm->lm_max_order = max_order;
	lm->lm_cnt_free = BLOCK_SIZE(size_order);
}
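
/*
 * Illustrative usage sketch, not part of this file. The order values and the
 * way the backing buffer is obtained are assumptions made for the example
 * only.
 *
 *	static logmem_t lm;
 *
 *	// Space for a 2^14 = 16K payload area plus its allocation bitmap.
 *	size_t buf_size = logmem_required_size(14, 5);
 *	void *buf = <caller-provided memory of buf_size bytes>;
 *
 *	// Handed-out blocks range from 2^5 = 32 to 2^10 = 1024 bytes.
 *	logmem_init(&lm, buf, buf_size, 14, 5, 10);
 *
 *	size_t amount = 100; // rounded up to the 128-byte block size
 *	void *p = logmem_alloc_locked(&lm, &amount);
 *	if (p != NULL) {
 *		logmem_free_locked(&lm, p, amount);
 *	}
 */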