1 #include <mach_ldebug.h>
2 #include <debug.h>
3
4 #include <mach/kern_return.h>
5 #include <mach/mach_host_server.h>
6 #include <mach_debug/lockgroup_info.h>
7
8 #include <os/atomic.h>
9
10 #include <kern/locks.h>
11 #include <kern/smr_hash.h>
12 #include <kern/misc_protos.h>
13 #include <kern/kalloc.h>
14 #include <kern/thread.h>
15 #include <kern/processor.h>
16 #include <kern/sched_prim.h>
17 #include <kern/debug.h>
18 #include <libkern/section_keywords.h>
19 #include <machine/atomic.h>
20 #include <machine/machine_cpu.h>
21 #include <machine/atomic.h>
22 #include <string.h>
23 #include <kern/kalloc.h>
24
25 #include <sys/kdebug.h>
26 #include <sys/errno.h>
27
#if SCHED_HYGIENE_DEBUG
/*
 * Convert a duration in microseconds into mach absolute time units.
 */
static uint64_t
sane_us2abs(uint64_t us)
{
	uint64_t abs_interval = 0;

	nanoseconds_to_absolutetime(us * NSEC_PER_USEC, &abs_interval);
	return abs_interval;
}
#endif
37
38 #if !KASAN
39 static void
hw_lck_ticket_test_wait_for_delta(hw_lck_ticket_t * lck,uint8_t delta,int msec)40 hw_lck_ticket_test_wait_for_delta(hw_lck_ticket_t *lck, uint8_t delta, int msec)
41 {
42 hw_lck_ticket_t tmp;
43
44 delta *= HW_LCK_TICKET_LOCK_INCREMENT;
45 for (int i = 0; i < msec * 1000; i++) {
46 tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed);
47 #if CONFIG_PV_TICKET
48 const uint8_t cticket = tmp.cticket &
49 ~HW_LCK_TICKET_LOCK_PVWAITFLAG;
50 #else
51 const uint8_t cticket = tmp.cticket;
52 #endif
53 if ((uint8_t)(tmp.nticket - cticket) == delta) {
54 return;
55 }
56 delay(1);
57 }
58 assert(false);
59 }
60
61 __dead2
62 static void
hw_lck_ticket_allow_invalid_worker(void * arg,wait_result_t __unused wr)63 hw_lck_ticket_allow_invalid_worker(void *arg, wait_result_t __unused wr)
64 {
65 hw_lck_ticket_t *lck = arg;
66 hw_lock_status_t rc;
67
68 /* wait until we can observe the test take the lock */
69 hw_lck_ticket_test_wait_for_delta(lck, 1, 10);
70
71 rc = hw_lck_ticket_lock_allow_invalid(lck,
72 &hw_lock_test_give_up_policy, NULL);
73 assert(rc == HW_LOCK_INVALID); // because the other thread invalidated it
74 assert(preemption_enabled());
75
76 thread_terminate_self();
77 __builtin_unreachable();
78 }
79 #endif /* !KASAN */
80
/*
 * Exercises hw_lck_ticket_lock_allow_invalid() across its outcomes:
 *  - HW_LOCK_INVALID on zero-filled, never-initialized memory,
 *  - HW_LOCK_ACQUIRED on a valid initialized lock,
 *  - HW_LOCK_INVALID observed by a racing worker thread when the lock
 *    is invalidated under it (non-KASAN only),
 *  - HW_LOCK_INVALID on a lock whose backing page has been unmapped.
 *
 * Returns 0 and sets *out = 1 on success, ENOMEM if the backing page
 * cannot be allocated; all other failures assert.
 */
static int
hw_lck_ticket_allow_invalid_test(__unused int64_t in, int64_t *out)
{
	vm_offset_t addr = 0;
	hw_lck_ticket_t *lck;
	kern_return_t kr;
	hw_lock_status_t rc;

	printf("%s: STARTING\n", __func__);

	/* KMA_KOBJECT so the page can later be depopulated in place below */
	kr = kmem_alloc(kernel_map, &addr, PAGE_SIZE,
	    KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		printf("%s: kma failed (%d)\n", __func__, kr);
		return ENOMEM;
	}

	lck = (hw_lck_ticket_t *)addr;
	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_INVALID); // because the lock is 0
	assert(preemption_enabled());

	hw_lck_ticket_init(lck, NULL);

	/* basic try-lock semantics: a second try on a held lock must fail */
	assert(hw_lck_ticket_lock_try(lck, NULL));
	assert(!hw_lck_ticket_lock_try(lck, NULL));
	hw_lck_ticket_unlock(lck);

	/* acquiring leaves preemption disabled until the unlock below */
	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_ACQUIRED); // because the lock is initialized
	assert(!preemption_enabled());

#if SCHED_HYGIENE_DEBUG
	if (os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed) < sane_us2abs(20 * 1000)) {
		/*
		 * This test currently relies on timeouts that cannot always
		 * be guaranteed (rdar://84691107). Abandon the measurement if
		 * we have a tight timeout.
		 */
		abandon_preemption_disable_measurement();
	}
#endif

	hw_lck_ticket_unlock(lck);
	assert(preemption_enabled());

#if !KASAN
	thread_t th;

	/* the worker contends on the lock and expects HW_LOCK_INVALID */
	kr = kernel_thread_start_priority(hw_lck_ticket_allow_invalid_worker, lck,
	    BASEPRI_KERNEL, &th);
	assert(kr == KERN_SUCCESS);
	thread_deallocate(th);

	/* invalidate the lock */
	hw_lck_ticket_lock(lck, NULL);

	/* wait for the worker thread to take the reservation */
	hw_lck_ticket_test_wait_for_delta(lck, 2, 20);
	hw_lck_ticket_invalidate(lck);
	hw_lck_ticket_unlock(lck);
	hw_lck_ticket_destroy(lck, NULL);

	/* re-init so the unmapped-memory probe below starts from valid bits */
	hw_lck_ticket_init(lck, NULL);
#endif /* !KASAN */

	/*
	 * Pull the physical page from under the lock: allow_invalid must
	 * report HW_LOCK_INVALID rather than crash (asserted below).
	 */
	kernel_memory_depopulate(addr, PAGE_SIZE, KMA_KOBJECT,
	    VM_KERN_MEMORY_DIAG);

	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_INVALID); // because the memory is unmapped

	kmem_free(kernel_map, addr, PAGE_SIZE);

	printf("%s: SUCCESS\n", __func__);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(hw_lck_ticket_allow_invalid, hw_lck_ticket_allow_invalid_test);
164
165
/*
 * Test hash element: an SMR queue linkage plus the scalar value used as
 * the key by the SMRH_SCALAR traits defined below.
 */
struct smrh_elem {
	struct smrq_slink link;
	uintptr_t val;
};
170
/*
 * obj_try_get trait hook: test elements carry no reference count, so
 * "getting" one trivially always succeeds.
 */
static bool
smrh_elem_try_get(void *arg __unused)
{
	return true;
}
176
/* Scalar-key SMR hash traits over smrh_elem::val, in the system SMR domain. */
SMRH_TRAITS_DEFINE_SCALAR(smrh_test_traits, struct smrh_elem, val, link,
    .domain = &smr_system,
    .obj_try_get = smrh_elem_try_get,
);

/* Mutex handed to smr_hash_{grow,shrink}_and_unlock() in the tests below. */
LCK_GRP_DECLARE(smrh_test_grp, "foo");
LCK_MTX_DECLARE(smrh_test_lck, &smrh_test_grp);
184
/*
 * Basic smr_hash coverage: insert the first half of a set of unique keys,
 * then verify lookups and iteration across a grow/grow/shrink/shrink
 * resize progression, and finally destroy the table.
 *
 * Returns 0 and sets *out = 1 on success (all failures assert).
 */
static int
smr_hash_basic_test(__unused int64_t in, int64_t *out)
{
	__auto_type T = &smrh_test_traits;
	const size_t nelems = 64;
	struct smrh_elem e_buf[nelems];
	struct smr_hash h_buf;

	struct smrh_elem *elems = e_buf;
	struct smr_hash *h = &h_buf;

	/*
	 * Validates table contents: the first nelems/2 elements must be
	 * findable and enumerated exactly once; the second half (never
	 * inserted) must not be found.
	 */
	__auto_type check_content = ^{
		struct smrh_elem *e;
		smrh_key_t key;
		bool seen[nelems] = { };

		assert3u(h->smrh_count, ==, nelems / 2);

		for (int i = 0; i < nelems / 2; i++) {
			key = SMRH_SCALAR_KEY(elems[i].val);
			assert(smr_hash_entered_find(h, key, T));

			/* the second half of elems[] was never inserted */
			key = SMRH_SCALAR_KEY(elems[i + nelems / 2].val);
			assert(!smr_hash_entered_find(h, key, T));
		}

		/* iteration must visit each inserted element exactly once */
		smr_hash_foreach(e, h, T) {
			for (int i = 0; i < nelems / 2; i++) {
				if (e->val == elems[i].val) {
					assert(!seen[i]);
					seen[i] = true;
					break;
				}
			}
		}

		for (int i = 0; i < nelems / 2; i++) {
			assert(seen[i]);
		}
	};

	printf("%s: STARTING\n", __func__);

	smr_hash_init(h, 4);

	printf("%s: populating the hash with unique entries\n", __func__);

	/* strictly increasing values (step >= 1) keep the keys unique */
	uintptr_t base = early_random();
	for (size_t i = 0; i < nelems; i++) {
		elems[i].val = base + (uint16_t)early_random() + 1;
		base = elems[i].val;
	}

	for (int i = 0; i < nelems / 2; i++) {
		smr_hash_serialized_insert(h, &elems[i].link, T);
	}
	check_content();

	/* resize schedule: grow twice, then shrink back twice */
	static bool progression[4] = {
		1, 1, 0, 0,
	};

	for (int step = 0; step < ARRAY_COUNT(progression); step++) {
		if (progression[step]) {
			printf("%s: growing the hash\n", __func__);
			lck_mtx_lock(&smrh_test_lck);
			smr_hash_grow_and_unlock(h, &smrh_test_lck, T);
		} else {
			printf("%s: shrinking the hash\n", __func__);
			lck_mtx_lock(&smrh_test_lck);
			smr_hash_shrink_and_unlock(h, &smrh_test_lck, T);
		}
		/* contents must survive every resize */
		check_content();
	}

	printf("%s: destroying the hash\n", __func__);
	smr_hash_destroy(h);

	printf("%s: SUCCESS\n", __func__);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(smr_hash_basic, smr_hash_basic_test);
269
270 static int
smr_shash_basic_test(__unused int64_t in,int64_t * out)271 smr_shash_basic_test(__unused int64_t in, int64_t *out)
272 {
273 __auto_type T = &smrh_test_traits;
274 const size_t nelems = 8192;
275 const size_t never = 512; /* never inserted elements */
276 struct smr_shash h_buf;
277
278 struct smrh_elem *elems;
279 struct smr_shash *h = &h_buf;
280
281 elems = kalloc_type(struct smrh_elem, nelems, Z_WAITOK | Z_ZERO);
282 if (elems == 0) {
283 return ENOMEM;
284 }
285
286 __auto_type check_content = ^(size_t max_inserted){
287 smrh_key_t key;
288 size_t n = 0;
289
290 assert3u(counter_load(&h->smrsh_count), ==, max_inserted);
291
292 smrht_enter(T);
293
294 for (size_t i = 0; i < nelems; i++, n++) {
295 if (n > 0 && n % 32 == 0) {
296 smrht_leave(T);
297 smrht_enter(T);
298 }
299 key = SMRH_SCALAR_KEY(elems[i].val);
300 if (i < max_inserted) {
301 assert(smr_shash_entered_find(h, key, T));
302 } else {
303 assert(!smr_shash_entered_find(h, key, T));
304 }
305 }
306
307 smrht_leave(T);
308 };
309
310 printf("%s: STARTING\n", __func__);
311
312 smr_shash_init(h, SMRSH_COMPACT, 8);
313
314 printf("%s: populating the hash with unique entries\n", __func__);
315
316 uintptr_t base = early_random();
317 for (size_t i = 0; i < nelems; i++) {
318 elems[i].val = base + (uint32_t)early_random();
319 base = elems[i].val;
320 }
321
322 printf("%s: insert into the hash, triggering several resizes\n", __func__);
323
324 for (size_t i = 0; i < nelems - never; i++) {
325 smrh_key_t key = SMRH_SCALAR_KEY(elems[i].val);
326 struct smrh_elem *dupe;
327
328 if (i > 0 && i % 32 == 0) {
329 check_content(i);
330 }
331
332 dupe = smr_shash_get_or_insert(h, key, &elems[i].link, T);
333 assert(dupe == NULL);
334 }
335 check_content(nelems - never);
336
337 printf("%s: remove from the hash, triggering several resizes\n", __func__);
338
339 for (size_t i = nelems - never; i-- > 0;) {
340 smr_shash_remove(h, &elems[i].link, T);
341
342 if (i % 32 == 0) {
343 check_content(i);
344 }
345 }
346
347 printf("%s: destroying the hash\n", __func__);
348 smr_shash_destroy(h, T, NULL);
349
350 printf("%s: SUCCESS\n", __func__);
351
352 kfree_type(struct smrh_elem, nelems, elems);
353
354 *out = 1;
355 return 0;
356 }
357 SYSCTL_TEST_REGISTER(smr_shash_basic, smr_shash_basic_test);
358