/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * On devices that support it, this test ensures that a mach exception is
 * generated when a matrix-math instruction is used before matrix mode is
 * enabled, and that the matrix register file is correctly preserved or
 * zeroed on context switch.
 */

/*
 * IMPLEMENTATION NOTE:
 *
 * This test code goes to some unusual lengths to avoid calling out to libc or
 * libdarwintest while the CPU is in streaming SVE mode (i.e., between
 * ops->start() and ops->stop()). Both of these libraries are built with SIMD
 * instructions that will cause the test executable to crash while in streaming
 * SVE mode.
 *
 * Ordinarily this is the wrong way to solve this problem. Functions that use
 * streaming SVE mode should have annotations telling the compiler so, and the
 * compiler will automatically generate appropriate interworking code. However,
 * this interworking code will stash SME state to memory and temporarily exit
 * streaming SVE mode. We're specifically testing how xnu manages live SME
 * register state, so we can't let the compiler stash and disable this state
 * behind our backs.
 */
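
/*
 * For reference, the annotation-based approach that this test deliberately
 * avoids would look roughly like the sketch below (assuming a toolchain with
 * SME ACLE support; the function names are illustrative only):
 *
 *     void process_tile(void) __arm_streaming;      // callee expects streaming mode
 *     __arm_locally_streaming void helper(void);    // compiler brackets body with smstart/smstop
 *
 * Around calls like these, the compiler emits interworking code that spills
 * and reloads SME state, which is exactly what this test must not trigger.
 */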

#ifdef __arm64__
#include <mach/error.h>
#endif /* __arm64__ */

#include <darwintest.h>
#include <pthread.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach/thread_act.h>
#include <mach/thread_status.h>
#include <mach/exception.h>
#include <machine/cpu_capabilities.h>
#include <sys/types.h>
#include <sys/sysctl.h>

#include "arm_matrix.h"
#include "exc_helpers.h"
#include "test_utils.h"

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.arm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_OWNER("ghackmann"),
	T_META_RUN_CONCURRENTLY(true)
	);

#ifdef __arm64__

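/* Fall back to a local definition in case the SDK headers do not provide this subcode. */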
#ifndef EXC_ARM_SME_DISALLOWED
#define EXC_ARM_SME_DISALLOWED 2
#endif

/* Whether we caught the EXC_BAD_INSTRUCTION mach exception or not. */
static volatile bool mach_exc_caught = false;

static size_t
bad_instruction_exception_handler(
	__unused mach_port_t task,
	__unused mach_port_t thread,
	exception_type_t type,
	mach_exception_data_t codes)
{
	T_QUIET; T_ASSERT_EQ(type, EXC_BAD_INSTRUCTION, "Caught an EXC_BAD_INSTRUCTION exception");
	T_QUIET; T_ASSERT_EQ(codes[0], (uint64_t)EXC_ARM_UNDEFINED, "The exception code is EXC_ARM_UNDEFINED");

	mach_exc_caught = true;
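	/* Returning the faulting instruction's length (4 bytes on arm64) tells the
	 * exception helper to advance the PC past it before resuming the thread. */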
	return 4;
}
#endif /* __arm64__ */


#ifdef __arm64__
static void
test_matrix_not_started(const struct arm_matrix_operations *ops)
{
	if (!ops->is_available()) {
		T_SKIP("Running on non-%s target, skipping...", ops->name);
	}

	mach_port_t exc_port = create_exception_port(EXC_MASK_BAD_INSTRUCTION);

	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();
	bzero(d, size);

	ops->start();
	ops->load_one_vector(d);
	ops->stop();
	T_PASS("%s instruction after start instruction should not cause an exception", ops->name);

	mach_exc_caught = false;
	run_exception_handler(exc_port, bad_instruction_exception_handler);
	ops->load_one_vector(d);
	T_EXPECT_TRUE(mach_exc_caught, "%s instruction before start instruction should cause an exception", ops->name);

	free(d);
}
#endif /* __arm64__ */


T_DECL(sme_not_started,
    "Test that SME instructions before smstart generate mach exceptions.", T_META_TAG_VM_NOT_ELIGIBLE)
{
#ifndef __arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else
	test_matrix_not_started(&sme_operations);
#endif
}

#ifdef __arm64__
struct test_thread;
typedef bool (*thread_fn_t)(struct test_thread const* thread);

struct test_thread {
	pthread_t thread;
	pthread_t companion_thread;
	thread_fn_t thread_fn;
	uint32_t cpuid;
	uint32_t thread_id;
	const struct arm_matrix_operations *ops;
};

static uint32_t barrier;
static pthread_cond_t barrier_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t barrier_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t end_barrier;
static pthread_cond_t end_barrier_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t end_barrier_lock = PTHREAD_MUTEX_INITIALIZER;

static void
test_thread_barrier(void)
{
	/* Wait for all threads to reach this barrier */
	pthread_mutex_lock(&barrier_lock);
	barrier--;
	if (barrier) {
		while (barrier) {
			pthread_cond_wait(&barrier_cond, &barrier_lock);
		}
	} else {
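		/* Last thread to arrive: wake all of the waiters. */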
		pthread_cond_broadcast(&barrier_cond);
	}
	pthread_mutex_unlock(&barrier_lock);
}

static void
test_thread_notify_exited(void)
{
	pthread_mutex_lock(&end_barrier_lock);
	if (0 == --end_barrier) {
		pthread_cond_signal(&end_barrier_cond);
	}
	pthread_mutex_unlock(&end_barrier_lock);
}

static void
wait_for_test_threads(void)
{
	pthread_mutex_lock(&end_barrier_lock);
	while (end_barrier) {
		pthread_cond_wait(&end_barrier_cond, &end_barrier_lock);
	}
	pthread_mutex_unlock(&end_barrier_lock);
}

static uint32_t
ncpus(void)
{
	uint32_t ncpu;
	size_t ncpu_size = sizeof(ncpu);
	int err = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_ZERO(err, "Retrieved CPU count");

	return ncpu;
}

static int
thread_bind_cpu_unchecked(uint32_t cpuid)
{
	/*
	 * libc's sysctl() implementation calls strlen(name), which is
	 * SIMD-accelerated. Avoid this by directly invoking the libsyscall
	 * wrapper with namelen computed at compile time.
	 */
#define THREAD_BIND_CPU "kern.sched_thread_bind_cpu"
	extern int __sysctlbyname(const char *name, size_t namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
	const char *name = THREAD_BIND_CPU;
	size_t namelen = sizeof(THREAD_BIND_CPU) - 1;
	return __sysctlbyname(name, namelen, NULL, 0, &cpuid, sizeof(cpuid));
}

static void
thread_bind_cpu(uint32_t cpuid)
{
	int err = thread_bind_cpu_unchecked(cpuid);
	T_QUIET; T_ASSERT_POSIX_ZERO(err, "Bound thread to CPU %u", cpuid);
}

static void *
test_thread_shim(void *arg)
{
	struct test_thread const *thread = arg;

	thread_bind_cpu(thread->cpuid);
	bool const ret = thread->thread_fn(thread);
	test_thread_notify_exited();
	return (void *)(uintptr_t)ret;
}

static void
test_on_each_cpu(thread_fn_t thread_fn, const struct arm_matrix_operations *ops, const char *desc)
{
	uint32_t ncpu = ncpus();
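	/* Create two test threads per CPU, so each thread always has a companion
	 * on its own CPU to context switch with. */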
	uint32_t nthreads = ncpu * 2;
	barrier = 1 /* This thread */ + nthreads;
	end_barrier = nthreads;
	struct test_thread *threads = calloc(nthreads, sizeof(threads[0]));

	for (uint32_t i = 0; i < nthreads; i++) {
		threads[i].thread_fn = thread_fn;
		threads[i].cpuid = i % ncpu;
		threads[i].thread_id = i;
		threads[i].ops = ops;

		int const err = pthread_create(&threads[i].thread, NULL, test_thread_shim, &threads[i]);
		T_QUIET; T_ASSERT_EQ(err, 0, "%s: created thread #%u", desc, i);
		// Register this thread as the companion of the other thread pinned to the same CPU.
		threads[(ncpu + i) % nthreads].companion_thread = threads[i].thread;
	}

	// Wait for all companion_threads to be set.
	test_thread_barrier();

	// Like pthread_join()ing all threads, but without the priority-boosting shenanigans.
	wait_for_test_threads();

	for (uint32_t i = 0; i < nthreads; i++) {
		void *thread_ret_ptr;
		int err = pthread_join(threads[i].thread, &thread_ret_ptr);
		T_QUIET; T_ASSERT_EQ(err, 0, "%s: joined thread #%u", desc, i);

		bool thread_ret = (uintptr_t)thread_ret_ptr;
		if (thread_ret) {
			T_PASS("%s: thread #%u passed", desc, i);
		} else {
			T_FAIL("%s: thread #%u failed", desc, i);
		}
	}

	free(threads);
}

static bool
active_context_switch_thread(struct test_thread const* thread)
{
	const struct arm_matrix_operations *ops = thread->ops;
	const uint32_t thread_id = thread->thread_id;
	size_t size = ops->data_size();
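	/* Fill d1 with a byte pattern unique to this thread, so state leaked from
	 * another thread is distinguishable from our own. */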
	uint8_t *d1 = ops->alloc_data();
	memset(d1, (char)thread_id, size);

	uint8_t *d2 = ops->alloc_data();

	test_thread_barrier();

	// companion_thread will be valid only after the barrier.
	thread_t const companion_thread = pthread_mach_thread_np(thread->companion_thread);
	T_QUIET; T_ASSERT_NE(companion_thread, THREAD_NULL, "pthread_mach_thread_np");

	bool ok = true;
	for (unsigned int i = 0; i < 100000 && ok; i++) {
		ops->start();
		ops->load_data(d1);

		/*
		 * Rescheduling with the matrix registers active must preserve
		 * state, even after a context switch.
		 */
		thread_switch(companion_thread, SWITCH_OPTION_NONE, 0);

		ops->store_data(d2);
		ops->stop();

		if (memcmp(d1, d2, size)) {
			ok = false;
		}
	}

	free(d2);
	free(d1);
	return ok;
}

static bool
inactive_context_switch_thread(struct test_thread const* thread)
{
	const struct arm_matrix_operations *ops = thread->ops;
	const uint32_t thread_id = thread->thread_id;
	size_t size = ops->data_size();
	uint8_t *d1 = ops->alloc_data();
	memset(d1, (char)thread_id, size);

	uint8_t *d2 = ops->alloc_data();

	test_thread_barrier();

	// companion_thread will be valid only after the barrier.
	thread_t const companion_thread = pthread_mach_thread_np(thread->companion_thread);
	T_QUIET; T_ASSERT_NE(companion_thread, THREAD_NULL, "pthread_mach_thread_np");

	bool ok = true;
	for (unsigned int i = 0; i < 100000 && ok; i++) {
		ops->start();
		ops->load_data(d1);
		ops->stop();

		/*
		 * Rescheduling with the matrix registers inactive may preserve
		 * state or may zero it out.
		 */
		thread_switch(companion_thread, SWITCH_OPTION_NONE, 0);

		ops->start();
		ops->store_data(d2);
		ops->stop();

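		/* Each byte must either be preserved or zeroed; any other value is
		 * state leaked in from another thread. */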
		for (size_t j = 0; j < size; j++) {
			if (d1[j] != d2[j] && d2[j] != 0) {
				ok = false;
			}
		}
	}

	free(d2);
	free(d1);
	return ok;
}

static void
test_thread_migration(const struct arm_matrix_operations *ops)
{
	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();
	arc4random_buf(d, size);

	uint32_t ncpu = ncpus();
	uint8_t *cpu_d[ncpu];
	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		cpu_d[cpuid] = ops->alloc_data();
		memset(cpu_d[cpuid], 0, size);
	}

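	/* Load the matrix state once, then migrate across every CPU in turn,
	 * snapshotting the live register contents on each one. */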
	ops->start();
	ops->load_data(d);
	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		int err = thread_bind_cpu_unchecked(cpuid);
		if (err) {
			ops->stop();
			T_ASSERT_POSIX_ZERO(err, "Bound thread to CPU %u", cpuid);
		}
		ops->store_data(cpu_d[cpuid]);
	}
	ops->stop();

	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		int cmp = memcmp(d, cpu_d[cpuid], size);
		T_EXPECT_EQ(cmp, 0, "Matrix state migrated to CPU %u", cpuid);
		free(cpu_d[cpuid]);
	}
	free(d);
}
#endif /* __arm64__ */


T_DECL(sme_context_switch,
    "Test that SME contexts are migrated during context switch and do not leak between process contexts.",
    T_META_BOOTARGS_SET("enable_skstb=1"),
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_SME2", 1),
    XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
{
#ifndef __arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else
	if (!sme_operations.is_available()) {
		T_SKIP("Running on non-SME target, skipping...");
	}

	test_thread_migration(&sme_operations);
	test_on_each_cpu(active_context_switch_thread, &sme_operations, "SME context migrates when active");
	test_on_each_cpu(inactive_context_switch_thread, &sme_operations, "SME context does not leak across processes");
#endif
}


#if __arm64__
/*
 * Sequence of events in thread_{get,set}_state test:
 *
 * 1. Parent creates child thread.
 * 2. Child thread signals parent thread to proceed.
 * 3. Parent populates child's matrix state registers via thread_set_state(),
 *    and signals child thread to proceed.
 * 4. Child arbitrarily updates each byte in its local matrix register state
 *    by adding 1, and signals parent thread to proceed.
 * 5. Parent reads back the child's updated matrix state with
 *    thread_get_state(), and confirms that every byte has been modified as
 *    expected.
 */
static enum thread_state_test_state {
	INIT,
	CHILD_READY,
	PARENT_POPULATED_MATRIX_STATE,
	CHILD_UPDATED_MATRIX_STATE,
	DONE
} thread_state_test_state;

static pthread_cond_t thread_state_test_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t thread_state_test_lock = PTHREAD_MUTEX_INITIALIZER;

static void
wait_for_thread_state_test_state(enum thread_state_test_state state)
{
	pthread_mutex_lock(&thread_state_test_lock);
	while (thread_state_test_state != state) {
		pthread_cond_wait(&thread_state_test_cond, &thread_state_test_lock);
	}
	pthread_mutex_unlock(&thread_state_test_lock);
}

static void
thread_set_state_test_state(enum thread_state_test_state state)
{
	pthread_mutex_lock(&thread_state_test_lock);
	thread_state_test_state = state;
	pthread_cond_broadcast(&thread_state_test_cond);
	pthread_mutex_unlock(&thread_state_test_lock);
}

static void *
test_matrix_thread_state_child(void *arg)
{
	const struct arm_matrix_operations *ops = arg;

	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();

	thread_set_state_test_state(CHILD_READY);
	wait_for_thread_state_test_state(PARENT_POPULATED_MATRIX_STATE);
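	/* Step 4 of the sequence above: bump every byte of the live matrix state by one. */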
	ops->store_data(d);
	for (size_t i = 0; i < size; i++) {
		d[i]++;
	}
	ops->load_data(d);
	thread_set_state_test_state(CHILD_UPDATED_MATRIX_STATE);

	wait_for_thread_state_test_state(DONE);
	ops->stop();
	free(d);
	return NULL;
}

static void
test_matrix_thread_state(const struct arm_matrix_operations *ops)
{
	if (!ops->is_available()) {
		T_SKIP("Running on non-%s target, skipping...", ops->name);
	}

	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();
	arc4random_buf(d, size);

	thread_state_test_state = INIT;

	pthread_t thread;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wincompatible-pointer-types-discards-qualifiers"
	void *arg = ops;
#pragma clang diagnostic pop
	int err = pthread_create(&thread, NULL, test_matrix_thread_state_child, arg);
	T_QUIET; T_ASSERT_EQ(err, 0, "pthread_create()");

	mach_port_t mach_thread = pthread_mach_thread_np(thread);
	T_QUIET; T_ASSERT_NE(mach_thread, MACH_PORT_NULL, "pthread_mach_thread_np()");

	wait_for_thread_state_test_state(CHILD_READY);
	kern_return_t kr = ops->thread_set_state(mach_thread, d);
	T_QUIET; T_ASSERT_EQ(kr, KERN_SUCCESS, "%s thread_set_state()", ops->name);
	thread_set_state_test_state(PARENT_POPULATED_MATRIX_STATE);

	wait_for_thread_state_test_state(CHILD_UPDATED_MATRIX_STATE);
	uint8_t *thread_d = ops->alloc_data();
	kr = ops->thread_get_state(mach_thread, thread_d);
	T_QUIET; T_ASSERT_EQ(kr, KERN_SUCCESS, "%s thread_get_state()", ops->name);
	for (size_t i = 0; i < size; i++) {
		d[i]++;
	}
	T_EXPECT_EQ(memcmp(d, thread_d, size), 0, "thread_get_state() read expected %s data from child thread", ops->name);

	thread_set_state_test_state(DONE);
	free(thread_d);
	free(d);
	pthread_join(thread, NULL);
}

#endif /* __arm64__ */

#ifdef __arm64__

T_DECL(sme_thread_state,
    "Test thread_{get,set}_state with SME thread state.",
    XNU_T_META_SOC_SPECIFIC)
{
	test_matrix_thread_state(&sme_operations);
}

T_DECL(sme_exception_ports,
    "Test that thread_set_exception_ports rejects SME thread-state flavors.",
    XNU_T_META_SOC_SPECIFIC)
{
	mach_port_t exc_port;
	mach_port_t task = mach_task_self();
	mach_port_t thread = mach_thread_self();

	kern_return_t kr = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &exc_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Allocated mach exception port");
	kr = mach_port_insert_right(task, exc_port, exc_port, MACH_MSG_TYPE_MAKE_SEND);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Inserted a SEND right into the exception port");

	kr = thread_set_exception_ports(thread, EXC_MASK_ALL, exc_port, EXCEPTION_STATE, ARM_THREAD_STATE64);
	T_EXPECT_MACH_SUCCESS(kr, "thread_set_exception_ports accepts flavor %u", (unsigned int)ARM_THREAD_STATE64);

	for (thread_state_flavor_t flavor = ARM_SME_STATE; flavor <= ARM_SME2_STATE; flavor++) {
		kr = thread_set_exception_ports(thread, EXC_MASK_ALL, exc_port, EXCEPTION_STATE, flavor);
		T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "thread_set_exception_ports rejects flavor %u", (unsigned int)flavor);
	}
}

T_DECL(sme_max_svl_b_sysctl,
    "Test the hw.optional.arm.sme_max_svl_b sysctl.",
    XNU_T_META_SOC_SPECIFIC)
{
	unsigned int max_svl_b;
	size_t max_svl_b_size = sizeof(max_svl_b);

	int err = sysctlbyname("hw.optional.arm.sme_max_svl_b", &max_svl_b, &max_svl_b_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(hw.optional.arm.sme_max_svl_b)");
	if (sme_operations.is_available()) {
		/* Architecturally, SVL must be a power of two between 128 and 2048 bits. */
		const unsigned int ARCH_MIN_SVL_B = 128 / 8;
		const unsigned int ARCH_MAX_SVL_B = 2048 / 8;

		T_EXPECT_EQ(__builtin_popcount(max_svl_b), 1, "Maximum SVL_B is a power of 2");
		T_EXPECT_GE(max_svl_b, ARCH_MIN_SVL_B, "Maximum SVL_B >= architectural minimum");
		T_EXPECT_LE(max_svl_b, ARCH_MAX_SVL_B, "Maximum SVL_B <= architectural maximum");
	} else {
		T_EXPECT_EQ(max_svl_b, 0, "Maximum SVL_B is 0 when SME is unavailable");
	}
}
#endif /* __arm64__ */