1*8d741a5dSApple OSS Distributions /*
2*8d741a5dSApple OSS Distributions * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
3*8d741a5dSApple OSS Distributions *
4*8d741a5dSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*8d741a5dSApple OSS Distributions *
6*8d741a5dSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*8d741a5dSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*8d741a5dSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*8d741a5dSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*8d741a5dSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*8d741a5dSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*8d741a5dSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*8d741a5dSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*8d741a5dSApple OSS Distributions *
15*8d741a5dSApple OSS Distributions * Please obtain a copy of the License at
16*8d741a5dSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*8d741a5dSApple OSS Distributions *
18*8d741a5dSApple OSS Distributions * The Original Code and all software distributed under the License are
19*8d741a5dSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*8d741a5dSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*8d741a5dSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*8d741a5dSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*8d741a5dSApple OSS Distributions * Please see the License for the specific language governing rights and
24*8d741a5dSApple OSS Distributions * limitations under the License.
25*8d741a5dSApple OSS Distributions *
26*8d741a5dSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*8d741a5dSApple OSS Distributions */
/**
 * On devices that support it, this test ensures that a mach exception is
 * generated when a matrix instruction is executed without an active matrix
 * context, and that the matrix register file is correctly preserved or
 * zeroed on context switch.
 */
33*8d741a5dSApple OSS Distributions
34*8d741a5dSApple OSS Distributions /*
35*8d741a5dSApple OSS Distributions * IMPLEMENTATION NOTE:
36*8d741a5dSApple OSS Distributions *
37*8d741a5dSApple OSS Distributions * This test code goes to some unusual lengths to avoid calling out to libc or
38*8d741a5dSApple OSS Distributions * libdarwintest while the CPU is in streaming SVE mode (i.e., between
39*8d741a5dSApple OSS Distributions * ops->start() and ops->stop()). Both of these libraries are built with SIMD
40*8d741a5dSApple OSS Distributions * instructions that will cause the test executable to crash while in streaming
41*8d741a5dSApple OSS Distributions * SVE mode.
42*8d741a5dSApple OSS Distributions *
43*8d741a5dSApple OSS Distributions * Ordinarily this is the wrong way to solve this problem. Functions that use
44*8d741a5dSApple OSS Distributions * streaming SVE mode should have annotations telling the compiler so, and the
45*8d741a5dSApple OSS Distributions * compiler will automatically generate appropriate interworking code. However
46*8d741a5dSApple OSS Distributions * this interworking code will stash SME state to memory and temporarily exit
47*8d741a5dSApple OSS Distributions * streaming SVE mode. We're specifically testing how xnu manages live SME
48*8d741a5dSApple OSS Distributions * register state, so we can't let the compiler stash and disable this state
49*8d741a5dSApple OSS Distributions * behind our backs.
50*8d741a5dSApple OSS Distributions */
51*8d741a5dSApple OSS Distributions
52*8d741a5dSApple OSS Distributions #ifdef __arm64__
53*8d741a5dSApple OSS Distributions #include <mach/error.h>
54*8d741a5dSApple OSS Distributions #endif /* __arm64__ */
55*8d741a5dSApple OSS Distributions
56*8d741a5dSApple OSS Distributions #include <darwintest.h>
57*8d741a5dSApple OSS Distributions #include <pthread.h>
58*8d741a5dSApple OSS Distributions #include <stdlib.h>
59*8d741a5dSApple OSS Distributions #include <mach/mach.h>
60*8d741a5dSApple OSS Distributions #include <mach/thread_status.h>
61*8d741a5dSApple OSS Distributions #include <mach/exception.h>
62*8d741a5dSApple OSS Distributions #include <machine/cpu_capabilities.h>
63*8d741a5dSApple OSS Distributions #include <sys/types.h>
64*8d741a5dSApple OSS Distributions #include <sys/sysctl.h>
65*8d741a5dSApple OSS Distributions
66*8d741a5dSApple OSS Distributions #include "arm_matrix.h"
67*8d741a5dSApple OSS Distributions #include "exc_helpers.h"
68*8d741a5dSApple OSS Distributions #include "test_utils.h"
69*8d741a5dSApple OSS Distributions
/* Common darwintest metadata applied to every T_DECL in this file. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.arm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_OWNER("ghackmann"),
	T_META_RUN_CONCURRENTLY(true)
	);
77*8d741a5dSApple OSS Distributions
78*8d741a5dSApple OSS Distributions #ifdef __arm64__
79*8d741a5dSApple OSS Distributions
#ifndef EXC_ARM_SME_DISALLOWED
/*
 * Fallback for SDK headers that do not yet define this EXC_BAD_INSTRUCTION
 * subcode.  NOTE(review): not referenced in this portion of the file —
 * presumably used elsewhere or reserved for future checks; confirm.
 */
#define EXC_ARM_SME_DISALLOWED 2
#endif

/* Whether we caught the EXC_BAD_INSTRUCTION mach exception or not. */
static volatile bool mach_exc_caught = false;
86*8d741a5dSApple OSS Distributions
/*
 * Mach exception handler run by run_exception_handler() when the test
 * intentionally executes a matrix instruction outside an active context.
 * Verifies the exception is EXC_BAD_INSTRUCTION with subcode
 * EXC_ARM_UNDEFINED and records that it fired in mach_exc_caught.
 */
static size_t
bad_instruction_exception_handler(
	__unused mach_port_t task,
	__unused mach_port_t thread,
	exception_type_t type,
	mach_exception_data_t codes)
{
	T_QUIET; T_ASSERT_EQ(type, EXC_BAD_INSTRUCTION, "Caught an EXC_BAD_INSTRUCTION exception");
	T_QUIET; T_ASSERT_EQ(codes[0], (uint64_t)EXC_ARM_UNDEFINED, "The subcode is EXC_ARM_UNDEFINED");

	mach_exc_caught = true;
	/*
	 * NOTE(review): the return value appears to be the number of bytes the
	 * exc_helpers machinery advances the PC, skipping the faulting 4-byte
	 * instruction — confirm against exc_helpers.h.
	 */
	return 4;
}
100*8d741a5dSApple OSS Distributions #endif
101*8d741a5dSApple OSS Distributions
102*8d741a5dSApple OSS Distributions
103*8d741a5dSApple OSS Distributions #ifdef __arm64__
/*
 * Verify that a matrix load instruction only traps when the matrix context
 * has not been started via ops->start().
 *
 * Per the implementation note at the top of this file, no libc or
 * libdarwintest calls may occur between ops->start() and ops->stop(), so the
 * statement ordering here is deliberate and must not be rearranged.
 */
static void
test_matrix_not_started(const struct arm_matrix_operations *ops)
{
	if (!ops->is_available()) {
		T_SKIP("Running on non-%s target, skipping...", ops->name);
	}

	mach_port_t exc_port = create_exception_port(EXC_MASK_BAD_INSTRUCTION);

	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();
	bzero(d, size);

	/* With the matrix context active, the load must not fault. */
	ops->start();
	ops->load_one_vector(d);
	ops->stop();
	T_PASS("%s instruction after start instruction should not cause an exception", ops->name);

	/*
	 * With the context stopped, the same load should raise
	 * EXC_BAD_INSTRUCTION, which the handler records in mach_exc_caught
	 * (and resumes past — see bad_instruction_exception_handler).
	 */
	mach_exc_caught = false;
	run_exception_handler(exc_port, bad_instruction_exception_handler);
	ops->load_one_vector(d);
	T_EXPECT_TRUE(mach_exc_caught, "%s instruction before start instruction should cause an exception", ops->name);

	free(d);
}
129*8d741a5dSApple OSS Distributions #endif
130*8d741a5dSApple OSS Distributions
131*8d741a5dSApple OSS Distributions
/*
 * Entry point: SME instructions executed before smstart must generate an
 * EXC_BAD_INSTRUCTION mach exception.
 */
T_DECL(sme_not_started,
    "Test that SME instructions before smstart generate mach exceptions.", T_META_TAG_VM_NOT_ELIGIBLE)
{
#ifndef __arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else
	test_matrix_not_started(&sme_operations);
#endif
}
141*8d741a5dSApple OSS Distributions
142*8d741a5dSApple OSS Distributions #ifdef __arm64__
/*
 * Per-thread test body run by test_on_each_cpu(); returns true on pass.
 * The uint32_t argument is the thread's unique id.
 */
typedef bool (*thread_fn_t)(const struct arm_matrix_operations *, uint32_t);

/* Bookkeeping for one worker thread spawned by test_on_each_cpu(). */
struct test_thread {
	pthread_t thread;       /* pthread handle, for join */
	thread_fn_t thread_fn;  /* test body to run */
	uint32_t cpuid;         /* CPU this thread binds itself to */
	uint32_t thread_id;     /* unique index; doubles as the data fill pattern */
	const struct arm_matrix_operations *ops;
};
152*8d741a5dSApple OSS Distributions
/*
 * Counting barrier used to line up all worker threads before their hot
 * loops.  `barrier` is seeded with the thread count by the spawning code;
 * each arriving thread decrements it, and the last arrival wakes the rest.
 */
static uint32_t barrier;
static pthread_cond_t barrier_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t barrier_lock = PTHREAD_MUTEX_INITIALIZER;

/* Block the calling thread until all expected threads have arrived. */
static void
test_thread_barrier(void)
{
	pthread_mutex_lock(&barrier_lock);
	if (--barrier == 0) {
		/* Last arrival: release everyone else. */
		pthread_cond_broadcast(&barrier_cond);
	} else {
		/* Re-check after every wakeup to tolerate spurious wakeups. */
		do {
			pthread_cond_wait(&barrier_cond, &barrier_lock);
		} while (barrier != 0);
	}
	pthread_mutex_unlock(&barrier_lock);
}
172*8d741a5dSApple OSS Distributions
/* Return the CPU count reported by the hw.ncpu sysctl. */
static uint32_t
ncpus(void)
{
	uint32_t count = 0;
	size_t count_size = sizeof(count);
	int ret = sysctlbyname("hw.ncpu", &count, &count_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Retrieved CPU count");

	return count;
}
183*8d741a5dSApple OSS Distributions
/*
 * Bind the calling thread to `cpuid` via the kern.sched_thread_bind_cpu
 * sysctl.  Returns the sysctl's status instead of asserting so that callers
 * currently in streaming SVE mode can handle failure without invoking
 * SIMD-built libdarwintest code (see test_thread_migration()).
 */
static int
thread_bind_cpu_unchecked(uint32_t cpuid)
{
	/*
	 * libc's sysctl() implementation calls strlen(name), which is
	 * SIMD-accelerated. Avoid this by directly invoking the libsyscall
	 * wrapper with namelen computed at compile time.
	 */
#define THREAD_BIND_CPU "kern.sched_thread_bind_cpu"
	extern int __sysctlbyname(const char *name, size_t namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
	const char *name = THREAD_BIND_CPU;
	size_t namelen = sizeof(THREAD_BIND_CPU) - 1;
	return __sysctlbyname(name, namelen, NULL, 0, &cpuid, sizeof(cpuid));
}
198*8d741a5dSApple OSS Distributions
199*8d741a5dSApple OSS Distributions static void
thread_bind_cpu(uint32_t cpuid)200*8d741a5dSApple OSS Distributions thread_bind_cpu(uint32_t cpuid)
201*8d741a5dSApple OSS Distributions {
202*8d741a5dSApple OSS Distributions int err = thread_bind_cpu_unchecked(cpuid);
203*8d741a5dSApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(err, "Bound thread to CPU %u", cpuid);
204*8d741a5dSApple OSS Distributions }
205*8d741a5dSApple OSS Distributions
206*8d741a5dSApple OSS Distributions static void *
test_thread_shim(void * arg)207*8d741a5dSApple OSS Distributions test_thread_shim(void *arg)
208*8d741a5dSApple OSS Distributions {
209*8d741a5dSApple OSS Distributions struct test_thread *thread = arg;
210*8d741a5dSApple OSS Distributions
211*8d741a5dSApple OSS Distributions thread_bind_cpu(thread->cpuid);
212*8d741a5dSApple OSS Distributions bool ret = thread->thread_fn(thread->ops, thread->thread_id);
213*8d741a5dSApple OSS Distributions return (void *)(uintptr_t)ret;
214*8d741a5dSApple OSS Distributions }
215*8d741a5dSApple OSS Distributions
216*8d741a5dSApple OSS Distributions static void
test_on_each_cpu(thread_fn_t thread_fn,const struct arm_matrix_operations * ops,const char * desc)217*8d741a5dSApple OSS Distributions test_on_each_cpu(thread_fn_t thread_fn, const struct arm_matrix_operations *ops, const char *desc)
218*8d741a5dSApple OSS Distributions {
219*8d741a5dSApple OSS Distributions uint32_t ncpu = ncpus();
220*8d741a5dSApple OSS Distributions uint32_t nthreads = ncpu * 2;
221*8d741a5dSApple OSS Distributions barrier = nthreads;
222*8d741a5dSApple OSS Distributions struct test_thread *threads = calloc(nthreads, sizeof(threads[0]));
223*8d741a5dSApple OSS Distributions for (uint32_t i = 0; i < nthreads; i++) {
224*8d741a5dSApple OSS Distributions threads[i].thread_fn = thread_fn;
225*8d741a5dSApple OSS Distributions threads[i].cpuid = i % ncpu;
226*8d741a5dSApple OSS Distributions threads[i].thread_id = i;
227*8d741a5dSApple OSS Distributions threads[i].ops = ops;
228*8d741a5dSApple OSS Distributions
229*8d741a5dSApple OSS Distributions int err = pthread_create(&threads[i].thread, NULL, test_thread_shim, &threads[i]);
230*8d741a5dSApple OSS Distributions T_QUIET; T_ASSERT_EQ(err, 0, "%s: created thread #%u", desc, i);
231*8d741a5dSApple OSS Distributions }
232*8d741a5dSApple OSS Distributions
233*8d741a5dSApple OSS Distributions for (uint32_t i = 0; i < nthreads; i++) {
234*8d741a5dSApple OSS Distributions void *thread_ret_ptr;
235*8d741a5dSApple OSS Distributions int err = pthread_join(threads[i].thread, &thread_ret_ptr);
236*8d741a5dSApple OSS Distributions T_QUIET; T_ASSERT_EQ(err, 0, "%s: joined thread #%u", desc, i);
237*8d741a5dSApple OSS Distributions
238*8d741a5dSApple OSS Distributions bool thread_ret = (uintptr_t)thread_ret_ptr;
239*8d741a5dSApple OSS Distributions if (thread_ret) {
240*8d741a5dSApple OSS Distributions T_PASS("%s: thread #%u passed", desc, i);
241*8d741a5dSApple OSS Distributions } else {
242*8d741a5dSApple OSS Distributions T_FAIL("%s: thread #%u failed", desc, i);
243*8d741a5dSApple OSS Distributions }
244*8d741a5dSApple OSS Distributions }
245*8d741a5dSApple OSS Distributions
246*8d741a5dSApple OSS Distributions free(threads);
247*8d741a5dSApple OSS Distributions }
248*8d741a5dSApple OSS Distributions
/*
 * Per-thread body for the "active" context-switch test: keeps the matrix
 * context live across sched_yield() and verifies the register contents
 * survive rescheduling intact.  Returns true if state was preserved on
 * every iteration.
 *
 * The statement order between ops->start() and ops->stop() is load-bearing:
 * no libc/libdarwintest calls are allowed in that window (see the
 * implementation note at the top of this file), so memcmp() only runs after
 * ops->stop().
 */
static bool
active_context_switch_thread(const struct arm_matrix_operations *ops, uint32_t thread_id)
{
	size_t size = ops->data_size();
	uint8_t *d1 = ops->alloc_data();
	/* Per-thread fill pattern makes cross-thread state leaks detectable. */
	memset(d1, (char)thread_id, size);

	uint8_t *d2 = ops->alloc_data();

	/* Line up with the other workers to maximize context-switch pressure. */
	test_thread_barrier();

	bool ok = true;
	for (unsigned int i = 0; i < 100000 && ok; i++) {
		ops->start();
		ops->load_data(d1);

		/*
		 * Rescheduling with the matrix registers active must preserve
		 * state, even after a context switch.
		 */
		sched_yield();

		ops->store_data(d2);
		ops->stop();

		if (memcmp(d1, d2, size)) {
			ok = false;
		}
	}

	free(d2);
	free(d1);
	return ok;
}
283*8d741a5dSApple OSS Distributions
/*
 * Per-thread body for the "inactive" context-switch test: loads per-thread
 * state, leaves the matrix context *stopped* across sched_yield(), then
 * reads the registers back.  The kernel may preserve or zero inactive
 * state, but it must never expose another thread's data.  Returns true if
 * every byte read back was either this thread's pattern or zero, on every
 * iteration.
 *
 * As with the active variant, no libc/libdarwintest calls may occur between
 * ops->start() and ops->stop(); do not reorder statements here.
 */
static bool
inactive_context_switch_thread(const struct arm_matrix_operations *ops, uint32_t thread_id)
{
	size_t size = ops->data_size();
	uint8_t *d1 = ops->alloc_data();
	/* Per-thread fill pattern makes cross-thread state leaks detectable. */
	memset(d1, (char)thread_id, size);

	uint8_t *d2 = ops->alloc_data();

	/* Line up with the other workers to maximize context-switch pressure. */
	test_thread_barrier();

	bool ok = true;
	for (unsigned int i = 0; i < 100000 && ok; i++) {
		ops->start();
		ops->load_data(d1);
		ops->stop();

		/*
		 * Rescheduling with the matrix registers inactive may preserve
		 * state or may zero it out.
		 */
		sched_yield();

		ops->start();
		ops->store_data(d2);
		ops->stop();

		/* Accept our own pattern or zero; anything else is a leak. */
		for (size_t j = 0; j < size; j++) {
			if (d1[j] != d2[j] && d2[j] != 0) {
				ok = false;
			}
		}
	}

	free(d2);
	free(d1);
	return ok;
}
322*8d741a5dSApple OSS Distributions
/*
 * Load random matrix state once, then hop the thread across every CPU,
 * capturing the live register contents on each one.  Verifies the state
 * follows the thread to every CPU unchanged.
 *
 * The whole bind-and-store loop runs with the matrix context active, so no
 * libc/libdarwintest calls are allowed inside it (see the implementation
 * note at the top of this file) — hence the unchecked bind below.
 */
static void
test_thread_migration(const struct arm_matrix_operations *ops)
{
	size_t size = ops->data_size();
	uint8_t *d = ops->alloc_data();
	arc4random_buf(d, size);

	uint32_t ncpu = ncpus();
	/* One capture buffer per CPU (VLA sized by the runtime CPU count). */
	uint8_t *cpu_d[ncpu];
	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		cpu_d[cpuid] = ops->alloc_data();
		memset(cpu_d[cpuid], 0, size);
	}

	ops->start();
	ops->load_data(d);
	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		/*
		 * Use the unchecked bind so that on failure we can exit the
		 * matrix context via ops->stop() *before* calling the
		 * SIMD-built darwintest assertion.
		 */
		int err = thread_bind_cpu_unchecked(cpuid);
		if (err) {
			ops->stop();
			T_ASSERT_POSIX_ZERO(err, "Bound thread to CPU %u", cpuid);
		}
		ops->store_data(cpu_d[cpuid]);
	}
	ops->stop();

	/* Back in normal mode: compare every per-CPU capture to the source. */
	for (uint32_t cpuid = 0; cpuid < ncpu; cpuid++) {
		int cmp = memcmp(d, cpu_d[cpuid], size);
		T_EXPECT_EQ(cmp, 0, "Matrix state migrated to CPU %u", cpuid);
		free(cpu_d[cpuid]);
	}
	free(d);
}
356*8d741a5dSApple OSS Distributions #endif
357*8d741a5dSApple OSS Distributions
358*8d741a5dSApple OSS Distributions
/*
 * Entry point: SME register state must migrate with a thread across CPUs
 * and context switches, and must never leak between threads/processes.
 */
T_DECL(sme_context_switch,
    "Test that SME contexts are migrated during context switch and do not leak between process contexts.",
    T_META_BOOTARGS_SET("enable_skstb=1"),
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_SME2", 1),
    XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
{
#ifndef __arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else
	if (!sme_operations.is_available()) {
		T_SKIP("Running on non-SME target, skipping...");
	}

	test_thread_migration(&sme_operations);
	test_on_each_cpu(active_context_switch_thread, &sme_operations, "SME context migrates when active");
	test_on_each_cpu(inactive_context_switch_thread, &sme_operations, "SME context does not leak across processes");
#endif
}
377*8d741a5dSApple OSS Distributions
378