1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 /*
30 * @OSF_COPYRIGHT@
31 */
32 /*
33 * @APPLE_FREE_COPYRIGHT@
34 */
35 /*
36 * File: arm/commpage/commpage.c
37 * Purpose: Set up and export a RO/RW page
38 */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
52 #include <arm/cpu_capabilities_public.h>
53 #include <arm/misc_protos.h>
54 #include <arm/rtclock.h>
55 #include <libkern/OSAtomic.h>
56 #include <stdatomic.h>
57 #include <kern/remote_time.h>
58 #include <kern/smr.h>
59 #include <machine/atomic.h>
60 #include <machine/machine_remote_time.h>
61 #include <machine/machine_routines.h>
62
63 #include <sys/kdebug.h>
64 #include <sys/random.h>
65
66 #if CONFIG_ATM
67 #include <atm/atm_internal.h>
68 #endif
69
70 static int commpage_cpus( void );
71
72 #if defined (__arm64__)
73 #include <arm64/proc_reg.h>
74 #include <pexpert/arm64/apt_msg.h>
75 #endif
76
77
78 static void commpage_init_cpu_capabilities( void );
79
/*
 * Commpage mapping addresses and cached capability bits.  All are written
 * exactly once during boot (SECURITY_READ_ONLY_LATE data becomes read-only
 * after kernel lockdown).
 */
SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0;             /* set to _COMM_PAGE_BASE_ADDRESS once pages exist */
SECURITY_READ_ONLY_LATE(vm_address_t) commpage_rw_addr = 0;        /* kernel RW alias of the commpage data page */
SECURITY_READ_ONLY_LATE(vm_address_t) commpage_kernel_ro_addr = 0; /* kernel alias of the RO commpage data page */
SECURITY_READ_ONLY_LATE(uint64_t) _cpu_capabilities = 0;           /* capability bits computed at boot */
SECURITY_READ_ONLY_LATE(vm_address_t) commpage_rw_text_addr = 0;   /* kernel RW alias of the commpage text page */

/* User-visible addresses of the commpage text page (defined elsewhere). */
extern user64_addr_t commpage_text64_location;
extern user32_addr_t commpage_text32_location;

/* For sysctl access from BSD side */
#define ARM_FEATURE_FLAG(x) \
extern int gARM_ ## x;
#include <arm/arm_features.inc>
#undef ARM_FEATURE_FLAG

extern int gUCNormalMem;
96
/*
 * commpage_populate
 *
 * One-time boot initialization of the commpage data pages.  Creates the
 * data/text/RO pages, then fills in every static field: signature, version,
 * cache line size, CPU topology, page shifts, timebase, boottime, and the
 * randomized Apple Security Bounty target values.  Fields that change after
 * boot are maintained by the commpage_update_* routines below.
 */
void
commpage_populate(void)
{
	uint16_t c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
	pmap_create_commpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);

	/* Record the kernel aliases; commPagePtr becoming nonzero is what the
	 * update routines test before touching commpage memory. */
	commpage_rw_addr = kernel_data_addr;
	commpage_rw_text_addr = kernel_text_addr;
	commpage_kernel_ro_addr = kernel_ro_data_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	/* Signature string, truncated to the fixed-size commpage field. */
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	/* Zero timestamp leaves the time-of-day snapshot invalid until the
	 * clock subsystem publishes real values. */
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Export the data cache line size chosen by commpage_init_cpu_capabilities(). */
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();
	/* CPU topology and memory size, truncated to the commpage field widths. */
	*((uint8_t*)(_COMM_PAGE_CPU_CLUSTERS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) ml_get_cluster_count();
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	/* Debugger-allowed flag is published twice: legacy RW slot and RO slot. */
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();

	// Populate logical CPU -> logical cluster table
	ml_map_cpus_to_clusters((uint8_t*)(_COMM_PAGE_CPU_TO_CLUSTER + _COMM_PAGE_RW_OFFSET));

	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	/* Page shifts, again duplicated into legacy RW and RO slots. */
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

	/* Sentinel marks the remote-time parameters as not yet established. */
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;

#if CONFIG_QUIESCE_COUNTER
	cpu_quiescent_set_storage((_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET));
#endif /* CONFIG_QUIESCE_COUNTER */

	/*
	 * Set random values for targets in Apple Security Bounty
	 * addr should be unmapped for userland processes
	 * kaddr should be unmapped for kernel
	 */
	uint64_t asb_value, asb_addr, asb_kvalue, asb_kaddr;
	uint64_t asb_rand_vals[] = {
		0x93e78adcded4d3d5, 0xd16c5b76ad99bccf, 0x67dfbbd12c4a594e, 0x7365636e6f6f544f,
		0x239a974c9811e04b, 0xbf60e7fa45741446, 0x8acf5210b466b05, 0x67dfbbd12c4a594e
	};
	const int nrandval = sizeof(asb_rand_vals) / sizeof(asb_rand_vals[0]);
	uint8_t randidx;

	/* Random starting index; consecutive targets use consecutive table slots. */
	read_random(&randidx, sizeof(uint8_t));
	asb_value = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_value;

	// userspace faulting address should be > MACH_VM_MAX_ADDRESS
	asb_addr = asb_rand_vals[randidx++ % nrandval];
	uint64_t user_min = MACH_VM_MAX_ADDRESS;
	uint64_t user_max = UINT64_MAX;
	asb_addr %= (user_max - user_min);
	asb_addr += user_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_addr;

	asb_kvalue = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_kvalue;

	// kernel faulting address should be < VM_MIN_KERNEL_ADDRESS
	asb_kaddr = asb_rand_vals[randidx++ % nrandval];
	uint64_t kernel_min = 0x0LL;
	uint64_t kernel_max = VM_MIN_KERNEL_ADDRESS;
	asb_kaddr %= (kernel_max - kernel_min);
	asb_kaddr += kernel_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_kaddr;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_APT_MSG_POLICY + _COMM_PAGE_RW_OFFSET)) = apt_msg_policy();
#endif
}
233
#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
#define COMMPAGE_TEXT_SECTION "__commpage_text"

/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
extern char commpage_text_start[]
__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
extern char commpage_text_end[]
__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* This is defined in the commpage text section as a symbol at the start of the preemptible
 * functions */
extern char commpage_text_preemptible_functions;

#if CONFIG_ARM_PFZ
/* Byte size of the PFZ (the prefix of the commpage text section before the
 * preemptible functions); computed in commpage_text_populate(). */
static size_t size_of_pfz = 0;
#endif

/* This is the opcode for brk #666 */
#define BRK_666_OPCODE 0xD4205340
257
/*
 * commpage_text_populate
 *
 * Copy the kernel's commpage text section into the user-visible commpage
 * text page and pad the remainder of the page with trapping instructions.
 * Also records the size of the PFZ (preemption-free zone) prefix for
 * commpage_is_in_pfz64().
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Fill the rest of the page with "brk #666" so that a stray jump into
	 * the unused area traps immediately.
	 *
	 * BUGFIX: this previously used memset(), which replicates only the
	 * low byte of its fill value (0x40 here) — the result was not a
	 * "brk #666" encoding at all.  Store the full 32-bit opcode instead.
	 * A64 instructions are 4 bytes, so the copied text size is always
	 * word-aligned (asserted below).
	 */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);
	uint32_t *fill = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	uint32_t *fill_end = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + PAGE_SIZE);
	while (fill < fill_end) {
		*fill++ = BRK_666_OPCODE;
	}
#endif
}
281
282 uint32_t
commpage_is_in_pfz64(addr64_t addr64)283 commpage_is_in_pfz64(addr64_t addr64)
284 {
285 #if CONFIG_ARM_PFZ
286 if ((addr64 >= commpage_text64_location) &&
287 (addr64 < (commpage_text64_location + size_of_pfz))) {
288 return 1;
289 } else {
290 return 0;
291 }
292 #else
293 #pragma unused (addr64)
294 return 0;
295 #endif
296 }
297
298
/*
 * commpage_set_timestamp
 *
 * Publish a new time-of-day snapshot to the commpage.  Userspace reads the
 * structure lock-free, so TimeStamp_tick doubles as a validity/generation
 * word: it is zeroed first, the payload fields are stored, and only then is
 * the real tick value stored, with DMB ISH barriers ordering the three
 * phases for concurrent observers.  (NOTE(review): presumably userspace
 * readers retry while TimeStamp_tick is 0 — confirm against the userspace
 * gettimeofday path.)
 */
void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Nothing to do before the commpage exists. */
	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Phase 1: invalidate the snapshot while it is being rewritten. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 2: store the payload fields. */
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 3: publish by storing the real tick value. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
329
330 /*
331 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
332 */
333
334 void
commpage_set_memory_pressure(unsigned int pressure)335 commpage_set_memory_pressure(
336 unsigned int pressure )
337 {
338 if (commPagePtr == 0) {
339 return;
340 }
341 *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
342 }
343
344 /*
345 * Determine number of CPUs on this system.
346 */
347 static int
commpage_cpus(void)348 commpage_cpus( void )
349 {
350 int cpus;
351
352 cpus = machine_info.max_cpus;
353
354 if (cpus == 0) {
355 panic("commpage cpus==0");
356 }
357 if (cpus > 0xFF) {
358 cpus = 0xFF;
359 }
360
361 return cpus;
362 }
363
/* Accessor: CPU capability bits computed once by commpage_init_cpu_capabilities(). */
uint64_t
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}
369
/* Accessor: kernel RW alias of the commpage data page. */
vm_address_t
_get_commpage_priv_address(void)
{
	return commpage_rw_addr;
}
375
/* Accessor: kernel alias of the read-only commpage data page. */
vm_address_t
_get_commpage_ro_address(void)
{
	return commpage_kernel_ro_addr;
}
381
/* Accessor: kernel RW alias of the commpage text page. */
vm_address_t
_get_commpage_text_priv_address(void)
{
	return commpage_rw_text_addr;
}
387
388 #if defined(__arm64__)
389 /**
390 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
391 */
392 static void
commpage_init_arm_optional_features_isar0(uint64_t * commpage_bits)393 commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
394 {
395 uint64_t bits = 0;
396 uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
397
398 if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
399 gARM_FEAT_FlagM = 1;
400 bits |= kHasFEATFlagM;
401 }
402 if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
403 gARM_FEAT_FlagM2 = 1;
404 bits |= kHasFEATFlagM2;
405 }
406 if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
407 gARM_FEAT_FHM = 1;
408 bits |= kHasFeatFHM;
409 }
410 if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
411 gARM_FEAT_DotProd = 1;
412 bits |= kHasFeatDotProd;
413 }
414 if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
415 gARM_FEAT_SHA3 = 1;
416 bits |= kHasFeatSHA3;
417 }
418 if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
419 gARM_FEAT_RDM = 1;
420 bits |= kHasFeatRDM;
421 }
422 if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
423 gARM_FEAT_LSE = 1;
424 bits |= kHasFeatLSE;
425 }
426 if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
427 gARM_FEAT_SHA512 = 1;
428 bits |= kHasFeatSHA512;
429 }
430 if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
431 gARM_FEAT_CRC32 = 1;
432 bits |= kHasARMv8Crc32;
433 }
434
435 #if __ARM_V8_CRYPTO_EXTENSIONS__
436 /**
437 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
438 * supported when it actually is. To work around this, for all of the crypto
439 * extensions, just check if they're supported using the board_config.h
440 * values.
441 */
442 gARM_FEAT_PMULL = 1;
443 gARM_FEAT_SHA1 = 1;
444 gARM_FEAT_AES = 1;
445 gARM_FEAT_SHA256 = 1;
446 bits |= kHasARMv8Crypto;
447 #endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */
448
449 *commpage_bits |= bits;
450 }
451
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64ISAR1_EL1.
 *
 * Fields follow the ARM feature ID scheme (monotonic, ">=" checks).  Some
 * features set only a sysctl global and have no corresponding commpage bit.
 *
 * @param commpage_bits  commpage capability word; detected kHas* flags are
 *                       OR-ed into it.
 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only advertised when EL0 access is enabled via SCTLR_EL1.EnRCTX. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
#ifdef HAS_SPECRES2
		if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES2_EN) {
			gARM_FEAT_SPECRES2 = 1;
		}
#endif /* HAS_SPECRES2 */
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	/* GPI: implementation-defined pointer-authentication generic code. */
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARM_FEAT_PACIMP = 1;
		bits |= kHasArmv8GPI;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	/* The API field encodes successive PAuth levels; check each threshold. */
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPACCOMBINE) {
		gARM_FEAT_FPACCOMBINE = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_EBF16_EN) {
		gARM_FEAT_EBF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}
533
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64ISAR2_EL1.
 *
 * These features only set sysctl globals; no commpage capability bits are
 * defined for them, hence no commpage_bits parameter.
 */
static void
commpage_init_arm_optional_features_isar2(void)
{
	uint64_t isar2 = __builtin_arm_rsr64("ID_AA64ISAR2_EL1");

	if ((isar2 & ID_AA64ISAR2_EL1_WFxT_MASK) >= ID_AA64ISAR2_EL1_WFxT_EN) {
		gARM_FEAT_WFxT = 1;
	}
	if ((isar2 & ID_AA64ISAR2_EL1_RPRES_MASK) >= ID_AA64ISAR2_EL1_RPRES_EN) {
		gARM_FEAT_RPRES = 1;
	}
	if ((isar2 & ID_AA64ISAR2_EL1_CSSC_MASK) >= ID_AA64ISAR2_EL1_CSSC_EN) {
		gARM_FEAT_CSSC = 1;
	}
	if ((isar2 & ID_AA64ISAR2_EL1_BC_MASK) >= ID_AA64ISAR2_EL1_BC_EN) {
		gARM_FEAT_HBC = 1;
	}
}
555
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64MMFR0_EL1.
 *
 * @param commpage_bits  commpage capability word (currently untouched here:
 *                       FEAT_ECV has a sysctl but no commpage bit).
 */
static void
commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");

	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
		gARM_FEAT_ECV = 1;
	}

	/* bits is always 0 today; kept for symmetry with the other parsers. */
	*commpage_bits |= bits;
}
571
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64MMFR2_EL1.
 *
 * @param commpage_bits  commpage capability word; detected flags are OR-ed in.
 */
static void
commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");

	/* FEAT_LSE2: atomicity for unaligned/paired accesses. */
	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
		gARM_FEAT_LSE2 = 1;
		bits |= kHasFeatLSE2;
	}

	*commpage_bits |= bits;
}
588
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64PFR0_EL1.
 *
 * @param commpage_bits  commpage capability word; detected flags are OR-ed in.
 */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
		gARM_FEAT_DIT = 1;
		bits |= kHasFeatDIT;
	}
	/* AdvSIMD uses a signed-style encoding where all-ones means "not
	 * implemented", so this field is tested with != DIS first. */
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}
625
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64PFR1_EL1, plus the SME level reported by arm_sme_version().
 *
 * @param commpage_bits  commpage capability word; SME flags are OR-ed in
 *                       directly (SSBS/BTI have sysctls only).
 */
static void
commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits)
{
	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");

	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
		gARM_FEAT_SSBS = 1;
	}

	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
		gARM_FEAT_BTI = 1;
	}

	/* SME level comes from arm_sme_version() (which may also reflect a
	 * boot-arg defeature) rather than from PFR1 directly. */
	unsigned int sme_version = arm_sme_version();
	if (sme_version >= ARM_FEAT_SME) {
		gARM_FEAT_SME = 1;
		*commpage_bits |= kHasFeatSME;
	}
	if (sme_version >= ARM_FEAT_SME2) {
		gARM_FEAT_SME2 = 1;
		*commpage_bits |= kHasFeatSME2;
	}
}
653
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64PFR2_EL1.
 *
 * Intentionally empty: no PFR2 features are currently exported to EL0.
 */
static void
commpage_init_arm_optional_features_pfr2(__unused uint64_t *commpage_bits)
{
}
661
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64SMFR0_EL1 (SME sub-features; sysctls only, no commpage bits).
 */
__attribute__((target("sme")))
static void
commpage_init_arm_optional_features_smfr0(void)
{
	if (arm_sme_version() == 0) {
		/*
		 * We can safely read ID_AA64SMFR0_EL1 on SME-less devices. But
		 * arm_sme_version() == 0 could also mean that the user
		 * defeatured SME with a boot-arg.
		 */
		return;
	}

	uint64_t smfr0 = __builtin_arm_rsr64("ID_AA64SMFR0_EL1");

	/*
	 * ID_AA64SMFR0_EL1 has to be parsed differently from other feature ID
	 * registers. See "Alternative ID scheme used for ID_AA64SMFR0_EL1" in
	 * the ARM ARM.
	 */

	/* 1-bit fields: a simple AND suffices. */
	if (smfr0 & ID_AA64SMFR0_EL1_F32F32_EN) {
		gARM_SME_F32F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_BI32I32_EN) {
		gARM_SME_BI32I32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_B16F32_EN) {
		gARM_SME_B16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F16F32_EN) {
		gARM_SME_F16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F64F64_EN) {
		gARM_FEAT_SME_F64F64 = 1;
	}

	/* 4-bit fields (0 bits are ignored): the feature is present only when
	 * every bit of the field's _EN pattern is set. */
	if ((smfr0 & ID_AA64SMFR0_EL1_I8I32_EN) == ID_AA64SMFR0_EL1_I8I32_EN) {
		gARM_SME_I8I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I32_EN) == ID_AA64SMFR0_EL1_I16I32_EN) {
		gARM_SME_I16I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I64_EN) == ID_AA64SMFR0_EL1_I16I64_EN) {
		gARM_FEAT_SME_I16I64 = 1;
	}
}
714
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64MMFR1_EL1.
 *
 * @param commpage_bits  commpage capability word; detected flags are OR-ed in.
 */
static void
commpage_init_arm_optional_features_mmfr1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	const uint64_t mmfr1 = __builtin_arm_rsr64("ID_AA64MMFR1_EL1");

	/* FEAT_AFP: alternate floating-point behavior.
	 * NOTE(review): this uses "==" where sibling parsers use ">=" — verify
	 * against the AFP field encoding before changing. */
	if ((mmfr1 & ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_EN) {
		gARM_FEAT_AFP = 1;
		bits |= kHasFeatAFP;
	}

	*commpage_bits |= bits;
}
728
/**
 * Read the system register @name, attempt to set the bits of @mask that are
 * not already set, test whether those bits were actually set, restore the
 * register to its previous value if it was modified, and 'return' @mask
 * reduced to the bits that were successfully set (or already set) in the
 * system register. */
#define _test_sys_bits(name, mask) ({ \
	const uint64_t src = __builtin_arm_rsr64(#name); \
	uint64_t test = src | mask; \
	if (test != src) { \
	        __builtin_arm_wsr64(#name, test); \
	        test = __builtin_arm_rsr64(#name); \
	        if (test != src) { \
	                __builtin_arm_wsr64(#name, src); \
	        }\
	} \
	mask & test; \
})
746
/**
 * Reports whether FPU exceptions are supported.
 * Possible FPU exceptions are :
 * - input denormal;
 * - inexact;
 * - underflow;
 * - overflow;
 * - divide by 0;
 * - invalid operation.
 *
 * Any of those can be supported or not, but for now we consider it
 * all-or-nothing: the FPU-exceptions-supported flag is set if and only if
 * all 6 exception-enable bits can be set in FPCR.
 */
static void
commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
{
	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
	    FPCR_DZE | FPCR_IOE;
	/* Probe FPCR: which of the enable bits actually stick when written. */
	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
	if (FPCR_bits == support_mask) {
		gARM_FP_SyncExceptions = 1;
		*commpage_bits |= kHasFP_SyncExceptions;
	}
}
772
773
/**
 * Reports whether stateless FEATs are present or not.
 * Those only depend on the SoC and on previously computed flags, so this
 * must run after all register-based parsers.  Intentionally empty here.
 */
static void
commpage_init_arm_optional_features_misc(__unused uint64_t *commpage_bits)
{
}
782
/**
 * Initializes all commpage entries and sysctls for ARM64 optional features
 * accessible from EL0, by parsing each AArch64 feature ID register in turn.
 *
 * @param commpage_bits  accumulator for kHas* commpage capability flags.
 */
static void
commpage_init_arm_optional_features(uint64_t *commpage_bits)
{
	commpage_init_arm_optional_features_isar0(commpage_bits);
	commpage_init_arm_optional_features_isar1(commpage_bits);
	commpage_init_arm_optional_features_isar2();
	commpage_init_arm_optional_features_mmfr0(commpage_bits);
	commpage_init_arm_optional_features_mmfr1(commpage_bits);
	commpage_init_arm_optional_features_mmfr2(commpage_bits);
	commpage_init_arm_optional_features_pfr0(commpage_bits);
	commpage_init_arm_optional_features_pfr1(commpage_bits);
	commpage_init_arm_optional_features_pfr2(commpage_bits);
	commpage_init_arm_optional_features_smfr0();
	commpage_init_arm_optional_features_fpcr(commpage_bits);
	/*
	 * commpage_init_arm_optional_features_misc handles features flags
	 * derived from other feature flags, so it must run last.
	 */
	commpage_init_arm_optional_features_misc(commpage_bits);
}
806 #endif /* __arm64__ */
807
/*
 * Initialize _cpu_capabilities vector: cache line size, CPU count, baseline
 * capabilities, and (on arm64) the optional-feature flags.  The result is
 * published in both the 32-bit and 64-bit commpage capability slots.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the data cache line size as a kCache* flag. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	/* CPU count lives in a dedicated bit field of the capability word. */
	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;         // TPIDRURO for TLS

	bits |= kHasVfp;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	commpage_init_arm_optional_features(&bits);
#endif



#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	/* Publish: legacy 32-bit slot (truncated) and full 64-bit slot. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
867
868 /*
869 * Updated every time a logical CPU goes offline/online
870 */
871 void
commpage_update_active_cpus(void)872 commpage_update_active_cpus(void)
873 {
874 if (!commPagePtr) {
875 return;
876 }
877 *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
878
879 }
880
881 /*
882 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
883 */
884 void
commpage_update_timebase(void)885 commpage_update_timebase(void)
886 {
887 if (commPagePtr) {
888 *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
889 }
890 }
891
892 /*
893 * Update the commpage with current kdebug state: whether tracing is enabled, a
894 * typefilter is present, and continuous time should be used for timestamps.
895 *
896 * Disregards configuration and set to 0 if tracing is disabled.
897 */
898 void
commpage_update_kdebug_state(void)899 commpage_update_kdebug_state(void)
900 {
901 if (commPagePtr) {
902 uint32_t state = kdebug_commpage_state();
903 *((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
904 }
905 }
906
907 /* Ditto for atm_diagnostic_config */
908 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)909 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
910 {
911 if (commPagePtr) {
912 *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
913 }
914 }
915
916 /*
917 * Update the commpage data with the state of multiuser mode for
918 * this device. Allowing various services in userspace to avoid
919 * IPC in the (more common) non-multiuser environment.
920 */
921 void
commpage_update_multiuser_config(uint32_t multiuser_config)922 commpage_update_multiuser_config(uint32_t multiuser_config)
923 {
924 if (commPagePtr) {
925 *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
926 }
927 }
928
/*
 * update the commpage data for
 * last known value of mach_absolute_time()
 *
 * Only advances the value monotonically: a single compare-and-swap attempt
 * is made, deliberately without a retry loop (see comment below).
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	/* Only attempt the store if it would move the published time forward. */
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
960
961 /*
962 * update the commpage data's total system sleep time for
963 * userspace call to mach_continuous_time()
964 */
965 void
commpage_update_mach_continuous_time(uint64_t sleeptime)966 commpage_update_mach_continuous_time(uint64_t sleeptime)
967 {
968 if (!commPagePtr) {
969 return;
970 }
971
972 uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
973
974 os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
975
976 }
977
978 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)979 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
980 {
981 *((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
982 }
983
984 /*
985 * update the commpage's value for the boot time
986 */
987 void
commpage_update_boottime(uint64_t value)988 commpage_update_boottime(uint64_t value)
989 {
990 if (!commPagePtr) {
991 return;
992 }
993
994 uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
995
996 os_atomic_store_wide(boottime_usec, value, relaxed);
997
998 }
999
/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 *
 * Uses base_local_ts as a generation word: it is zeroed while rate and
 * base_remote_ts are rewritten, then stored last, with DMB ISH barriers
 * ordering the phases for lock-free userspace readers.
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate while the payload fields are in flux. */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;         //This will act as a generation count
#endif /* __arm64__ */
	}
}
1019
1020
1021 /*
1022 * update the commpage with if dtrace user land probes are enabled
1023 */
1024 void
commpage_update_dof(boolean_t enabled)1025 commpage_update_dof(boolean_t enabled)
1026 {
1027 #if CONFIG_DTRACE
1028 *((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
1029 #else
1030 (void)enabled;
1031 #endif
1032 }
1033
1034 /*
1035 * update the dyld global config flags
1036 */
1037 void
commpage_update_dyld_flags(uint64_t value)1038 commpage_update_dyld_flags(uint64_t value)
1039 {
1040 *((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
1041
1042 }
1043
1044 /*
1045 * update the APT active indicator
1046 */
1047 void
commpage_update_apt_active(bool active)1048 commpage_update_apt_active(bool active)
1049 {
1050 uint8_t *slot = (uint8_t *)(void *)(_COMM_PAGE_APT_ACTIVE + _COMM_PAGE_RW_OFFSET);
1051 os_atomic_store(slot, active ? 1 : 0, relaxed);
1052 }
1053