1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 /*
30 * @OSF_COPYRIGHT@
31 */
32 /*
33 * @APPLE_FREE_COPYRIGHT@
34 */
35 /*
36 * File: arm/commpage/commpage.c
37 * Purpose: Set up and export a RO/RW page
38 */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
52 #include <arm/cpu_capabilities_public.h>
53 #include <arm/misc_protos.h>
54 #include <arm/rtclock.h>
55 #include <libkern/OSAtomic.h>
56 #include <stdatomic.h>
57 #include <kern/remote_time.h>
58 #include <kern/smr.h>
59 #include <machine/atomic.h>
60 #include <machine/machine_remote_time.h>
61 #include <machine/machine_routines.h>
62 #include <sys/code_signing.h>
63
64 #include <sys/kdebug.h>
65 #include <sys/random.h>
66
67 #if CONFIG_ATM
68 #include <atm/atm_internal.h>
69 #endif
70
71 static int commpage_cpus( void );
72
73 #if defined (__arm64__)
74 #include <arm64/proc_reg.h>
75 #include <pexpert/arm64/apt_msg.h>
76 #endif
77
78
79 static void commpage_init_cpu_capabilities( void );
80
81 SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0;
82 SECURITY_READ_ONLY_LATE(vm_address_t) commpage_rw_addr = 0;
83 SECURITY_READ_ONLY_LATE(vm_address_t) commpage_kernel_ro_addr = 0;
84 SECURITY_READ_ONLY_LATE(uint64_t) _cpu_capabilities = 0;
85 SECURITY_READ_ONLY_LATE(vm_address_t) commpage_rw_text_addr = 0;
86
87 extern user64_addr_t commpage_text64_location;
88 extern user32_addr_t commpage_text32_location;
89
90 /* For sysctl access from BSD side */
91 #define ARM_FEATURE_FLAG(x) \
92 extern int gARM_ ## x;
93 #include <arm/arm_features.inc>
94 #undef ARM_FEATURE_FLAG
95
96 extern int gUCNormalMem;
97
98 void
commpage_populate(void)99 commpage_populate(void)
100 {
101 uint16_t c2;
102 int cpufamily;
103
104 // Create the data and the text commpage
105 vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
106 pmap_create_commpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);
107
108 commpage_rw_addr = kernel_data_addr;
109 commpage_rw_text_addr = kernel_text_addr;
110 commpage_kernel_ro_addr = kernel_ro_data_addr;
111 commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;
112
113 #if __arm64__
114 commpage_text64_location = user_text_addr;
115 bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
116 MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
117 #endif
118
119 *((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;
120
121 commpage_init_cpu_capabilities();
122 commpage_set_timestamp(0, 0, 0, 0, 0);
123
124 if (_cpu_capabilities & kCache32) {
125 c2 = 32;
126 } else if (_cpu_capabilities & kCache64) {
127 c2 = 64;
128 } else if (_cpu_capabilities & kCache128) {
129 c2 = 128;
130 } else {
131 c2 = 0;
132 }
133
134 *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;
135
136 commpage_update_active_cpus();
137 cpufamily = cpuid_get_cpufamily();
138 *((uint8_t*)(_COMM_PAGE_CPU_CLUSTERS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) ml_get_cluster_count();
139 *((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
140 *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
141 *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
142 *((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
143 *((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
144 *((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
145 *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();
146
147 // Populate logical CPU -> logical cluster table
148 ml_map_cpus_to_clusters((uint8_t*)(_COMM_PAGE_CPU_TO_CLUSTER + _COMM_PAGE_RW_OFFSET));
149
150 *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
151 *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
152 *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;
153
154 #if __arm64__
155 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
156 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
157 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
158 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
159 #endif /* __arm64__ */
160
161 commpage_update_timebase();
162 commpage_update_mach_continuous_time(0);
163
164 clock_sec_t secs;
165 clock_usec_t microsecs;
166 clock_get_boottime_microtime(&secs, µsecs);
167 commpage_update_boottime(secs * USEC_PER_SEC + microsecs);
168
169 /*
170 * set commpage approximate time to zero for initialization.
171 * scheduler shall populate correct value before running user thread
172 */
173 *((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
174 #ifdef CONFIG_MACH_APPROXIMATE_TIME
175 *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
176 #else
177 *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
178 #endif
179
180 commpage_update_kdebug_state();
181
182 #if CONFIG_ATM
183 commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
184 #endif
185
186
187 *((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
188
189 #if CONFIG_QUIESCE_COUNTER
190 cpu_quiescent_set_storage((_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
191 _COMM_PAGE_RW_OFFSET));
192 #endif /* CONFIG_QUIESCE_COUNTER */
193
194 /*
195 * Set random values for targets in Apple Security Bounty
196 * addr should be unmapped for userland processes
197 * kaddr should be unmapped for kernel
198 */
199 uint64_t asb_value, asb_addr, asb_kvalue, asb_kaddr;
200 uint64_t asb_rand_vals[] = {
201 0x93e78adcded4d3d5, 0xd16c5b76ad99bccf, 0x67dfbbd12c4a594e, 0x7365636e6f6f544f,
202 0x239a974c9811e04b, 0xbf60e7fa45741446, 0x8acf5210b466b05, 0x67dfbbd12c4a594e
203 };
204 const int nrandval = sizeof(asb_rand_vals) / sizeof(asb_rand_vals[0]);
205 uint8_t randidx;
206
207 read_random(&randidx, sizeof(uint8_t));
208 asb_value = asb_rand_vals[randidx++ % nrandval];
209 *((uint64_t*)(_COMM_PAGE_ASB_TARGET_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_value;
210
211 // userspace faulting address should be > MACH_VM_MAX_ADDRESS
212 asb_addr = asb_rand_vals[randidx++ % nrandval];
213 uint64_t user_min = MACH_VM_MAX_ADDRESS;
214 uint64_t user_max = UINT64_MAX;
215 asb_addr %= (user_max - user_min);
216 asb_addr += user_min;
217 *((uint64_t*)(_COMM_PAGE_ASB_TARGET_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_addr;
218
219 asb_kvalue = asb_rand_vals[randidx++ % nrandval];
220 *((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_kvalue;
221
222 // kernel faulting address should be < VM_MIN_KERNEL_ADDRESS
223 asb_kaddr = asb_rand_vals[randidx++ % nrandval];
224 uint64_t kernel_min = 0x0LL;
225 uint64_t kernel_max = VM_MIN_KERNEL_ADDRESS;
226 asb_kaddr %= (kernel_max - kernel_min);
227 asb_kaddr += kernel_min;
228 *((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_kaddr;
229
230 #if __arm64__
231 *((uint8_t*)(_COMM_PAGE_APT_MSG_POLICY + _COMM_PAGE_RW_OFFSET)) = apt_msg_policy();
232 #endif
233
234 commpage_set_erm_active(extended_research_mode_state());
235 }
236
237 #define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
238 #define COMMPAGE_TEXT_SECTION "__commpage_text"
239
240 /* Get a pointer to the start of the ARM PFZ code section. This macro tell the
241 * linker that the storage for the variable here is at the start of the section */
242 extern char commpage_text_start[]
243 __SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
244
245 /* Get a pointer to the end of the ARM PFZ code section. This macro tell the
246 * linker that the storage for the variable here is at the end of the section */
247 extern char commpage_text_end[]
248 __SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
249
250 /* This is defined in the commpage text section as a symbol at the start of the preemptible
251 * functions */
252 extern char commpage_text_preemptible_functions;
253
254 #if CONFIG_ARM_PFZ
255 static size_t size_of_pfz = 0;
256 #endif
257
258 /* This is the opcode for brk #666 */
259 #define BRK_666_OPCODE 0xD4205340
260
/*
 * Copy the kernel's commpage text (PFZ) section into the user-visible
 * text page and pad the remainder with "brk #666" instructions so that
 * a stray jump past the copied code traps instead of executing junk.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);
	/* A64 instructions are 4 bytes, so the section must be word-aligned. */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Populate the rest of the page with brk #666 so that undefined code
	 * doesn't get run.  BUGFIX: this previously used memset(), which only
	 * keeps the low byte of BRK_666_OPCODE (filling with 0x40 bytes, not
	 * brk instructions); store the full 32-bit opcode instead.
	 */
	uint32_t *pad = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	size_t pad_words = (PAGE_SIZE - size_of_commpage_text) / sizeof(uint32_t);
	for (size_t i = 0; i < pad_words; i++) {
		pad[i] = BRK_666_OPCODE;
	}
#endif
}
284
285 uint32_t
commpage_is_in_pfz64(addr64_t addr64)286 commpage_is_in_pfz64(addr64_t addr64)
287 {
288 #if CONFIG_ARM_PFZ
289 if ((addr64 >= commpage_text64_location) &&
290 (addr64 < (commpage_text64_location + size_of_pfz))) {
291 return 1;
292 } else {
293 return 0;
294 }
295 #else
296 #pragma unused (addr64)
297 return 0;
298 #endif
299 }
300
301
/*
 * Publish a new gettimeofday snapshot to the commpage.
 *
 * Seqlock-style protocol: TimeStamp_tick is zeroed first (readers treat a
 * zero tick as "update in progress" and retry), the payload fields are
 * written, then the real tick value is stored.  DMB ISH barriers order the
 * three phases for observers on other CPUs.
 */
void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Commpage not created yet: nothing to publish to. */
	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Phase 1: invalidate the record. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	/* Make the invalidation visible before the payload changes. */
	__builtin_arm_dmb(DMB_ISH);

	/* Phase 2: write the payload. */
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	/* Make the payload visible before re-validating the record. */
	__builtin_arm_dmb(DMB_ISH);

	/* Phase 3: publish the tick, marking the record valid again. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;

}
332
333 /*
334 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
335 */
336
337 void
commpage_set_memory_pressure(unsigned int pressure)338 commpage_set_memory_pressure(
339 unsigned int pressure )
340 {
341 if (commPagePtr == 0) {
342 return;
343 }
344 *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
345 }
346
347 /*
348 * Determine number of CPUs on this system.
349 */
350 static int
commpage_cpus(void)351 commpage_cpus( void )
352 {
353 int cpus;
354
355 cpus = machine_info.max_cpus;
356
357 if (cpus == 0) {
358 panic("commpage cpus==0");
359 }
360 if (cpus > 0xFF) {
361 cpus = 0xFF;
362 }
363
364 return cpus;
365 }
366
367 uint64_t
_get_cpu_capabilities(void)368 _get_cpu_capabilities(void)
369 {
370 return _cpu_capabilities;
371 }
372
373 vm_address_t
_get_commpage_priv_address(void)374 _get_commpage_priv_address(void)
375 {
376 return commpage_rw_addr;
377 }
378
379 vm_address_t
_get_commpage_ro_address(void)380 _get_commpage_ro_address(void)
381 {
382 return commpage_kernel_ro_addr;
383 }
384
385 vm_address_t
_get_commpage_text_priv_address(void)386 _get_commpage_text_priv_address(void)
387 {
388 return commpage_rw_text_addr;
389 }
390
391 #if defined(__arm64__)
392
393
394 /**
395 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
396 */
static void
commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
{
	uint64_t bits = 0; /* capability bits accumulated, OR'd into *commpage_bits at the end */
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");

	/* Each masked field is compared against the minimum encoding that implies the feature. */
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
		gARM_FEAT_FlagM = 1;
		bits |= kHasFEATFlagM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
		gARM_FEAT_FlagM2 = 1;
		bits |= kHasFEATFlagM2;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		gARM_FEAT_FHM = 1;
		bits |= kHasFeatFHM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
		gARM_FEAT_DotProd = 1;
		bits |= kHasFeatDotProd;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		gARM_FEAT_SHA3 = 1;
		bits |= kHasFeatSHA3;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
		gARM_FEAT_RDM = 1;
		bits |= kHasFeatRDM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		gARM_FEAT_LSE = 1;
		bits |= kHasFeatLSE;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
		gARM_FEAT_SHA512 = 1;
		bits |= kHasFeatSHA512;
	}
	/* Note: CRC32 uses an exact match rather than >=, unlike the fields above. */
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		gARM_FEAT_CRC32 = 1;
		bits |= kHasARMv8Crc32;
	}

#if __ARM_V8_CRYPTO_EXTENSIONS__
	/**
	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
	 * supported when it actually is. To work around this, for all of the crypto
	 * extensions, just check if they're supported using the board_config.h
	 * values.
	 */
	gARM_FEAT_PMULL = 1;
	gARM_FEAT_SHA1 = 1;
	gARM_FEAT_AES = 1;
	gARM_FEAT_SHA256 = 1;
	bits |= kHasARMv8Crypto;
#endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */

	*commpage_bits |= bits;
}
456
457 /**
458 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
459 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0; /* capability bits accumulated, OR'd into *commpage_bits at the end */
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only advertised when SCTLR_EL1.EnRCTX is also set. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
#ifdef HAS_SPECRES2
		if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES2_EN) {
			gARM_FEAT_SPECRES2 = 1;
		}
#endif /* HAS_SPECRES2 */
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARM_FEAT_PACIMP = 1;
		bits |= kHasArmv8GPI;
	}
	/* LRCPC/LRCPC2, PAuth/PAuth2/FPAC/FPACCOMBINE, and DPB/DPB2 are
	 * cumulative levels of the same field, hence the repeated >= tests. */
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPACCOMBINE) {
		gARM_FEAT_FPACCOMBINE = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	/* BF16/EBF16 and I8MM set sysctl globals only; no commpage bit is defined. */
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_EBF16_EN) {
		gARM_FEAT_EBF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}
538
539 /**
540 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR2_EL1
541 */
542 static void
commpage_init_arm_optional_features_isar2(void)543 commpage_init_arm_optional_features_isar2(void)
544 {
545 uint64_t isar2 = __builtin_arm_rsr64("ID_AA64ISAR2_EL1");
546
547 if ((isar2 & ID_AA64ISAR2_EL1_WFxT_MASK) >= ID_AA64ISAR2_EL1_WFxT_EN) {
548 gARM_FEAT_WFxT = 1;
549 }
550 if ((isar2 & ID_AA64ISAR2_EL1_RPRES_MASK) >= ID_AA64ISAR2_EL1_RPRES_EN) {
551 gARM_FEAT_RPRES = 1;
552 }
553 if ((isar2 & ID_AA64ISAR2_EL1_CSSC_MASK) >= ID_AA64ISAR2_EL1_CSSC_EN) {
554 gARM_FEAT_CSSC = 1;
555 }
556 if ((isar2 & ID_AA64ISAR2_EL1_BC_MASK) >= ID_AA64ISAR2_EL1_BC_EN) {
557 gARM_FEAT_HBC = 1;
558 }
559 }
560
561
562 /**
563 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR0_EL1
564 */
565 static void
commpage_init_arm_optional_features_mmfr0(uint64_t * commpage_bits)566 commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
567 {
568 uint64_t bits = 0;
569 uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");
570
571 if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
572 gARM_FEAT_ECV = 1;
573 }
574
575 *commpage_bits |= bits;
576 }
577
578 /**
579 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
580 */
581 static void
commpage_init_arm_optional_features_mmfr2(uint64_t * commpage_bits)582 commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
583 {
584 uint64_t bits = 0;
585 uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");
586
587 if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
588 gARM_FEAT_LSE2 = 1;
589 bits |= kHasFeatLSE2;
590 }
591
592 *commpage_bits |= bits;
593 }
594
595 /**
596 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
597 */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0; /* capability bits accumulated, OR'd into *commpage_bits at the end */
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
		gARM_FEAT_DIT = 1;
		bits |= kHasFeatDIT;
	}
	/* AdvSIMD uses a signed-style encoding: all-ones means "not implemented",
	 * so presence is tested with != DIS before checking the level. */
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}
631
632 /**
633 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
634 */
static void
commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits)
{
	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");

	/* SSBS and BTI set sysctl globals only; no commpage bit is defined. */
	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
		gARM_FEAT_SSBS = 1;
	}

	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
		gARM_FEAT_BTI = 1;
	}

	/* SME presence is taken from arm_sme_version() rather than PFR1
	 * directly, so a boot-arg defeature of SME is respected. */
	unsigned int sme_version = arm_sme_version();
	if (sme_version >= ARM_FEAT_SME) {
		gARM_FEAT_SME = 1;
		*commpage_bits |= kHasFeatSME;
	}
	if (sme_version >= ARM_FEAT_SME2) {
		gARM_FEAT_SME2 = 1;
		*commpage_bits |= kHasFeatSME2;
	}

}
659
660 /**
661 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR2_EL1
662 */
663 static void
commpage_init_arm_optional_features_pfr2(__unused uint64_t * commpage_bits)664 commpage_init_arm_optional_features_pfr2(__unused uint64_t *commpage_bits)
665 {
666 uint64_t pfr2 __unused = __builtin_arm_rsr64("ID_AA64PFR2_EL1");
667
668
669 }
670
671 /**
672 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64SMFR0_EL1
673 */
__attribute__((target("sme")))
static void
commpage_init_arm_optional_features_smfr0(void)
{
	if (arm_sme_version() == 0) {
		/*
		 * We can safely read ID_AA64SMFR0_EL1 on SME-less devices. But
		 * arm_sme_version() == 0 could also mean that the user
		 * defeatured SME with a boot-arg.
		 */
		return;
	}

	uint64_t smfr0 = __builtin_arm_rsr64("ID_AA64SMFR0_EL1");

	/*
	 * ID_AA64SMFR0_EL1 has to be parsed differently from other feature ID
	 * registers. See "Alternative ID scheme used for ID_AA64SMFR0_EL1" in
	 * the ARM ARM.
	 */

	/* 1-bit fields: a set bit means the feature is present. */
	if (smfr0 & ID_AA64SMFR0_EL1_F32F32_EN) {
		gARM_SME_F32F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_BI32I32_EN) {
		gARM_SME_BI32I32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_B16F32_EN) {
		gARM_SME_B16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F16F32_EN) {
		gARM_SME_F16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F64F64_EN) {
		gARM_FEAT_SME_F64F64 = 1;
	}

	/* 4-bit fields (0 bits are ignored): all mask bits must be set. */
	if ((smfr0 & ID_AA64SMFR0_EL1_I8I32_EN) == ID_AA64SMFR0_EL1_I8I32_EN) {
		gARM_SME_I8I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I32_EN) == ID_AA64SMFR0_EL1_I16I32_EN) {
		gARM_SME_I16I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I64_EN) == ID_AA64SMFR0_EL1_I16I64_EN) {
		gARM_FEAT_SME_I16I64 = 1;
	}
}
723
724 static void
commpage_init_arm_optional_features_mmfr1(uint64_t * commpage_bits)725 commpage_init_arm_optional_features_mmfr1(uint64_t *commpage_bits)
726 {
727 uint64_t bits = 0;
728 const uint64_t mmfr1 = __builtin_arm_rsr64("ID_AA64MMFR1_EL1");
729
730 if ((mmfr1 & ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_EN) {
731 gARM_FEAT_AFP = 1;
732 bits |= kHasFeatAFP;
733 }
734
735 *commpage_bits |= bits;
736 }
737
738 /**
739 * Read the system register @name, attempt to set set bits of @mask if not
740 * already, test if bits were actually set, reset the register to its
741 * previous value if required, and 'return' @mask with only bits that
742 * were successfully set (or already set) in the system register. */
743 #define _test_sys_bits(name, mask) ({ \
744 const uint64_t src = __builtin_arm_rsr64(#name); \
745 uint64_t test = src | mask; \
746 if (test != src) { \
747 __builtin_arm_wsr64(#name, test); \
748 test = __builtin_arm_rsr64(#name); \
749 if (test != src) { \
750 __builtin_arm_wsr64(#name, src); \
751 }\
752 } \
753 mask & test; \
754 })
755
756 /**
757 * Reports whether FPU exceptions are supported.
758 * Possible FPU exceptions are :
759 * - input denormal;
760 * - inexact;
761 * - underflow;
762 * - overflow;
763 * - divide by 0;
764 * - invalid operation.
765 *
766 * Any of those can be supported or not but for now, we consider that
767 * it all or nothing : FPU exceptions support flag set <=> all 6 exceptions
768 * a supported.
769 */
static void
commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
{
	/* All six FPCR trap-enable bits, one per possible FP exception. */
	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
	    FPCR_DZE | FPCR_IOE;
	/* Probe which of the enable bits the hardware actually honors
	 * (see _test_sys_bits above); FPCR is restored if it was modified. */
	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
	/* All-or-nothing policy: advertise support only if every bit stuck. */
	if (FPCR_bits == support_mask) {
		gARM_FP_SyncExceptions = 1;
		*commpage_bits |= kHasFP_SyncExceptions;
	}
}
781
782
783 /**
784 * Reports whether stateless FEATs are present or not.
785 * Those only depend on the SoC and on previous variables.
786 */
static void
commpage_init_arm_optional_features_misc(__unused uint64_t *commpage_bits)
{
	/* Intentionally empty: no derived feature flags in this configuration. */
}
791
792 /**
793 * Initializes all commpage entries and sysctls for ARM64 optional features accessible from EL0.
794 */
static void
commpage_init_arm_optional_features(uint64_t *commpage_bits)
{
	/* One helper per feature ID register; each ORs its bits into *commpage_bits
	 * and sets the corresponding sysctl globals. */
	commpage_init_arm_optional_features_isar0(commpage_bits);
	commpage_init_arm_optional_features_isar1(commpage_bits);
	commpage_init_arm_optional_features_isar2();
	commpage_init_arm_optional_features_mmfr0(commpage_bits);
	commpage_init_arm_optional_features_mmfr1(commpage_bits);
	commpage_init_arm_optional_features_mmfr2(commpage_bits);
	commpage_init_arm_optional_features_pfr0(commpage_bits);
	commpage_init_arm_optional_features_pfr1(commpage_bits);
	commpage_init_arm_optional_features_pfr2(commpage_bits);
	commpage_init_arm_optional_features_smfr0();
	commpage_init_arm_optional_features_fpcr(commpage_bits);
	/*
	 * commpage_init_arm_optional_features_misc handles features flags
	 * derived from other feature flags, so it must run last.
	 */
	commpage_init_arm_optional_features_misc(commpage_bits);
}
815 #endif /* __arm64__ */
816
817 /*
818 * Initialize _cpu_capabilities vector
819 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the cache line size as a capability bit. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	/* kUP marks a uniprocessor system. */
	if (cpus == 1) {
		bits |= kUP;
	}

	/* CPU count occupies the byte at kNumCPUsShift. */
	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage; // TPIDRURO for TLS

	bits |= kHasVfp;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	/* Decode per-feature bits from the ARM64 feature ID registers. */
	commpage_init_arm_optional_features(&bits);
#endif



#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	/* Publish both the legacy 32-bit slot (truncated) and the 64-bit slot. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;

}
876
877 /*
878 * Updated every time a logical CPU goes offline/online
879 */
880 void
commpage_update_active_cpus(void)881 commpage_update_active_cpus(void)
882 {
883 if (!commPagePtr) {
884 return;
885 }
886 *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
887
888 }
889
890 /*
891 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
892 */
893 void
commpage_update_timebase(void)894 commpage_update_timebase(void)
895 {
896 if (commPagePtr) {
897 *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
898 }
899 }
900
901 /*
902 * Update the commpage with current kdebug state: whether tracing is enabled, a
903 * typefilter is present, and continuous time should be used for timestamps.
904 *
905 * Disregards configuration and set to 0 if tracing is disabled.
906 */
907 void
commpage_update_kdebug_state(void)908 commpage_update_kdebug_state(void)
909 {
910 if (commPagePtr) {
911 uint32_t state = kdebug_commpage_state();
912 *((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
913 }
914 }
915
916 /* Ditto for atm_diagnostic_config */
917 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)918 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
919 {
920 if (commPagePtr) {
921 *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
922 }
923 }
924
925 /*
926 * Update the commpage data with the state of multiuser mode for
927 * this device. Allowing various services in userspace to avoid
928 * IPC in the (more common) non-multiuser environment.
929 */
930 void
commpage_update_multiuser_config(uint32_t multiuser_config)931 commpage_update_multiuser_config(uint32_t multiuser_config)
932 {
933 if (commPagePtr) {
934 *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
935 }
936 }
937
938 /*
939 * update the commpage data for
940 * last known value of mach_absolute_time()
941 */
942
/*
 * Publish the last-known mach_absolute_time() value for approximate-time
 * consumers.  Monotonic: an older timestamp never overwrites a newer one.
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	/* Only advance the published value; never move it backwards. */
	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
969
970 /*
971 * update the commpage data's total system sleep time for
972 * userspace call to mach_continuous_time()
973 */
974 void
commpage_update_mach_continuous_time(uint64_t sleeptime)975 commpage_update_mach_continuous_time(uint64_t sleeptime)
976 {
977 if (!commPagePtr) {
978 return;
979 }
980
981 uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
982
983 os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
984
985 }
986
987 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)988 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
989 {
990 *((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
991 }
992
993 /*
994 * update the commpage's value for the boot time
995 */
996 void
commpage_update_boottime(uint64_t value)997 commpage_update_boottime(uint64_t value)
998 {
999 if (!commPagePtr) {
1000 return;
1001 }
1002
1003 uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
1004
1005 os_atomic_store_wide(boottime_usec, value, relaxed);
1006
1007 }
1008
1009 /*
1010 * set the commpage's remote time params for
1011 * userspace call to mach_bridge_remote_time()
1012 */
/*
 * Publish remote-time conversion parameters for userspace
 * mach_bridge_remote_time().  Uses the same invalidate/write/publish
 * protocol as commpage_set_timestamp(): base_local_ts doubles as the
 * generation word, with 0 meaning "update in progress".
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate the record before touching the payload. */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		/* Payload must be globally visible before re-validation. */
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts; //This will act as a generation count
#endif /* __arm64__ */
	}
}
1028
1029
1030 /*
1031 * update the commpage with if dtrace user land probes are enabled
1032 */
1033 void
commpage_update_dof(boolean_t enabled)1034 commpage_update_dof(boolean_t enabled)
1035 {
1036 #if CONFIG_DTRACE
1037 *((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
1038 #else
1039 (void)enabled;
1040 #endif
1041 }
1042
1043 /*
1044 * update the dyld global config flags
1045 */
1046 void
commpage_update_dyld_flags(uint64_t value)1047 commpage_update_dyld_flags(uint64_t value)
1048 {
1049 *((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
1050
1051 }
1052
1053 /*
1054 * update the APT active indicator
1055 */
1056 void
commpage_update_apt_active(bool active)1057 commpage_update_apt_active(bool active)
1058 {
1059 uint8_t *slot = (uint8_t *)(void *)(_COMM_PAGE_APT_ACTIVE + _COMM_PAGE_RW_OFFSET);
1060 os_atomic_store(slot, active ? 1 : 0, relaxed);
1061 }
1062
1063 /*
1064 * set the Extended Research Mode active indicator
1065 */
/*
 * Set the Extended Research Mode active indicator.  Only honored before
 * lockdown; NOTE(review): presumably the RW commpage mapping is sealed at
 * STARTUP_SUB_LOCKDOWN, hence the phase check — confirm against pmap.
 */
void
commpage_set_erm_active(bool active)
{
	if (startup_phase < STARTUP_SUB_LOCKDOWN) {
		uint8_t *slot = (uint8_t *)(void *)(_COMM_PAGE_SECURITY_RESEARCH_DEVICE_ERM_ACTIVE + _COMM_PAGE_RW_OFFSET);
		os_atomic_store(slot, active ? 1 : 0, relaxed);
	}
#if DEVELOPMENT || DEBUG
	/* Late calls are ignored; log only on development/debug kernels. */
	else {
		kprintf("ERROR can't set ERM bit at startup_phase 0x%x. Action is ignored\n", startup_phase);
	}
#endif
}
1079