xref: /xnu-12377.41.6/osfmk/arm/commpage/commpage.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4  *
5  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6  *
7  * This file contains Original Code and/or Modifications of Original Code
8  * as defined in and that are subject to the Apple Public Source License
9  * Version 2.0 (the 'License'). You may not use this file except in
10  * compliance with the License. The rights granted to you under the License
11  * may not be used to create, or enable the creation or redistribution of,
12  * unlawful or unlicensed copies of an Apple operating system, or to
13  * circumvent, violate, or enable the circumvention or violation of, any
14  * terms of an Apple operating system software license agreement.
15  *
16  * Please obtain a copy of the License at
17  * http://www.opensource.apple.com/apsl/ and read it before using this file.
18  *
19  * The Original Code and all software distributed under the License are
20  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24  * Please see the License for the specific language governing rights and
25  * limitations under the License.
26  *
27  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28  */
29 /*
30  * @OSF_COPYRIGHT@
31  */
32 /*
33  * @APPLE_FREE_COPYRIGHT@
34  */
35 /*
36  *	File:		arm/commpage/commpage.c
37  *	Purpose:	Set up and export a RO/RW page
38  */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
52 #include <arm/cpu_capabilities_public.h>
53 #include <arm/misc_protos.h>
54 #include <arm/rtclock.h>
55 #include <libkern/OSAtomic.h>
56 #include <stdatomic.h>
57 #include <kern/remote_time.h>
58 #include <kern/smr.h>
59 #include <machine/atomic.h>
60 #include <machine/machine_remote_time.h>
61 #include <machine/machine_routines.h>
62 #include <sys/code_signing.h>
63 
64 #include <sys/kdebug.h>
65 #include <sys/random.h>
66 
67 #if CONFIG_ATM
68 #include <atm/atm_internal.h>
69 #endif
70 
71 static int commpage_cpus( void );
72 
73 #if defined (__arm64__)
74 #include <arm64/proc_reg.h>
75 #include <pexpert/arm64/apt_msg.h>
76 #endif
77 
78 
79 static void commpage_init_cpu_capabilities( void );
80 
81 SECURITY_READ_ONLY_LATE(vm_address_t)   commPagePtr = 0;
82 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_addr = 0;
83 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_kernel_ro_addr = 0;
84 SECURITY_READ_ONLY_LATE(uint64_t)       _cpu_capabilities = 0;
85 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_text_addr = 0;
86 
87 extern user64_addr_t commpage_text64_location;
88 extern user32_addr_t commpage_text32_location;
89 
90 /* For sysctl access from BSD side */
91 #define ARM_FEATURE_FLAG(x) \
92 	extern int gARM_ ## x;
93 #include <arm/arm_features.inc>
94 #undef ARM_FEATURE_FLAG
95 
96 extern int      gUCNormalMem;
97 
/*
 * commpage_populate: one-time boot initialization of the comm pages.
 *
 * Creates the RW data, RW text, RO data and user text comm pages via pmap,
 * records their kernel addresses in the file-scope globals, then fills in
 * every static field: signature/version, CPU capabilities, cache geometry,
 * CPU/cluster topology, timebase, boottime, and the Apple Security Bounty
 * decoy values.  Must run before the first user thread; later updates go
 * through the commpage_update_* routines.
 */
void
commpage_populate(void)
{
	uint16_t        c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
	pmap_create_commpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);

	/* Kernel-visible writable aliases of the user-visible pages. */
	commpage_rw_addr = kernel_data_addr;
	commpage_rw_text_addr = kernel_text_addr;
	commpage_kernel_ro_addr = kernel_ro_data_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	/* Signature is truncated to the field size if the string is longer. */
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	/* Must precede the kCache* test below: it computes _cpu_capabilities. */
	commpage_init_cpu_capabilities();
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Translate the kCacheNN capability bit back into a byte count. */
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	/* CPU topology fields, each clamped to the width of its commpage slot. */
	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();
	*((uint8_t*)(_COMM_PAGE_CPU_CLUSTERS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) ml_get_cluster_count();
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	/* Debugger availability is published twice: legacy RW slot and RO slot. */
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();

	// Populate logical CPU -> logical cluster table
	ml_map_cpus_to_clusters((uint8_t*)(_COMM_PAGE_CPU_TO_CLUSTER + _COMM_PAGE_RW_OFFSET));

	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	/* Page shifts also appear in both legacy RW and RO locations. */
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
	/* 64-bit user processes always use 16K pages on arm64. */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif


	/* Sentinel tells userspace the remote-time params are not yet valid. */
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;

#if CONFIG_QUIESCE_COUNTER
	cpu_quiescent_set_storage((_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET));
#endif /* CONFIG_QUIESCE_COUNTER */

	/*
	 * Set random values for targets in Apple Security Bounty
	 * addr should be unmapped for userland processes
	 * kaddr should be unmapped for kernel
	 */
	uint64_t asb_value, asb_addr, asb_kvalue, asb_kaddr;
	uint64_t asb_rand_vals[] = {
		0x93e78adcded4d3d5, 0xd16c5b76ad99bccf, 0x67dfbbd12c4a594e, 0x7365636e6f6f544f,
		0x239a974c9811e04b, 0xbf60e7fa45741446, 0x8acf5210b466b05, 0x67dfbbd12c4a594e
	};
	const int nrandval = sizeof(asb_rand_vals) / sizeof(asb_rand_vals[0]);
	uint8_t randidx;

	/* randidx wraps modulo nrandval; each target takes the next table entry. */
	read_random(&randidx, sizeof(uint8_t));
	asb_value = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_value;

	// userspace faulting address should be > MACH_VM_MAX_ADDRESS
	asb_addr = asb_rand_vals[randidx++ % nrandval];
	uint64_t user_min = MACH_VM_MAX_ADDRESS;
	uint64_t user_max = UINT64_MAX;
	asb_addr %= (user_max - user_min);
	asb_addr += user_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_addr;

	asb_kvalue = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_kvalue;

	// kernel faulting address should be < VM_MIN_KERNEL_ADDRESS
	asb_kaddr = asb_rand_vals[randidx++ % nrandval];
	uint64_t kernel_min = 0x0LL;
	uint64_t kernel_max = VM_MIN_KERNEL_ADDRESS;
	asb_kaddr %= (kernel_max - kernel_min);
	asb_kaddr += kernel_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_kaddr;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_APT_MSG_POLICY + _COMM_PAGE_RW_OFFSET)) = apt_msg_policy();
#endif

	commpage_set_erm_active(extended_research_mode_state());
}
236 
/* Mach-O segment,section that holds the comm page text (PFZ) code. */
#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
#define COMMPAGE_TEXT_SECTION "__commpage_text"

/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
extern char commpage_text_start[]
__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
extern char commpage_text_end[]
__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* This is defined in the commpage text section as a symbol at the start of the preemptible
 * functions */
extern char commpage_text_preemptible_functions;

#if CONFIG_ARM_PFZ
/* Byte length of the preemption-free zone; set by commpage_text_populate(). */
static size_t size_of_pfz = 0;
#endif

/* This is the opcode for brk #666 */
#define BRK_666_OPCODE 0xD4205340
260 
/*
 * Copy the kernel's comm page text section into the user-visible text comm
 * page and record the size of the preemption-free zone (PFZ).  Any space in
 * the page beyond the copied code is filled with BRK #666 instructions so
 * that a stray jump into the unused region traps.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Fill the rest of the page with brk #666 so that undefined code
	 * doesn't get run.  memset() cannot be used here: it converts its
	 * fill value to a single byte (0x40), which would NOT produce BRK
	 * instructions.  Write the full 32-bit opcode per instruction slot.
	 */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0); /* A64 insns are 4 bytes */
	uint32_t *fill = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	size_t fill_count = (PAGE_SIZE - size_of_commpage_text) / sizeof(uint32_t);
	for (size_t i = 0; i < fill_count; i++) {
		fill[i] = BRK_666_OPCODE;
	}
#endif
}
284 
285 uint32_t
commpage_is_in_pfz64(addr64_t addr64)286 commpage_is_in_pfz64(addr64_t addr64)
287 {
288 #if CONFIG_ARM_PFZ
289 	if ((addr64 >= commpage_text64_location) &&
290 	    (addr64 < (commpage_text64_location + size_of_pfz))) {
291 		return 1;
292 	} else {
293 		return 0;
294 	}
295 #else
296 #pragma unused (addr64)
297 	return 0;
298 #endif
299 }
300 
301 
/*
 * Publish a new gettimeofday snapshot to the comm page.
 *
 * Uses a seqlock-like protocol keyed on TimeStamp_tick: the tick is zeroed
 * first (marking the record invalid for readers), the payload fields are
 * written, and only then is the real tick stored.  The two DMB ISH barriers
 * order the three phases so a user-space reader that observes a non-zero
 * tick is guaranteed to see consistent payload fields.
 */
void
commpage_set_timestamp(
	uint64_t        tbr,
	uint64_t        secs,
	uint64_t        frac,
	uint64_t        scale,
	uint64_t        tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Comm page not created yet (early boot callers pass zeros). */
	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap =  (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Phase 1: invalidate the record for concurrent readers. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 2: write the payload while the record is invalid. */
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 3: re-validate with the new timebase tick. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;

}
332 
333 /*
334  * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
335  */
336 
337 void
commpage_set_memory_pressure(unsigned int pressure)338 commpage_set_memory_pressure(
339 	unsigned int    pressure )
340 {
341 	if (commPagePtr == 0) {
342 		return;
343 	}
344 	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
345 }
346 
347 /*
348  * Determine number of CPUs on this system.
349  */
350 static int
commpage_cpus(void)351 commpage_cpus( void )
352 {
353 	int cpus;
354 
355 	cpus = machine_info.max_cpus;
356 
357 	if (cpus == 0) {
358 		panic("commpage cpus==0");
359 	}
360 	if (cpus > 0xFF) {
361 		cpus = 0xFF;
362 	}
363 
364 	return cpus;
365 }
366 
367 uint64_t
_get_cpu_capabilities(void)368 _get_cpu_capabilities(void)
369 {
370 	return _cpu_capabilities;
371 }
372 
373 vm_address_t
_get_commpage_priv_address(void)374 _get_commpage_priv_address(void)
375 {
376 	return commpage_rw_addr;
377 }
378 
379 vm_address_t
_get_commpage_ro_address(void)380 _get_commpage_ro_address(void)
381 {
382 	return commpage_kernel_ro_addr;
383 }
384 
385 vm_address_t
_get_commpage_text_priv_address(void)386 _get_commpage_text_priv_address(void)
387 {
388 	return commpage_rw_text_addr;
389 }
390 
391 #if defined(__arm64__)
392 
393 
394 /**
395  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
396  */
/*
 * For each feature field in ID_AA64ISAR0_EL1 that meets the minimum
 * encoding, set the corresponding sysctl-visible gARM_* global and
 * accumulate the commpage capability bit, then OR the result into the
 * caller's bit vector.
 */
static void
commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");

	/* TS field: FlagM at level 1, FlagM2 at level 2. */
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
		gARM_FEAT_FlagM = 1;
		bits |= kHasFEATFlagM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
		gARM_FEAT_FlagM2 = 1;
		bits |= kHasFEATFlagM2;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		gARM_FEAT_FHM = 1;
		bits |= kHasFeatFHM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
		gARM_FEAT_DotProd = 1;
		bits |= kHasFeatDotProd;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		gARM_FEAT_SHA3 = 1;
		bits |= kHasFeatSHA3;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
		gARM_FEAT_RDM = 1;
		bits |= kHasFeatRDM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		gARM_FEAT_LSE = 1;
		bits |= kHasFeatLSE;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
		gARM_FEAT_SHA512 = 1;
		bits |= kHasFeatSHA512;
	}
	/* NOTE(review): CRC32 uses an exact match where the other fields use
	 * a minimum-level (>=) check — presumably deliberate; confirm. */
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		gARM_FEAT_CRC32 = 1;
		bits |= kHasARMv8Crc32;
	}

#if __ARM_V8_CRYPTO_EXTENSIONS__
	/**
	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
	 * supported when it actually is. To work around this, for all of the crypto
	 * extensions, just check if they're supported using the board_config.h
	 * values.
	 */
	gARM_FEAT_PMULL = 1;
	gARM_FEAT_SHA1 = 1;
	gARM_FEAT_AES = 1;
	gARM_FEAT_SHA256 = 1;
	bits |= kHasARMv8Crypto;
#endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */

	*commpage_bits |= bits;
}
456 
457 /**
458  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
459  */
/*
 * For each feature field in ID_AA64ISAR1_EL1 that meets the minimum
 * encoding, set the sysctl-visible gARM_* global and, where one exists,
 * the commpage capability bit.  Some features (PAuth2, FPAC, BF16, I8MM,
 * ...) have sysctl globals only.
 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only advertised if EL0 use is enabled via SCTLR_EL1.EnRCTX. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
#ifdef HAS_SPECRES2
		if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES2_EN) {
			gARM_FEAT_SPECRES2 = 1;
		}
#endif /* HAS_SPECRES2 */
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	/* GPI: implementation-defined pointer-auth generic code. */
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARM_FEAT_PACIMP = 1;
		bits |= kHasArmv8GPI;
	}
	/* LRCPC field: level 1 = LRCPC, level 2 = LRCPC2. */
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	/* API field: successive levels add PAuth -> PAuth2 -> FPAC -> FPACCOMBINE. */
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPACCOMBINE) {
		gARM_FEAT_FPACCOMBINE = 1;
	}
	/* DPB field: level 1 = DC CVAP (DPB), level 2 adds DC CVADP (DPB2). */
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_EBF16_EN) {
		gARM_FEAT_EBF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}
538 
539 /**
540  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR2_EL1
541  */
542 static void
commpage_init_arm_optional_features_isar2(void)543 commpage_init_arm_optional_features_isar2(void)
544 {
545 	uint64_t isar2 = __builtin_arm_rsr64("ID_AA64ISAR2_EL1");
546 
547 	if ((isar2 & ID_AA64ISAR2_EL1_WFxT_MASK) >= ID_AA64ISAR2_EL1_WFxT_EN) {
548 		gARM_FEAT_WFxT = 1;
549 	}
550 	if ((isar2 & ID_AA64ISAR2_EL1_RPRES_MASK) >= ID_AA64ISAR2_EL1_RPRES_EN) {
551 		gARM_FEAT_RPRES = 1;
552 	}
553 	if ((isar2 & ID_AA64ISAR2_EL1_CSSC_MASK) >= ID_AA64ISAR2_EL1_CSSC_EN) {
554 		gARM_FEAT_CSSC = 1;
555 	}
556 	if ((isar2 & ID_AA64ISAR2_EL1_BC_MASK) >= ID_AA64ISAR2_EL1_BC_EN) {
557 		gARM_FEAT_HBC = 1;
558 	}
559 }
560 
561 
562 /**
563  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR0_EL1
564  */
565 static void
commpage_init_arm_optional_features_mmfr0(uint64_t * commpage_bits)566 commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
567 {
568 	uint64_t bits = 0;
569 	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");
570 
571 	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
572 		gARM_FEAT_ECV = 1;
573 	}
574 
575 	*commpage_bits |= bits;
576 }
577 
578 /**
579  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
580  */
581 static void
commpage_init_arm_optional_features_mmfr2(uint64_t * commpage_bits)582 commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
583 {
584 	uint64_t bits = 0;
585 	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");
586 
587 	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
588 		gARM_FEAT_LSE2 = 1;
589 		bits |= kHasFeatLSE2;
590 	}
591 
592 	*commpage_bits |= bits;
593 }
594 
595 /**
596  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
597  */
598 static void
commpage_init_arm_optional_features_pfr0(uint64_t * commpage_bits)599 commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
600 {
601 	uint64_t bits = 0;
602 	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");
603 
604 	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
605 		gARM_FEAT_CSV3 = 1;
606 		bits |= kHasFeatCSV3;
607 	}
608 	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
609 		gARM_FEAT_CSV2 = 1;
610 		bits |= kHasFeatCSV2;
611 	}
612 	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
613 		gARM_FEAT_DIT = 1;
614 		bits |= kHasFeatDIT;
615 	}
616 	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
617 		gARM_AdvSIMD = 1;
618 		bits |= kHasAdvSIMD;
619 		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
620 			gARM_AdvSIMD_HPFPCvt = 1;
621 			bits |= kHasAdvSIMD_HPFPCvt;
622 		}
623 		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
624 			gARM_FEAT_FP16 = 1;
625 			bits |= kHasFeatFP16;
626 		}
627 	}
628 
629 	*commpage_bits |= bits;
630 }
631 
632 /**
633  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
634  */
/*
 * Initializes commpage entries and sysctls for EL0 visible features in
 * ID_AA64PFR1_EL1, plus the SME feature level (queried via
 * arm_sme_version() rather than this register) and the MTE feature tiers.
 */
static void
commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits)
{
	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");

	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
		gARM_FEAT_SSBS = 1;
	}

	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
		gARM_FEAT_BTI = 1;
	}

	/* SME levels are cumulative; each level also sets a commpage bit. */
	unsigned int sme_version = arm_sme_version();
	if (sme_version >= ARM_FEAT_SME) {
		gARM_FEAT_SME = 1;
		*commpage_bits |= kHasFeatSME;
	}
	if (sme_version >= ARM_FEAT_SME2) {
		gARM_FEAT_SME2 = 1;
		*commpage_bits |= kHasFeatSME2;
	}
	if (sme_version >= ARM_FEAT_SME2p1) {
		gARM_FEAT_SME2p1 = 1;
		*commpage_bits |= kHasFeatSME2p1;
	}

	/* MTE tiers: 1 = MTE, 2 = MTE2 (+MTEX/MTE_frac qualifiers), 3 = MTE3. */
	uint64_t mte_ver = (pfr1 & ID_AA64PFR1_EL1_MTE_MASK) >> ID_AA64PFR1_EL1_MTE_OFFSET;
	if (mte_ver >= 1) {
		gARM_FEAT_MTE = 1;
	}
	if (mte_ver >= 2) {
		gARM_FEAT_MTE2 = 1;
		if ((pfr1 & ID_AA64PFR1_EL1_MTEX_MASK) >= ID_AA64PFR1_EL1_MTEX_EN) {
			gARM_FEAT_MTE4 = 1;
		}
		/* NOTE(review): MTE_frac == 0 is treated as async-fault support;
		 * confirm against the ARM ARM MTE_frac encoding. */
		if ((pfr1 & ID_AA64PFR1_EL1_MTE_FRAC_MASK) == 0) {
			gARM_FEAT_MTE_ASYNC = 1;
		}
	}
	if (mte_ver >= 3) {
		gARM_FEAT_MTE3 = 1;
	}
}
679 
680 /**
681  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR2_EL1
682  */
683 static void
commpage_init_arm_optional_features_pfr2(__unused uint64_t * commpage_bits)684 commpage_init_arm_optional_features_pfr2(__unused uint64_t *commpage_bits)
685 {
686 	uint64_t pfr2 __unused = __builtin_arm_rsr64("ID_AA64PFR2_EL1");
687 
688 	if ((pfr2 & ID_AA64PFR2_EL1_MTE_STORE_ONLY_MASK) >= ID_AA64PFR2_EL1_MTE_STORE_ONLY_EN) {
689 		gARM_FEAT_MTE_STORE_ONLY = 1;
690 	}
691 
692 }
693 
694 /**
695  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64SMFR0_EL1
696  */
/*
 * Initializes sysctls for EL0 visible features in ID_AA64SMFR0_EL1.
 * Skipped entirely when SME is absent or defeatured.
 */
__attribute__((target("sme")))
static void
commpage_init_arm_optional_features_smfr0(void)
{
	if (arm_sme_version() == 0) {
		/*
		 * We can safely read ID_AA64SMFR0_EL1 on SME-less devices.  But
		 * arm_sme_version() == 0 could also mean that the user
		 * defeatured SME with a boot-arg.
		 */
		return;
	}

	uint64_t smfr0 = __builtin_arm_rsr64("ID_AA64SMFR0_EL1");

	/*
	 * ID_AA64SMFR0_EL1 has to be parsed differently from other feature ID
	 * registers.  See "Alternative ID scheme used for ID_AA64SMFR0_EL1" in
	 * the ARM ARM.
	 */

	/* 1-bit fields: a simple AND suffices. */
	if (smfr0 & ID_AA64SMFR0_EL1_F32F32_EN) {
		gARM_SME_F32F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_BI32I32_EN) {
		gARM_SME_BI32I32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_B16F32_EN) {
		gARM_SME_B16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F16F32_EN) {
		gARM_SME_F16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F64F64_EN) {
		gARM_FEAT_SME_F64F64 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F16F16_EN) {
		gARM_FEAT_SME_F16F16 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_B16B16_EN) {
		gARM_FEAT_SME_B16B16 = 1;
	}

	/* 4-bit fields (0 bits are ignored): all mask bits must be present. */
	if ((smfr0 & ID_AA64SMFR0_EL1_I8I32_EN) == ID_AA64SMFR0_EL1_I8I32_EN) {
		gARM_SME_I8I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I32_EN) == ID_AA64SMFR0_EL1_I16I32_EN) {
		gARM_SME_I16I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I64_EN) == ID_AA64SMFR0_EL1_I16I64_EN) {
		gARM_FEAT_SME_I16I64 = 1;
	}
}
752 
753 static void
commpage_init_arm_optional_features_mmfr1(uint64_t * commpage_bits)754 commpage_init_arm_optional_features_mmfr1(uint64_t *commpage_bits)
755 {
756 	uint64_t bits = 0;
757 	const uint64_t mmfr1 = __builtin_arm_rsr64("ID_AA64MMFR1_EL1");
758 
759 	if ((mmfr1 & ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_EN) {
760 		gARM_FEAT_AFP = 1;
761 		bits |= kHasFeatAFP;
762 	}
763 
764 	*commpage_bits |= bits;
765 }
766 
767 /**
768  * Read the system register @name, attempt to set set bits of @mask if not
769  * already, test if bits were actually set, reset the register to its
770  * previous value if required, and 'return' @mask with only bits that
771  * were successfully set (or already set) in the system register. */
772 #define _test_sys_bits(name, mask) ({ \
773 	const uint64_t src = __builtin_arm_rsr64(#name); \
774     uint64_t test = src | mask; \
775     if (test != src) { \
776 	__builtin_arm_wsr64(#name, test); \
777 	test = __builtin_arm_rsr64(#name); \
778 	if (test != src) { \
779 	    __builtin_arm_wsr64(#name, src); \
780 	}\
781     } \
782     mask & test; \
783 })
784 
785 /**
786  * Reports whether FPU exceptions are supported.
787  * Possible FPU exceptions are :
788  * - input denormal;
789  * - inexact;
790  * - underflow;
791  * - overflow;
792  * - divide by 0;
793  * - invalid operation.
794  *
795  * Any of those can be supported or not but for now, we consider that
796  * it all or nothing : FPU exceptions support flag set <=> all 6 exceptions
797  * a supported.
798  */
799 static void
commpage_init_arm_optional_features_fpcr(uint64_t * commpage_bits)800 commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
801 {
802 	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
803 	    FPCR_DZE | FPCR_IOE;
804 	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
805 	if (FPCR_bits == support_mask) {
806 		gARM_FP_SyncExceptions = 1;
807 		*commpage_bits |= kHasFP_SyncExceptions;
808 	}
809 }
810 
811 
812 /**
813  * Reports whether stateless FEATs are present or not.
814  * Those only depend on the SoC and on previous variables.
815  */
816 static void
commpage_init_arm_optional_features_misc(__unused uint64_t * commpage_bits)817 commpage_init_arm_optional_features_misc(__unused uint64_t *commpage_bits)
818 {
819 	if (gARM_FEAT_MTE4) {
820 		gARM_FEAT_MTE_CANONICAL_TAGS = 1;
821 		gARM_FEAT_MTE_NO_ADDRESS_TAGS = 1;
822 	}
823 }
824 
825 /**
826  * Initializes all commpage entries and sysctls for ARM64 optional features accessible from EL0.
827  */
/*
 * Walk every EL0-visible feature ID register; each sub-initializer sets
 * its sysctl globals and ORs capability bits into *commpage_bits.
 */
static void
commpage_init_arm_optional_features(uint64_t *commpage_bits)
{
	commpage_init_arm_optional_features_isar0(commpage_bits);
	commpage_init_arm_optional_features_isar1(commpage_bits);
	commpage_init_arm_optional_features_isar2();
	commpage_init_arm_optional_features_mmfr0(commpage_bits);
	commpage_init_arm_optional_features_mmfr1(commpage_bits);
	commpage_init_arm_optional_features_mmfr2(commpage_bits);
	commpage_init_arm_optional_features_pfr0(commpage_bits);
	commpage_init_arm_optional_features_pfr1(commpage_bits);
	commpage_init_arm_optional_features_pfr2(commpage_bits);
	commpage_init_arm_optional_features_smfr0();
	commpage_init_arm_optional_features_fpcr(commpage_bits);
	/*
	 * commpage_init_arm_optional_features_misc handles features flags
	 * derived from other feature flags, so it must run last.
	 */
	commpage_init_arm_optional_features_misc(commpage_bits);
}
848 #endif /* __arm64__ */
849 
850 /*
851  * Initialize _cpu_capabilities vector
852  */
/*
 * Build the _cpu_capabilities vector from cache geometry, CPU count, and
 * the optional-feature probes, then publish it to the comm page in both
 * 32-bit (truncated) and 64-bit forms.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the cache line size as one of the kCacheNN bits. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	/* kUP marks a uniprocessor system. */
	if (cpus == 1) {
		bits |= kUP;
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS

	bits |= kHasVfp;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	commpage_init_arm_optional_features(&bits);
#endif



#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	/* Legacy 32-bit slot gets the low word; 64-bit slot gets everything. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;

}
909 
910 /*
911  * Updated every time a logical CPU goes offline/online
912  */
913 void
commpage_update_active_cpus(void)914 commpage_update_active_cpus(void)
915 {
916 	if (!commPagePtr) {
917 		return;
918 	}
919 	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
920 
921 }
922 
923 /*
924  * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
925  */
926 void
commpage_update_timebase(void)927 commpage_update_timebase(void)
928 {
929 	if (commPagePtr) {
930 		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
931 	}
932 }
933 
934 /*
935  * Update the commpage with current kdebug state: whether tracing is enabled, a
936  * typefilter is present, and continuous time should be used for timestamps.
937  *
938  * Disregards configuration and set to 0 if tracing is disabled.
939  */
940 void
commpage_update_kdebug_state(void)941 commpage_update_kdebug_state(void)
942 {
943 	if (commPagePtr) {
944 		uint32_t state = kdebug_commpage_state();
945 		*((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
946 	}
947 }
948 
949 /* Ditto for atm_diagnostic_config */
950 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)951 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
952 {
953 	if (commPagePtr) {
954 		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
955 	}
956 }
957 
958 /*
959  * Update the commpage data with the state of multiuser mode for
960  * this device. Allowing various services in userspace to avoid
961  * IPC in the (more common) non-multiuser environment.
962  */
963 void
commpage_update_multiuser_config(uint32_t multiuser_config)964 commpage_update_multiuser_config(uint32_t multiuser_config)
965 {
966 	if (commPagePtr) {
967 		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
968 	}
969 }
970 
971 /*
972  * update the commpage data for
973  * last known value of mach_absolute_time()
974  */
975 
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	/* Nothing to publish before the commpage has been set up. */
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	/*
	 * Only advance the published value: a single load + compare-and-swap
	 * keeps the slot monotonically non-decreasing across concurrent
	 * updaters without taking a lock.
	 */
	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
1002 
1003 /*
1004  * update the commpage data's total system sleep time for
1005  * userspace call to mach_continuous_time()
1006  */
1007 void
commpage_update_mach_continuous_time(uint64_t sleeptime)1008 commpage_update_mach_continuous_time(uint64_t sleeptime)
1009 {
1010 	if (!commPagePtr) {
1011 		return;
1012 	}
1013 
1014 	uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
1015 
1016 	os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
1017 
1018 }
1019 
1020 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)1021 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
1022 {
1023 	*((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
1024 }
1025 
1026 /*
1027  * update the commpage's value for the boot time
1028  */
1029 void
commpage_update_boottime(uint64_t value)1030 commpage_update_boottime(uint64_t value)
1031 {
1032 	if (!commPagePtr) {
1033 		return;
1034 	}
1035 
1036 	uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
1037 
1038 	os_atomic_store_wide(boottime_usec, value, relaxed);
1039 
1040 }
1041 
1042 /*
1043  * set the commpage's remote time params for
1044  * userspace call to mach_bridge_remote_time()
1045  */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/*
		 * Seqlock-style publish: zero base_local_ts first so a
		 * concurrent user-space reader that sees 0 knows the record
		 * is mid-update and must retry.  The DMB barriers order the
		 * stores so the payload (rate, base_remote_ts) is visible
		 * before the non-zero base_local_ts that validates it.
		 */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;  // This will act as a generation count
#endif /* __arm64__ */
	}
}
1061 
1062 
1063 /*
1064  * update the commpage with if dtrace user land probes are enabled
1065  */
1066 void
commpage_update_dof(boolean_t enabled)1067 commpage_update_dof(boolean_t enabled)
1068 {
1069 #if CONFIG_DTRACE
1070 	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
1071 #else
1072 	(void)enabled;
1073 #endif
1074 }
1075 
1076 /*
1077  * update the dyld global config flags
1078  */
1079 void
commpage_update_dyld_flags(uint64_t value)1080 commpage_update_dyld_flags(uint64_t value)
1081 {
1082 	*((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
1083 
1084 }
1085 
1086 /*
1087  * update the APT active indicator
1088  */
1089 void
commpage_update_apt_active(bool active)1090 commpage_update_apt_active(bool active)
1091 {
1092 	uint8_t *slot = (uint8_t *)(void *)(_COMM_PAGE_APT_ACTIVE + _COMM_PAGE_RW_OFFSET);
1093 	os_atomic_store(slot, active ? 1 : 0, relaxed);
1094 }
1095 
1096 /*
1097  * set the Extended Research Mode active indicator
1098  */
void
commpage_set_erm_active(bool active)
{
	/*
	 * The ERM indicator may only be written before lockdown; afterwards
	 * the request is silently dropped (with a kprintf diagnostic on
	 * DEVELOPMENT/DEBUG kernels).
	 */
	if (startup_phase < STARTUP_SUB_LOCKDOWN) {
		uint8_t *slot = (uint8_t *)(void *)(_COMM_PAGE_SECURITY_RESEARCH_DEVICE_ERM_ACTIVE + _COMM_PAGE_RW_OFFSET);
		os_atomic_store(slot, active ? 1 : 0, relaxed);
	}
#if DEVELOPMENT || DEBUG
	/* NOTE: this `else` pairs with the `if` above only on DEV/DEBUG builds. */
	else {
		kprintf("ERROR can't set ERM bit at startup_phase 0x%x. Action is ignored\n", startup_phase);
	}
#endif
}
1112