xref: /xnu-8019.80.24/osfmk/arm/commpage/commpage.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		arm/commpage/commpage.c
 *	Purpose:	Set up and export a RO/RW page
 */
#include <libkern/section_keywords.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <ipc/ipc_port.h>
#include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>
#include <kern/remote_time.h>
#include <machine/atomic.h>
#include <machine/machine_remote_time.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

static int commpage_cpus( void );

static void commpage_init_cpu_capabilities( void );

SECURITY_READ_ONLY_LATE(vm_address_t)   commPagePtr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)   sharedpage_rw_addr = 0;
SECURITY_READ_ONLY_LATE(uint64_t)       _cpu_capabilities = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)   sharedpage_rw_text_addr = 0;

extern user64_addr_t commpage_text64_location;
extern user32_addr_t commpage_text32_location;

/* For sysctl access from BSD side */
extern int gARMv8Crc32;
extern int gARMv8Gpi;
extern int gARM_FEAT_FlagM;
extern int gARM_FEAT_FlagM2;
extern int gARM_FEAT_FHM;
extern int gARM_FEAT_DotProd;
extern int gARM_FEAT_SHA3;
extern int gARM_FEAT_RDM;
extern int gARM_FEAT_LSE;
extern int gARM_FEAT_SHA256;
extern int gARM_FEAT_SHA512;
extern int gARM_FEAT_SHA1;
extern int gARM_FEAT_AES;
extern int gARM_FEAT_PMULL;
extern int gARM_FEAT_SPECRES;
extern int gARM_FEAT_SB;
extern int gARM_FEAT_FRINTTS;
extern int gARM_FEAT_LRCPC;
extern int gARM_FEAT_LRCPC2;
extern int gARM_FEAT_FCMA;
extern int gARM_FEAT_JSCVT;
extern int gARM_FEAT_PAuth;
extern int gARM_FEAT_PAuth2;
extern int gARM_FEAT_FPAC;
extern int gARM_FEAT_DPB;
extern int gARM_FEAT_DPB2;
extern int gARM_FEAT_BF16;
extern int gARM_FEAT_I8MM;
extern int gARM_FEAT_ECV;
extern int gARM_FEAT_LSE2;
extern int gARM_FEAT_CSV2;
extern int gARM_FEAT_CSV3;
extern int gARM_AdvSIMD;
extern int gARM_AdvSIMD_HPFPCvt;
extern int gARM_FEAT_FP16;
extern int gARM_FEAT_SSBS;
extern int gARM_FEAT_BTI;

extern int      gUCNormalMem;

void
commpage_populate(void)
{
	uint16_t        c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, user_text_addr;
	pmap_create_sharedpages(&kernel_data_addr, &kernel_text_addr, &user_text_addr);

	sharedpage_rw_addr = kernel_data_addr;
	sharedpage_rw_text_addr = kernel_text_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#else
	commpage_text32_location = user_text_addr;
	bcopy(_COMM_PAGE32_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE32_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	commpage_set_timestamp(0, 0, 0, 0, 0);

	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * Set the commpage approximate time to zero for initialization;
	 * the scheduler will populate the correct value before running a
	 * user thread.
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
}

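/*
 * Usage sketch (user space, not part of this file): a minimal reader of
 * the fields commpage_populate() fills in, assuming the private
 * <System/machine/cpu_capabilities.h> header, whose _COMM_PAGE_* macros
 * resolve to user-visible commpage addresses in user-space builds:
 *
 *	uint8_t  ncpus = *(volatile uint8_t  *)_COMM_PAGE_LOGICAL_CPUS;
 *	uint16_t line  = *(volatile uint16_t *)_COMM_PAGE_CACHE_LINESIZE;
 */
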
#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
#define COMMPAGE_TEXT_SECTION "__commpage_text"

/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
extern char commpage_text_start[]
__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
extern char commpage_text_end[]
__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* This is defined in the commpage text section as a symbol at the start of the preemptible
 * functions */
extern char commpage_text_preemptible_functions;

#if CONFIG_ARM_PFZ
static size_t size_of_pfz = 0;
#endif

/* This is the opcode for brk #666 */
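/* A64 BRK #imm16 encodes as 0xD4200000 | (imm16 << 5); 666 is 0x29A, and 0x29A << 5 is 0x5340. */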
#define BRK_666_OPCODE 0xD4205340

void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	// Populate the rest of the page with brk #666 so that undefined code
	// doesn't get run. memset() would truncate the opcode to its low byte,
	// so store the full 32-bit opcode one word at a time.
	uint32_t *cursor = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	uint32_t *end = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + PAGE_SIZE);
	while (cursor < end) {
		*cursor++ = BRK_666_OPCODE;
	}
#endif
}

uint32_t
commpage_is_in_pfz64(addr64_t addr64)
{
#if CONFIG_ARM_PFZ
	if ((addr64 >= commpage_text64_location) &&
	    (addr64 < (commpage_text64_location + size_of_pfz))) {
		return 1;
	} else {
		return 0;
	}
#else
#pragma unused (addr64)
	return 0;
#endif
}

void
commpage_set_timestamp(
	uint64_t        tbr,
	uint64_t        secs,
	uint64_t        frac,
	uint64_t        scale,
	uint64_t        tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	commpage_timeofday_datap->TimeStamp_tick = tbr;
}

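/*
 * Reader-side sketch (user space, not part of this file): the store
 * sequence above is a sequence lock with TimeStamp_tick as the generation
 * word, 0 meaning "update in flight".  A minimal reader, assuming `p`
 * points at the user-visible copy of the data:
 *
 *	uint64_t tick, secs, frac;
 *	do {
 *		tick = p->TimeStamp_tick;
 *		__builtin_arm_dmb(DMB_ISH);
 *		secs = p->TimeStamp_sec;
 *		frac = p->TimeStamp_frac;
 *		__builtin_arm_dmb(DMB_ISH);
 *	} while (tick == 0 || tick != p->TimeStamp_tick);
 */
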
/*
 * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
 */

void
commpage_set_memory_pressure(
	unsigned int    pressure )
{
	if (commPagePtr == 0) {
		return;
	}
	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
}

/*
 * Determine number of CPUs on this system.
 */
static int
commpage_cpus( void )
{
	int cpus;

	cpus = machine_info.max_cpus;

	if (cpus == 0) {
		panic("commpage cpus==0");
	}
	if (cpus > 0xFF) {
		cpus = 0xFF;
	}

	return cpus;
}

uint64_t
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}

vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}

vm_address_t
_get_commpage_text_priv_address(void)
{
	return sharedpage_rw_text_addr;
}

#if defined(__arm64__)
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
 */
static void
commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");

	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
		gARM_FEAT_FlagM = 1;
		bits |= kHasFEATFlagM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
		gARM_FEAT_FlagM2 = 1;
		bits |= kHasFEATFlagM2;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		gARM_FEAT_FHM = 1;
		bits |= kHasFeatFHM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
		gARM_FEAT_DotProd = 1;
		bits |= kHasFeatDotProd;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		gARM_FEAT_SHA3 = 1;
		bits |= kHasFeatSHA3;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
		gARM_FEAT_RDM = 1;
		bits |= kHasFeatRDM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		gARM_FEAT_LSE = 1;
		bits |= kHasFeatLSE;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
		gARM_FEAT_SHA512 = 1;
		bits |= kHasFeatSHA512;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		gARMv8Crc32 = 1;
		bits |= kHasARMv8Crc32;
	}

#if __ARM_V8_CRYPTO_EXTENSIONS__
	/**
	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
	 * supported when it actually is. To work around this, for all of the crypto
	 * extensions, just check if they're supported using the board_config.h
	 * values.
	 */
	gARM_FEAT_PMULL = 1;
	gARM_FEAT_SHA1 = 1;
	gARM_FEAT_AES = 1;
	gARM_FEAT_SHA256 = 1;
	bits |= kHasARMv8Crypto;
#endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARMv8Gpi = 1;
		bits |= kHasArmv8GPI;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR0_EL1
 */
static void
commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");

	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
		gARM_FEAT_ECV = 1;
	}

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
 */
static void
commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");

	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
		gARM_FEAT_LSE2 = 1;
		bits |= kHasFeatLSE2;
	}

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
 */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
 */
static void
commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits __unused)
{
	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");

	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
		gARM_FEAT_SSBS = 1;
	}

	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
		gARM_FEAT_BTI = 1;
	}
}

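/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR1_EL1
 */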
static void
commpage_init_arm_optional_features_mmfr1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	const uint64_t mmfr1 = __builtin_arm_rsr64("ID_AA64MMFR1_EL1");

	if ((mmfr1 & ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_EN) {
		bits |= kHasARMv87AFP;
	}

	*commpage_bits |= bits;
}

/**
 * Initializes all commpage entries and sysctls for ARM64 optional features accessible from EL0.
 */
static void
commpage_init_arm_optional_features(uint64_t *commpage_bits)
{
	commpage_init_arm_optional_features_isar0(commpage_bits);
	commpage_init_arm_optional_features_isar1(commpage_bits);
	commpage_init_arm_optional_features_mmfr0(commpage_bits);
	commpage_init_arm_optional_features_mmfr1(commpage_bits);
	commpage_init_arm_optional_features_mmfr2(commpage_bits);
	commpage_init_arm_optional_features_pfr0(commpage_bits);
	commpage_init_arm_optional_features_pfr1(commpage_bits);
}
#endif /* __arm64__ */

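/*
 * Usage sketch (user space, not part of this file): the gARM_FEAT_*
 * globals set above back hw.optional sysctls exported by the BSD side.
 * A minimal check, assuming the hw.optional.arm.FEAT_LSE name is
 * published on the running release:
 *
 *	#include <sys/sysctl.h>
 *
 *	int feat = 0;
 *	size_t len = sizeof(feat);
 *	if (sysctlbyname("hw.optional.arm.FEAT_LSE", &feat, &len, NULL, 0) != 0) {
 *		feat = 0;   // unknown name: treat the feature as absent
 *	}
 */
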
/*
 * Initialize _cpu_capabilities vector
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS

#if     __ARM_VFP__
	bits |= kHasVfp;
#if defined(__arm__)
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon) {
		bits |= kHasNeon;
	}
	if (mvfp_info->neon_hpfp) {
		bits |= kHasNeonHPFP;
	}
	if (mvfp_info->neon_fp16) {
		bits |= kHasNeonFP16;
	}
#endif /* defined(__arm__) */
#endif
#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	commpage_init_arm_optional_features(&bits);
#endif

#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}

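/*
 * Usage sketch (user space, not part of this file): testing the published
 * capability bits, assuming the kHas* and kNumCPUsShift constants from
 * the private <System/machine/cpu_capabilities.h>:
 *
 *	uint64_t caps = *(volatile uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64;
 *	if (caps & kHasFeatLSE) {
 *		// take an LSE-atomics fast path
 *	}
 *	unsigned ncpus = (caps >> kNumCPUsShift) & 0xFF;
 */
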
/*
 * Updated every time a logical CPU goes offline/online
 */
void
commpage_update_active_cpus(void)
{
	if (!commPagePtr) {
		return;
	}
	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
}

/*
 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
 */
void
commpage_update_timebase(void)
{
	if (commPagePtr) {
		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
	}
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
	}
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
	}
}

/*
 * Update the commpage data with the state of multiuser mode for this
 * device, allowing various services in userspace to avoid IPC in the
 * (more common) non-multiuser environment.
 */
void
commpage_update_multiuser_config(uint32_t multiuser_config)
{
	if (commPagePtr) {
		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
	}
}

/*
 * update the commpage data with the last known value of
 * mach_absolute_time(), never letting the published value go backwards
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	if (saved_data < abstime) {
		/*
		 * Ignore the success/fail return value, assuming that if the
		 * value has been updated since we last read it, someone else
		 * has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}

#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}

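/*
 * Reader-side sketch (user space, not part of this file): an
 * approximate-time read can fall back to the exact clock when the
 * commpage does not support it, assuming the private _COMM_PAGE_* macros:
 *
 *	uint64_t t;
 *	if (*(volatile uint8_t *)_COMM_PAGE_APPROX_TIME_SUPPORTED) {
 *		t = *(volatile uint64_t *)_COMM_PAGE_APPROX_TIME;
 *	} else {
 *		t = mach_absolute_time();
 *	}
 */
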
/*
 * update the commpage data's total system sleep time for
 * userspace call to mach_continuous_time()
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	if (!commPagePtr) {
		return;
	}

	uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);

	os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
}

void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
{
	*((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
}

/*
 * update the commpage's value for the boot time
 */
void
commpage_update_boottime(uint64_t value)
{
	if (!commPagePtr) {
		return;
	}

	uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);

	os_atomic_store_wide(boottime_usec, value, relaxed);
}

/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;  // This will act as a generation count
#else
		(void)rate;
		(void)base_local_ts;
		(void)base_remote_ts;
#endif /* __arm64__ */
	}
}

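/*
 * Reader-side sketch (user space, not part of this file): base_local_ts
 * doubles as the generation word, with 0 meaning "update in flight",
 * mirroring commpage_set_timestamp().  Assuming `p` points at the
 * user-visible copy of struct bt_params and `local_ts` is a local
 * timebase reading to convert:
 *
 *	struct bt_params snap;
 *	do {
 *		snap.base_local_ts = p->base_local_ts;
 *		__builtin_arm_dmb(DMB_ISH);
 *		snap.rate = p->rate;
 *		snap.base_remote_ts = p->base_remote_ts;
 *		__builtin_arm_dmb(DMB_ISH);
 *	} while (snap.base_local_ts == 0 || snap.base_local_ts != p->base_local_ts);
 *	uint64_t remote_ts = snap.base_remote_ts +
 *	    (uint64_t)((double)(local_ts - snap.base_local_ts) * snap.rate);
 */
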

/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, therefore only useful
 * outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 */
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr) {
		return 0;
	}

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32-bit architectures, double-wide atomic loads or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64-bit architectures, however, because the update is synchronized
	 * by the cpu mask, relaxed loads and stores are more efficient.
	 */
#if __LP64__
	old_gen = os_atomic_load(sched_gen, relaxed);
	os_atomic_store(sched_gen, old_gen + 1, relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}

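/*
 * Usage sketch (user space, not part of this file): per the comment above,
 * a caller that needs every running CPU to have passed a barrier waits for
 * two increments.  Assuming `ctr` points at the user-visible counter:
 *
 *	uint64_t start = *(volatile uint64_t *)ctr;
 *	while (*(volatile uint64_t *)ctr - start < 2) {
 *		sched_yield();   // wait out two quiescent generations
 *	}
 */
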
/*
 * update the commpage with whether DTrace userland probes are enabled
 */
void
commpage_update_dof(boolean_t enabled)
{
#if CONFIG_DTRACE
	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
#else
	(void)enabled;
#endif
}

/*
 * update the dyld global config flags
 */
void
commpage_update_dyld_flags(uint64_t value)
{
	*((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
}