xref: /xnu-8792.61.2/osfmk/arm/commpage/commpage.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4  *
5  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6  *
7  * This file contains Original Code and/or Modifications of Original Code
8  * as defined in and that are subject to the Apple Public Source License
9  * Version 2.0 (the 'License'). You may not use this file except in
10  * compliance with the License. The rights granted to you under the License
11  * may not be used to create, or enable the creation or redistribution of,
12  * unlawful or unlicensed copies of an Apple operating system, or to
13  * circumvent, violate, or enable the circumvention or violation of, any
14  * terms of an Apple operating system software license agreement.
15  *
16  * Please obtain a copy of the License at
17  * http://www.opensource.apple.com/apsl/ and read it before using this file.
18  *
19  * The Original Code and all software distributed under the License are
20  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24  * Please see the License for the specific language governing rights and
25  * limitations under the License.
26  *
27  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28  */
29 /*
30  * @OSF_COPYRIGHT@
31  */
32 /*
33  * @APPLE_FREE_COPYRIGHT@
34  */
35 /*
36  *	File:		arm/commpage/commpage.c
37  *	Purpose:	Set up and export a RO/RW page
38  */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
52 #include <arm/rtclock.h>
53 #include <libkern/OSAtomic.h>
54 #include <stdatomic.h>
55 #include <kern/remote_time.h>
56 #include <machine/atomic.h>
57 #include <machine/machine_remote_time.h>
58 #include <machine/machine_routines.h>
59 
60 #include <sys/kdebug.h>
61 
62 #if CONFIG_ATM
63 #include <atm/atm_internal.h>
64 #endif
65 
66 static int commpage_cpus( void );
67 
68 
69 static void commpage_init_cpu_capabilities( void );
70 
71 SECURITY_READ_ONLY_LATE(vm_address_t)   commPagePtr = 0;
72 SECURITY_READ_ONLY_LATE(vm_address_t)   sharedpage_rw_addr = 0;
73 SECURITY_READ_ONLY_LATE(vm_address_t)   sharedpage_kernel_ro_addr = 0;
74 SECURITY_READ_ONLY_LATE(uint64_t)       _cpu_capabilities = 0;
75 SECURITY_READ_ONLY_LATE(vm_address_t)   sharedpage_rw_text_addr = 0;
76 
77 extern user64_addr_t commpage_text64_location;
78 extern user32_addr_t commpage_text32_location;
79 
80 /* For sysctl access from BSD side */
81 extern int gARMv8Crc32;
82 extern int gARMv8Gpi;
83 extern int gARM_FEAT_FlagM;
84 extern int gARM_FEAT_FlagM2;
85 extern int gARM_FEAT_FHM;
86 extern int gARM_FEAT_DotProd;
87 extern int gARM_FEAT_SHA3;
88 extern int gARM_FEAT_RDM;
89 extern int gARM_FEAT_LSE;
90 extern int gARM_FEAT_SHA256;
91 extern int gARM_FEAT_SHA512;
92 extern int gARM_FEAT_SHA1;
93 extern int gARM_FEAT_AES;
94 extern int gARM_FEAT_PMULL;
95 extern int gARM_FEAT_SPECRES;
96 extern int gARM_FEAT_SB;
97 extern int gARM_FEAT_FRINTTS;
98 extern int gARM_FEAT_LRCPC;
99 extern int gARM_FEAT_LRCPC2;
100 extern int gARM_FEAT_FCMA;
101 extern int gARM_FEAT_JSCVT;
102 extern int gARM_FEAT_PAuth;
103 extern int gARM_FEAT_PAuth2;
104 extern int gARM_FEAT_FPAC;
105 extern int gARM_FEAT_DPB;
106 extern int gARM_FEAT_DPB2;
107 extern int gARM_FEAT_BF16;
108 extern int gARM_FEAT_I8MM;
109 extern int gARM_FEAT_ECV;
110 extern int gARM_FEAT_LSE2;
111 extern int gARM_FEAT_CSV2;
112 extern int gARM_FEAT_CSV3;
113 extern int gARM_FEAT_DIT;
114 extern int gARM_AdvSIMD;
115 extern int gARM_AdvSIMD_HPFPCvt;
116 extern int gARM_FEAT_FP16;
117 extern int gARM_FEAT_SSBS;
118 extern int gARM_FEAT_BTI;
119 extern int gARM_FP_SyncExceptions;
120 
121 extern int      gUCNormalMem;
122 
/*
 * Create and populate the commpages.
 *
 * Called once at boot: asks the pmap layer to create the RW data, RO data,
 * and text commpages, records their kernel-visible addresses in the globals
 * above, then fills in the static fields userspace reads (signature,
 * version, cache line size, CPU counts, memory size, page shifts, ...).
 */
void
commpage_populate(void)
{
	uint16_t        c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
	pmap_create_sharedpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);

	sharedpage_rw_addr = kernel_data_addr;
	sharedpage_rw_text_addr = kernel_text_addr;
	sharedpage_kernel_ro_addr = kernel_ro_data_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	/* The signature string lets userspace sanity-check the page contents. */
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	/* A zero tick marks the gettimeofday data invalid until the clock is up. */
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Export the cache line size chosen by commpage_init_cpu_capabilities(). */
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	/* The debugger flag is published twice: legacy RW slot and RO slot. */
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif


	/* Sentinel tells mach_bridge_remote_time() the params are not yet valid. */
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
}
210 
211 #define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
212 #define COMMPAGE_TEXT_SECTION "__commpage_text"
213 
/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
216 extern char commpage_text_start[]
217 __SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
218 
/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
221 extern char commpage_text_end[]
222 __SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
223 
224 /* This is defined in the commpage text section as a symbol at the start of the preemptible
225  * functions */
226 extern char commpage_text_preemptible_functions;
227 
228 #if CONFIG_ARM_PFZ
229 static size_t size_of_pfz = 0;
230 #endif
231 
232 /* This is the opcode for brk #666 */
233 #define BRK_666_OPCODE 0xD4205340
234 
/*
 * Copy the ARM PFZ (preemption-free zone) code into the user-visible comm
 * text page and backfill the unused remainder with trap instructions.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);
	/* AArch64 instructions are 4 bytes, so the section size must be too. */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Populate the rest of the page with brk #666 so that undefined code
	 * doesn't get run.  memset() only replicates its low byte and therefore
	 * cannot store the 4-byte opcode; write the full opcode word by word.
	 */
	uint32_t *fill_start = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	size_t fill_words = (PAGE_SIZE - size_of_commpage_text) / sizeof(uint32_t);
	for (size_t i = 0; i < fill_words; i++) {
		fill_start[i] = BRK_666_OPCODE;
	}
#endif
}
258 
259 uint32_t
commpage_is_in_pfz64(addr64_t addr64)260 commpage_is_in_pfz64(addr64_t addr64)
261 {
262 #if CONFIG_ARM_PFZ
263 	if ((addr64 >= commpage_text64_location) &&
264 	    (addr64 < (commpage_text64_location + size_of_pfz))) {
265 		return 1;
266 	} else {
267 		return 0;
268 	}
269 #else
270 #pragma unused (addr64)
271 	return 0;
272 #endif
273 }
274 
275 
/*
 * Publish new gettimeofday() data to the commpage.
 *
 * Lock-free publication protocol: TimeStamp_tick doubles as the validity /
 * generation word.  It is zeroed first, the payload fields are written, and
 * only then is the real tick value stored, with DMB ISH barriers ordering
 * the three phases.  A userspace reader that observes a stable, non-zero
 * TimeStamp_tick around its reads is guaranteed a consistent snapshot.
 */
void
commpage_set_timestamp(
	uint64_t        tbr,
	uint64_t        secs,
	uint64_t        frac,
	uint64_t        scale,
	uint64_t        tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap =  (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Invalidate the record before touching the payload. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	/* Re-validate: publishing the tick completes the update. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;

}
306 
307 /*
308  * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
309  */
310 
311 void
commpage_set_memory_pressure(unsigned int pressure)312 commpage_set_memory_pressure(
313 	unsigned int    pressure )
314 {
315 	if (commPagePtr == 0) {
316 		return;
317 	}
318 	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
319 }
320 
321 /*
322  * Determine number of CPUs on this system.
323  */
324 static int
commpage_cpus(void)325 commpage_cpus( void )
326 {
327 	int cpus;
328 
329 	cpus = machine_info.max_cpus;
330 
331 	if (cpus == 0) {
332 		panic("commpage cpus==0");
333 	}
334 	if (cpus > 0xFF) {
335 		cpus = 0xFF;
336 	}
337 
338 	return cpus;
339 }
340 
/* Return the cached _cpu_capabilities vector for in-kernel callers. */
uint64_t
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}
346 
/* Return the kernel (writable) mapping of the RW data commpage. */
vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}
352 
/* Return the kernel mapping of the read-only data commpage. */
vm_address_t
_get_commpage_ro_address(void)
{
	return sharedpage_kernel_ro_addr;
}
358 
/* Return the kernel (writable) mapping of the commpage text page. */
vm_address_t
_get_commpage_text_priv_address(void)
{
	return sharedpage_rw_text_addr;
}
364 
365 #if defined(__arm64__)
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64ISAR0_EL1 (flag manipulation, FHM, dot product, SHA, RDM, atomics,
 * CRC32).  Sets the matching gARM_* sysctl globals and ORs capability bits
 * into *commpage_bits.
 */
static void
commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");

	/* TS field: FLAGM at level 1, FLAGM2 at level 2 (levels are cumulative). */
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
		gARM_FEAT_FlagM = 1;
		bits |= kHasFEATFlagM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
		gARM_FEAT_FlagM2 = 1;
		bits |= kHasFEATFlagM2;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		gARM_FEAT_FHM = 1;
		bits |= kHasFeatFHM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
		gARM_FEAT_DotProd = 1;
		bits |= kHasFeatDotProd;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		gARM_FEAT_SHA3 = 1;
		bits |= kHasFeatSHA3;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
		gARM_FEAT_RDM = 1;
		bits |= kHasFeatRDM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		gARM_FEAT_LSE = 1;
		bits |= kHasFeatLSE;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
		gARM_FEAT_SHA512 = 1;
		bits |= kHasFeatSHA512;
	}
	/* CRC32 uses an exact match rather than >= (unlike the checks above). */
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		gARMv8Crc32 = 1;
		bits |= kHasARMv8Crc32;
	}

#if __ARM_V8_CRYPTO_EXTENSIONS__
	/**
	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
	 * supported when it actually is. To work around this, for all of the crypto
	 * extensions, just check if they're supported using the board_config.h
	 * values.
	 */
	gARM_FEAT_PMULL = 1;
	gARM_FEAT_SHA1 = 1;
	gARM_FEAT_AES = 1;
	gARM_FEAT_SHA256 = 1;
	bits |= kHasARMv8Crypto;
#endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */

	*commpage_bits |= bits;
}
428 
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64ISAR1_EL1 (SPECRES, SB, FRINTTS, GPI, LRCPC, FCMA, JSCVT, pointer
 * authentication, DPB, BF16, I8MM).  Some features only set a sysctl global
 * and have no commpage capability bit.
 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only advertised when EL0 use is enabled via SCTLR.EnRCTX. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARMv8Gpi = 1;
		bits |= kHasArmv8GPI;
	}
	/* LRCPC field is cumulative: level 1 = LRCPC, level 2 adds LRCPC2. */
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	/* API field is cumulative: PAuth, then PAuth2, then FPAC. */
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;   /* sysctl only; no commpage bit */
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;     /* sysctl only; no commpage bit */
	}
	/* DPB field is cumulative: DPB, then DPB2. */
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;     /* sysctl only; no commpage bit */
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;     /* sysctl only; no commpage bit */
	}

	*commpage_bits |= bits;
}
499 
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64MMFR0_EL1.
 */
static void
commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");

	/* FEAT_ECV is exported via sysctl only; it has no commpage bit. */
	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
		gARM_FEAT_ECV = 1;
	}

	/* 'bits' is currently always 0 here; kept for symmetry with siblings. */
	*commpage_bits |= bits;
}
515 
516 /**
517  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
518  */
519 static void
commpage_init_arm_optional_features_mmfr2(uint64_t * commpage_bits)520 commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
521 {
522 	uint64_t bits = 0;
523 	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");
524 
525 	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
526 		gARM_FEAT_LSE2 = 1;
527 		bits |= kHasFeatLSE2;
528 	}
529 
530 	*commpage_bits |= bits;
531 }
532 
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in
 * ID_AA64PFR0_EL1 (CSV2/CSV3, DIT, AdvSIMD and FP16).
 */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
		gARM_FEAT_DIT = 1;
		bits |= kHasFeatDIT;
	}
	/* The sub-features below only apply when AdvSIMD is not disabled. */
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}
569 
570 /**
571  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
572  */
573 static void
commpage_init_arm_optional_features_pfr1(uint64_t * commpage_bits __unused)574 commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits __unused)
575 {
576 	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");
577 
578 	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
579 		gARM_FEAT_SSBS = 1;
580 	}
581 
582 	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
583 		gARM_FEAT_BTI = 1;
584 	}
585 }
586 
587 
/**
 * Read the system register @name, attempt to set the bits of @mask that are
 * not already set, test whether the bits actually stuck, restore the
 * register to its previous value if required, and 'return' @mask with only
 * the bits that were successfully set (or already set) in the register.
 */
#define _test_sys_bits(name, mask) ({ \
	const uint64_t src = __builtin_arm_rsr64(#name); \
    uint64_t test = src | mask; \
    if (test != src) { \
	__builtin_arm_wsr64(#name, test); \
	test = __builtin_arm_rsr64(#name); \
	if (test != src) { \
	    __builtin_arm_wsr64(#name, src); \
	}\
    } \
    mask & test; \
})

/**
 * Reports whether FPU exceptions are supported.
 * Possible FPU exceptions are:
 * - input denormal;
 * - inexact;
 * - underflow;
 * - overflow;
 * - divide by 0;
 * - invalid operation.
 *
 * Any of these may individually be supported or not, but for now we treat
 * support as all-or-nothing: the FPU-exceptions flag is set if and only if
 * all 6 exception-enable bits can be set in FPCR.
 */
static void
commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
{
	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
	    FPCR_DZE | FPCR_IOE;
	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
	if (FPCR_bits == support_mask) {
		gARM_FP_SyncExceptions = 1;
		*commpage_bits |= kHasFP_SyncExceptions;
	}
}
631 
632 /**
633  * Initializes all commpage entries and sysctls for ARM64 optional features accessible from EL0.
634  */
635 static void
commpage_init_arm_optional_features(uint64_t * commpage_bits)636 commpage_init_arm_optional_features(uint64_t *commpage_bits)
637 {
638 	commpage_init_arm_optional_features_isar0(commpage_bits);
639 	commpage_init_arm_optional_features_isar1(commpage_bits);
640 	commpage_init_arm_optional_features_mmfr0(commpage_bits);
641 	commpage_init_arm_optional_features_mmfr2(commpage_bits);
642 	commpage_init_arm_optional_features_pfr0(commpage_bits);
643 	commpage_init_arm_optional_features_pfr1(commpage_bits);
644 	commpage_init_arm_optional_features_fpcr(commpage_bits);
645 }
646 #endif /* __arm64__ */
647 
/*
 * Initialize the _cpu_capabilities vector and publish it to the commpage
 * (both the legacy 32-bit slot and the full 64-bit slot).
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the cache line size as a capability flag. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS

	bits |= kHasVfp;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	/* Probe the ID registers for optional ISA features. */
	commpage_init_arm_optional_features(&bits);
#endif



#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	/* Publish both the truncated 32-bit view and the full 64-bit view. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;

}
707 
708 /*
709  * Updated every time a logical CPU goes offline/online
710  */
711 void
commpage_update_active_cpus(void)712 commpage_update_active_cpus(void)
713 {
714 	if (!commPagePtr) {
715 		return;
716 	}
717 	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
718 
719 }
720 
721 /*
722  * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
723  */
724 void
commpage_update_timebase(void)725 commpage_update_timebase(void)
726 {
727 	if (commPagePtr) {
728 		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
729 	}
730 }
731 
732 /*
733  * Update the commpage with current kdebug state: whether tracing is enabled, a
734  * typefilter is present, and continuous time should be used for timestamps.
735  *
736  * Disregards configuration and set to 0 if tracing is disabled.
737  */
738 void
commpage_update_kdebug_state(void)739 commpage_update_kdebug_state(void)
740 {
741 	if (commPagePtr) {
742 		uint32_t state = kdebug_commpage_state();
743 		*((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
744 	}
745 }
746 
747 /* Ditto for atm_diagnostic_config */
748 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)749 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
750 {
751 	if (commPagePtr) {
752 		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
753 	}
754 }
755 
756 /*
757  * Update the commpage data with the state of multiuser mode for
758  * this device. Allowing various services in userspace to avoid
759  * IPC in the (more common) non-multiuser environment.
760  */
761 void
commpage_update_multiuser_config(uint32_t multiuser_config)762 commpage_update_multiuser_config(uint32_t multiuser_config)
763 {
764 	if (commPagePtr) {
765 		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
766 	}
767 }
768 
/*
 * update the commpage data for
 * last known value of mach_absolute_time()
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	/* Only advance the published value; never move it backwards. */
	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
800 
801 /*
802  * update the commpage data's total system sleep time for
803  * userspace call to mach_continuous_time()
804  */
805 void
commpage_update_mach_continuous_time(uint64_t sleeptime)806 commpage_update_mach_continuous_time(uint64_t sleeptime)
807 {
808 	if (!commPagePtr) {
809 		return;
810 	}
811 
812 	uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
813 
814 	os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
815 
816 }
817 
818 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)819 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
820 {
821 	*((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
822 }
823 
824 /*
825  * update the commpage's value for the boot time
826  */
827 void
commpage_update_boottime(uint64_t value)828 commpage_update_boottime(uint64_t value)
829 {
830 	if (!commPagePtr) {
831 		return;
832 	}
833 
834 	uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
835 
836 	os_atomic_store_wide(boottime_usec, value, relaxed);
837 
838 }
839 
/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 *
 * Publication protocol mirrors commpage_set_timestamp(): base_local_ts acts
 * as the generation word.  It is zeroed, the payload is written, and the
 * real value is stored last, with DMB ISH barriers ordering the phases so a
 * userspace reader can detect a torn update.
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate before writing the payload. */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;  //This will act as a generation count
#endif /* __arm64__ */
	}
}
859 
860 
/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, therefore only useful
 * outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 *
 * Returns the counter value prior to the increment (0 if the commpage is
 * not yet set up).
 */
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr) {
		return 0;
	}

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32bit architectures, double-wide atomic load or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64bit architectures however, because the update is synchronized by
	 * the cpu mask, relaxed loads and stores is more efficient.
	 */
#if __LP64__
	old_gen = os_atomic_load(sched_gen, relaxed);
	os_atomic_store(sched_gen, old_gen + 1, relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}
898 
899 /*
900  * update the commpage with if dtrace user land probes are enabled
901  */
902 void
commpage_update_dof(boolean_t enabled)903 commpage_update_dof(boolean_t enabled)
904 {
905 #if CONFIG_DTRACE
906 	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
907 #else
908 	(void)enabled;
909 #endif
910 }
911 
912 /*
913  * update the dyld global config flags
914  */
915 void
commpage_update_dyld_flags(uint64_t value)916 commpage_update_dyld_flags(uint64_t value)
917 {
918 	*((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
919 
920 }
921