xref: /xnu-10002.81.5/osfmk/arm/commpage/commpage.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4  *
5  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6  *
7  * This file contains Original Code and/or Modifications of Original Code
8  * as defined in and that are subject to the Apple Public Source License
9  * Version 2.0 (the 'License'). You may not use this file except in
10  * compliance with the License. The rights granted to you under the License
11  * may not be used to create, or enable the creation or redistribution of,
12  * unlawful or unlicensed copies of an Apple operating system, or to
13  * circumvent, violate, or enable the circumvention or violation of, any
14  * terms of an Apple operating system software license agreement.
15  *
16  * Please obtain a copy of the License at
17  * http://www.opensource.apple.com/apsl/ and read it before using this file.
18  *
19  * The Original Code and all software distributed under the License are
20  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24  * Please see the License for the specific language governing rights and
25  * limitations under the License.
26  *
27  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28  */
29 /*
30  * @OSF_COPYRIGHT@
31  */
32 /*
33  * @APPLE_FREE_COPYRIGHT@
34  */
35 /*
36  *	File:		arm/commpage/commpage.c
37  *	Purpose:	Set up and export a RO/RW page
38  */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
52 #include <arm/misc_protos.h>
53 #include <arm/rtclock.h>
54 #include <libkern/OSAtomic.h>
55 #include <stdatomic.h>
56 #include <kern/remote_time.h>
57 #include <kern/smr.h>
58 #include <machine/atomic.h>
59 #include <machine/machine_remote_time.h>
60 #include <machine/machine_routines.h>
61 
62 #include <sys/kdebug.h>
63 
64 #if CONFIG_ATM
65 #include <atm/atm_internal.h>
66 #endif
67 
68 static int commpage_cpus( void );
69 
70 
71 static void commpage_init_cpu_capabilities( void );
72 
73 SECURITY_READ_ONLY_LATE(vm_address_t)   commPagePtr = 0;
74 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_addr = 0;
75 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_kernel_ro_addr = 0;
76 SECURITY_READ_ONLY_LATE(uint64_t)       _cpu_capabilities = 0;
77 SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_text_addr = 0;
78 
79 extern user64_addr_t commpage_text64_location;
80 extern user32_addr_t commpage_text32_location;
81 
82 /* For sysctl access from BSD side */
83 extern int gARMv8Crc32;
84 extern int gARMv8Gpi;
85 extern int gARM_FEAT_FlagM;
86 extern int gARM_FEAT_FlagM2;
87 extern int gARM_FEAT_FHM;
88 extern int gARM_FEAT_DotProd;
89 extern int gARM_FEAT_SHA3;
90 extern int gARM_FEAT_RDM;
91 extern int gARM_FEAT_LSE;
92 extern int gARM_FEAT_SHA256;
93 extern int gARM_FEAT_SHA512;
94 extern int gARM_FEAT_SHA1;
95 extern int gARM_FEAT_AES;
96 extern int gARM_FEAT_PMULL;
97 extern int gARM_FEAT_SPECRES;
98 extern int gARM_FEAT_SB;
99 extern int gARM_FEAT_FRINTTS;
100 extern int gARM_FEAT_LRCPC;
101 extern int gARM_FEAT_LRCPC2;
102 extern int gARM_FEAT_FCMA;
103 extern int gARM_FEAT_JSCVT;
104 extern int gARM_FEAT_PAuth;
105 extern int gARM_FEAT_PAuth2;
106 extern int gARM_FEAT_FPAC;
107 extern int gARM_FEAT_DPB;
108 extern int gARM_FEAT_DPB2;
109 extern int gARM_FEAT_BF16;
110 extern int gARM_FEAT_I8MM;
111 extern int gARM_FEAT_ECV;
112 extern int gARM_FEAT_LSE2;
113 extern int gARM_FEAT_CSV2;
114 extern int gARM_FEAT_CSV3;
115 extern int gARM_FEAT_DIT;
116 extern int gARM_AdvSIMD;
117 extern int gARM_AdvSIMD_HPFPCvt;
118 extern int gARM_FEAT_FP16;
119 extern int gARM_FEAT_SSBS;
120 extern int gARM_FEAT_BTI;
121 extern int gARM_FP_SyncExceptions;
122 
123 extern int      gUCNormalMem;
124 
/*
 * commpage_populate
 *
 * One-time boot initialization of the commpages.  Asks the pmap layer to
 * create the RW-data, RO-data and text commpages, records their kernel
 * mappings in the file-scope globals, then fills in the static fields
 * (signature, version, CPU/cache topology, page shifts, timebase, boot
 * time, ...) that userspace reads through the _COMM_PAGE_* addresses.
 * Kernel writes go through the _COMM_PAGE_RW_OFFSET / _COMM_PAGE_RO_OFFSET
 * aliases of the user-visible addresses.
 */
void
commpage_populate(void)
{
	uint16_t        c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
	pmap_create_commpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);

	commpage_rw_addr = kernel_data_addr;
	commpage_rw_text_addr = kernel_text_addr;
	commpage_kernel_ro_addr = kernel_ro_data_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	// Zeroed timestamp marks the timeofday data invalid until the clock
	// subsystem publishes real values via commpage_set_timestamp().
	commpage_set_timestamp(0, 0, 0, 0, 0);

	// Export the cache line size chosen by commpage_init_cpu_capabilities().
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();
	*((uint8_t*)(_COMM_PAGE_CPU_CLUSTERS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) ml_get_cluster_count();
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	// Debugger availability is published twice: legacy RW slot and RO slot.
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();

	// Populate logical CPU -> logical cluster table
	ml_map_cpus_to_clusters((uint8_t*)(_COMM_PAGE_CPU_TO_CLUSTER + _COMM_PAGE_RW_OFFSET));

	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	// Page shifts, like DEV_FIRM above, exist in legacy RW and newer RO slots.
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif


	// Sentinel tells userspace the remote-time params are not yet valid.
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;

#if CONFIG_QUIESCE_COUNTER
	// Hand the quiescent-state counter its commpage backing store.
	cpu_quiescent_set_storage((_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET));
#endif /* CONFIG_QUIESCE_COUNTER */
}
221 
222 #define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
223 #define COMMPAGE_TEXT_SECTION "__commpage_text"
224 
/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
227 extern char commpage_text_start[]
228 __SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
229 
/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
232 extern char commpage_text_end[]
233 __SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
234 
235 /* This is defined in the commpage text section as a symbol at the start of the preemptible
236  * functions */
237 extern char commpage_text_preemptible_functions;
238 
239 #if CONFIG_ARM_PFZ
240 static size_t size_of_pfz = 0;
241 #endif
242 
243 /* This is the opcode for brk #666 */
244 #define BRK_666_OPCODE 0xD4205340
245 
/*
 * commpage_text_populate
 *
 * Copies the kernel's __TEXT_EXEC,__commpage_text section into the user
 * text commpage and records the size of the PFZ (preemption-free zone)
 * half of that section.  The unused tail of the page is filled with
 * "brk #666" instructions so a stray jump there traps immediately.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);
	/* A64 instructions are 4 bytes; the section must end on an instruction boundary. */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Fill the rest of the page with brk #666 so that undefined code
	 * doesn't get run.
	 *
	 * NOTE: memset() is unsuitable here — it truncates its fill value to a
	 * single byte (0x40 of 0xD4205340), which does not encode "brk #666".
	 * Store the full 32-bit opcode instead.
	 */
	uint32_t *fill = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	size_t fill_count = (PAGE_SIZE - size_of_commpage_text) / sizeof(uint32_t);
	for (size_t i = 0; i < fill_count; i++) {
		fill[i] = BRK_666_OPCODE;
	}
#endif
}
269 
270 uint32_t
commpage_is_in_pfz64(addr64_t addr64)271 commpage_is_in_pfz64(addr64_t addr64)
272 {
273 #if CONFIG_ARM_PFZ
274 	if ((addr64 >= commpage_text64_location) &&
275 	    (addr64 < (commpage_text64_location + size_of_pfz))) {
276 		return 1;
277 	} else {
278 		return 0;
279 	}
280 #else
281 #pragma unused (addr64)
282 	return 0;
283 #endif
284 }
285 
286 
/*
 * commpage_set_timestamp
 *
 * Publishes a new gettimeofday snapshot to the commpage using a
 * generation-count style protocol: TimeStamp_tick doubles as the "valid"
 * flag.  It is zeroed first (readers treat 0 as "data in flux"), the
 * payload fields are written, and only then is the real tick value stored.
 * The DMBs order the three phases so a user-mode reader that sees a
 * non-zero tick is guaranteed to see a consistent payload.
 */
void
commpage_set_timestamp(
	uint64_t        tbr,
	uint64_t        secs,
	uint64_t        frac,
	uint64_t        scale,
	uint64_t        tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Nothing to publish to before the commpage exists. */
	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap =  (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Phase 1: invalidate — readers spin/retry while the tick is zero. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 2: write the payload while the entry is marked invalid. */
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	/* Phase 3: re-validate by storing the real timebase tick last. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;

}
317 
318 /*
319  * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
320  */
321 
322 void
commpage_set_memory_pressure(unsigned int pressure)323 commpage_set_memory_pressure(
324 	unsigned int    pressure )
325 {
326 	if (commPagePtr == 0) {
327 		return;
328 	}
329 	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
330 }
331 
332 /*
333  * Determine number of CPUs on this system.
334  */
335 static int
commpage_cpus(void)336 commpage_cpus( void )
337 {
338 	int cpus;
339 
340 	cpus = machine_info.max_cpus;
341 
342 	if (cpus == 0) {
343 		panic("commpage cpus==0");
344 	}
345 	if (cpus > 0xFF) {
346 		cpus = 0xFF;
347 	}
348 
349 	return cpus;
350 }
351 
352 uint64_t
_get_cpu_capabilities(void)353 _get_cpu_capabilities(void)
354 {
355 	return _cpu_capabilities;
356 }
357 
358 vm_address_t
_get_commpage_priv_address(void)359 _get_commpage_priv_address(void)
360 {
361 	return commpage_rw_addr;
362 }
363 
364 vm_address_t
_get_commpage_ro_address(void)365 _get_commpage_ro_address(void)
366 {
367 	return commpage_kernel_ro_addr;
368 }
369 
370 vm_address_t
_get_commpage_text_priv_address(void)371 _get_commpage_text_priv_address(void)
372 {
373 	return commpage_rw_text_addr;
374 }
375 
376 #if defined(__arm64__)
377 /**
378  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
379  */
380 static void
commpage_init_arm_optional_features_isar0(uint64_t * commpage_bits)381 commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
382 {
383 	uint64_t bits = 0;
384 	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
385 
386 	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
387 		gARM_FEAT_FlagM = 1;
388 		bits |= kHasFEATFlagM;
389 	}
390 	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
391 		gARM_FEAT_FlagM2 = 1;
392 		bits |= kHasFEATFlagM2;
393 	}
394 	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
395 		gARM_FEAT_FHM = 1;
396 		bits |= kHasFeatFHM;
397 	}
398 	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
399 		gARM_FEAT_DotProd = 1;
400 		bits |= kHasFeatDotProd;
401 	}
402 	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
403 		gARM_FEAT_SHA3 = 1;
404 		bits |= kHasFeatSHA3;
405 	}
406 	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
407 		gARM_FEAT_RDM = 1;
408 		bits |= kHasFeatRDM;
409 	}
410 	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
411 		gARM_FEAT_LSE = 1;
412 		bits |= kHasFeatLSE;
413 	}
414 	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
415 		gARM_FEAT_SHA512 = 1;
416 		bits |= kHasFeatSHA512;
417 	}
418 	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
419 		gARMv8Crc32 = 1;
420 		bits |= kHasARMv8Crc32;
421 	}
422 
423 #if __ARM_V8_CRYPTO_EXTENSIONS__
424 	/**
425 	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
426 	 * supported when it actually is. To work around this, for all of the crypto
427 	 * extensions, just check if they're supported using the board_config.h
428 	 * values.
429 	 */
430 	gARM_FEAT_PMULL = 1;
431 	gARM_FEAT_SHA1 = 1;
432 	gARM_FEAT_AES = 1;
433 	gARM_FEAT_SHA256 = 1;
434 	bits |= kHasARMv8Crypto;
435 #endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */
436 
437 	*commpage_bits |= bits;
438 }
439 
440 /**
441  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
442  */
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
 *
 * Each feature check follows the ARM feature-ID scheme: the field is masked
 * out and compared with '>=' against the minimum value that implies the
 * feature.  Some features (PAuth2, FPAC, BF16, I8MM) only set their sysctl
 * globals — no commpage capability bit is defined for them.
 */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only usable from EL0 if SCTLR_EL1.EnRCTX is also set. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARMv8Gpi = 1;
		bits |= kHasArmv8GPI;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	/* LRCPC2 is a higher value of the same LRCPC field ("LRCP2C" is sic). */
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	/* PAuth, PAuth2 and FPAC are successive values of the API field. */
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}
510 
511 /**
512  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR0_EL1
513  */
514 static void
commpage_init_arm_optional_features_mmfr0(uint64_t * commpage_bits)515 commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
516 {
517 	uint64_t bits = 0;
518 	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");
519 
520 	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
521 		gARM_FEAT_ECV = 1;
522 	}
523 
524 	*commpage_bits |= bits;
525 }
526 
527 /**
528  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
529  */
530 static void
commpage_init_arm_optional_features_mmfr2(uint64_t * commpage_bits)531 commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
532 {
533 	uint64_t bits = 0;
534 	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");
535 
536 	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
537 		gARM_FEAT_LSE2 = 1;
538 		bits |= kHasFeatLSE2;
539 	}
540 
541 	*commpage_bits |= bits;
542 }
543 
544 /**
545  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
546  */
/**
 * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
 *
 * Note the AdvSIMD field is special: a value of all-ones means "not
 * implemented", so it is tested with '!=' against the disabled encoding,
 * and the HPFPCvt/FP16 sub-features are only meaningful when AdvSIMD is
 * present at all.
 */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
		gARM_FEAT_DIT = 1;
		bits |= kHasFeatDIT;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		/* Sub-features: only checked once AdvSIMD itself is implemented. */
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}
580 
581 /**
582  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
583  */
584 static void
commpage_init_arm_optional_features_pfr1(uint64_t * commpage_bits)585 commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits)
586 {
587 	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");
588 
589 	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
590 		gARM_FEAT_SSBS = 1;
591 	}
592 
593 	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
594 		gARM_FEAT_BTI = 1;
595 	}
596 
597 #pragma unused(commpage_bits)
598 }
599 
600 
601 /**
 * Read the system register @name, attempt to set the bits of @mask that are
 * not already set, test whether those bits were actually set, restore the
 * register to its previous value if required, and 'return' @mask with only
 * the bits that were successfully set (or already set) in the system register. */
606 #define _test_sys_bits(name, mask) ({ \
607 	const uint64_t src = __builtin_arm_rsr64(#name); \
608     uint64_t test = src | mask; \
609     if (test != src) { \
610 	__builtin_arm_wsr64(#name, test); \
611 	test = __builtin_arm_rsr64(#name); \
612 	if (test != src) { \
613 	    __builtin_arm_wsr64(#name, src); \
614 	}\
615     } \
616     mask & test; \
617 })
618 
619 /**
620  * Reports whether FPU exceptions are supported.
621  * Possible FPU exceptions are :
622  * - input denormal;
623  * - inexact;
624  * - underflow;
625  * - overflow;
626  * - divide by 0;
627  * - invalid operation.
628  *
 * Any of those can be supported or not, but for now we consider it
 * all or nothing: the FPU exceptions support flag is set <=> all 6
 * exceptions are supported.
632  */
/*
 * Probes FPCR for synchronous FP-exception support by attempting to set all
 * six trap-enable bits via _test_sys_bits (which restores FPCR afterwards).
 * The capability is advertised only when every one of the six bits sticks.
 */
static void
commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
{
	/* All six FP trap-enable bits: input denormal, inexact, underflow,
	 * overflow, divide-by-zero, invalid operation. */
	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
	    FPCR_DZE | FPCR_IOE;
	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
	/* All-or-nothing: advertise only if every bit was settable. */
	if (FPCR_bits == support_mask) {
		gARM_FP_SyncExceptions = 1;
		*commpage_bits |= kHasFP_SyncExceptions;
	}
}
644 
645 /**
646  * Initializes all commpage entries and sysctls for ARM64 optional features accessible from EL0.
647  */
648 static void
commpage_init_arm_optional_features(uint64_t * commpage_bits)649 commpage_init_arm_optional_features(uint64_t *commpage_bits)
650 {
651 	commpage_init_arm_optional_features_isar0(commpage_bits);
652 	commpage_init_arm_optional_features_isar1(commpage_bits);
653 	commpage_init_arm_optional_features_mmfr0(commpage_bits);
654 	commpage_init_arm_optional_features_mmfr2(commpage_bits);
655 	commpage_init_arm_optional_features_pfr0(commpage_bits);
656 	commpage_init_arm_optional_features_pfr1(commpage_bits);
657 	commpage_init_arm_optional_features_fpcr(commpage_bits);
658 }
659 #endif /* __arm64__ */
660 
661 /*
662  * Initialize _cpu_capabilities vector
663  */
/*
 * Initialize _cpu_capabilities vector
 *
 * Builds the capability bit vector from the cache geometry, CPU count and
 * (on arm64) the optional-feature ID registers, stores it in the
 * _cpu_capabilities global, and publishes both the 32-bit and 64-bit
 * views of it to the commpage.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the cache line size; unknown sizes leave no kCache* bit set. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	/* kUP marks a uniprocessor system. */
	if (cpus == 1) {
		bits |= kUP;
	}

	/* CPU count lives in its own bitfield of the capability vector. */
	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS

	bits |= kHasVfp;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
	bits |= kHasEvent;
#ifdef __arm64__
	/* Fold in everything detected from the AArch64 feature ID registers. */
	commpage_init_arm_optional_features(&bits);
#endif



#if HAS_UCNORMAL_MEM
	gUCNormalMem = 1;
	bits |= kHasUCNormalMemory;
#endif

	_cpu_capabilities = bits;

	/* Publish both the legacy 32-bit view and the full 64-bit vector. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;

}
720 
721 /*
722  * Updated every time a logical CPU goes offline/online
723  */
724 void
commpage_update_active_cpus(void)725 commpage_update_active_cpus(void)
726 {
727 	if (!commPagePtr) {
728 		return;
729 	}
730 	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
731 
732 }
733 
734 /*
735  * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
736  */
737 void
commpage_update_timebase(void)738 commpage_update_timebase(void)
739 {
740 	if (commPagePtr) {
741 		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
742 	}
743 }
744 
745 /*
746  * Update the commpage with current kdebug state: whether tracing is enabled, a
747  * typefilter is present, and continuous time should be used for timestamps.
748  *
749  * Disregards configuration and set to 0 if tracing is disabled.
750  */
751 void
commpage_update_kdebug_state(void)752 commpage_update_kdebug_state(void)
753 {
754 	if (commPagePtr) {
755 		uint32_t state = kdebug_commpage_state();
756 		*((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
757 	}
758 }
759 
760 /* Ditto for atm_diagnostic_config */
761 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)762 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
763 {
764 	if (commPagePtr) {
765 		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
766 	}
767 }
768 
769 /*
770  * Update the commpage data with the state of multiuser mode for
771  * this device. Allowing various services in userspace to avoid
772  * IPC in the (more common) non-multiuser environment.
773  */
774 void
commpage_update_multiuser_config(uint32_t multiuser_config)775 commpage_update_multiuser_config(uint32_t multiuser_config)
776 {
777 	if (commPagePtr) {
778 		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
779 	}
780 }
781 
782 /*
783  * update the commpage data for
784  * last known value of mach_absolute_time()
785  */
786 
/*
 * Advance the commpage's last-known mach_absolute_time() value, used by
 * userspace mach_approximate_time().  The stored value only ever moves
 * forward: a single compare-and-swap is attempted, and a lost race is
 * ignored because the winner must have written a newer timestamp.
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	/* Only move the published timestamp forward, never backward. */
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
813 
814 /*
815  * update the commpage data's total system sleep time for
816  * userspace call to mach_continuous_time()
817  */
818 void
commpage_update_mach_continuous_time(uint64_t sleeptime)819 commpage_update_mach_continuous_time(uint64_t sleeptime)
820 {
821 	if (!commPagePtr) {
822 		return;
823 	}
824 
825 	uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
826 
827 	os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
828 
829 }
830 
831 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)832 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
833 {
834 	*((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
835 }
836 
837 /*
838  * update the commpage's value for the boot time
839  */
840 void
commpage_update_boottime(uint64_t value)841 commpage_update_boottime(uint64_t value)
842 {
843 	if (!commPagePtr) {
844 		return;
845 	}
846 
847 	uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
848 
849 	os_atomic_store_wide(boottime_usec, value, relaxed);
850 
851 }
852 
853 /*
854  * set the commpage's remote time params for
855  * userspace call to mach_bridge_remote_time()
856  */
/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 *
 * Uses the same invalidate/write/revalidate protocol as
 * commpage_set_timestamp(): base_local_ts doubles as a generation count,
 * zeroed while the payload is being rewritten and stored last, with DMBs
 * ordering the phases for user-mode readers.
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate: readers treat a zero base_local_ts as "in flux". */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;  //This will act as a generation count
#endif /* __arm64__ */
	}
}
872 
873 
874 /*
875  * update the commpage with if dtrace user land probes are enabled
876  */
877 void
commpage_update_dof(boolean_t enabled)878 commpage_update_dof(boolean_t enabled)
879 {
880 #if CONFIG_DTRACE
881 	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
882 #else
883 	(void)enabled;
884 #endif
885 }
886 
887 /*
888  * update the dyld global config flags
889  */
890 void
commpage_update_dyld_flags(uint64_t value)891 commpage_update_dyld_flags(uint64_t value)
892 {
893 	*((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
894 
895 }
896