xref: /xnu-8792.61.2/osfmk/x86_64/loose_ends.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 #include <mach_assert.h>
59 
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/iotrace.h>
65 #include <kern/kern_types.h>
66 #include <kern/misc_protos.h>
67 #include <kern/locks.h>
68 #include <sys/errno.h>
69 #include <i386/param.h>
70 #include <i386/misc_protos.h>
71 #include <i386/panic_notify.h>
72 #include <i386/cpu_data.h>
73 #include <i386/machine_routines.h>
74 #include <i386/cpuid.h>
75 #include <i386/vmx.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_fault.h>
80 
81 #include <libkern/OSAtomic.h>
82 #include <libkern/OSDebug.h>
83 #include <sys/kdebug.h>
84 
85 #if !MACH_KDP
86 #include <kdp/kdp_callout.h>
87 #endif /* !MACH_KDP */
88 
89 #include <architecture/i386/pio.h>
90 
91 #include <libkern/OSDebug.h>
92 #if CONFIG_DTRACE
93 #include <mach/sdt.h>
94 #endif
95 
96 #if 0
97 
98 #undef KERNEL_DEBUG
99 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
100 #define KDEBUG 1
101 
102 #endif
103 
104 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
105 #undef bcopy
106 
107 /* XXX - should be gone from here */
108 extern void             invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
109 extern void             flush_dcache64(addr64_t addr, unsigned count, int phys);
110 extern boolean_t        phys_page_exists(ppnum_t);
111 extern void             bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
112 extern void             pmap_set_reference(ppnum_t pn);
113 extern void             mapping_set_mod(ppnum_t pa);
114 extern void             mapping_set_ref(ppnum_t pn);
115 
116 extern void             ovbcopy(const char      *from,
117     char            *to,
118     vm_size_t       nbytes);
119 
120 
121 #define value_64bit(value)  ((value) & 0xFFFFFFFF00000000ULL)
122 #define low32(x)  ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
123 
124 #define INT_SIZE        (BYTE_SIZE * sizeof (int))
125 
126 /*
127  * Set indicated bit in bit string.
128  */
129 void
setbit(int bitno,int * s)130 setbit(int bitno, int *s)
131 {
132 	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
133 }
134 
135 /*
136  * Clear indicated bit in bit string.
137  */
138 void
clrbit(int bitno,int * s)139 clrbit(int bitno, int *s)
140 {
141 	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
142 }
143 
144 /*
145  * Test if indicated bit is set in bit string.
146  */
147 int
testbit(int bitno,int * s)148 testbit(int bitno, int *s)
149 {
150 	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
151 }
152 
153 /*
154  * Find first bit set in bit string.
155  */
156 int
ffsbit(int * s)157 ffsbit(int *s)
158 {
159 	int             offset;
160 
161 	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
162 		;
163 	}
164 	return offset + __builtin_ctz(*s);
165 }
166 
/*
 * ffs - find first (least significant) set bit, 1-based.
 * Returns 0 when mask is 0, otherwise 1 + index of the lowest set bit.
 *
 * NOTE: cannot use __builtin_ffs because it generates a call to 'ffs'.
 */
int
ffs(unsigned int mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
180 
/*
 * ffsll - 64-bit variant of ffs; 1-based index of the lowest set bit,
 * or 0 when mask is 0.
 *
 * NOTE: cannot use __builtin_ffsll because it generates a call to 'ffsll'.
 */
int
ffsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
194 
/*
 * fls - find last (most significant) set bit, 1-based.
 * Returns 0 when mask is 0, otherwise the position of the highest set
 * bit (1..32).
 */
int
fls(unsigned int mask)
{
	int width = (int)(sizeof(mask) * 8);

	return (mask == 0) ? 0 : width - __builtin_clz(mask);
}
207 
/*
 * flsll - 64-bit variant of fls; 1-based position of the highest set
 * bit (1..64), or 0 when mask is 0.
 */
int
flsll(unsigned long long mask)
{
	int width = (int)(sizeof(mask) * 8);

	return (mask == 0) ? 0 : width - __builtin_clzll(mask);
}
217 
/*
 * bzero_phys_nc - zero `bytes` bytes of physical memory at src64.
 * On x86 the non-cached ("_nc") variant is simply bzero_phys; there is
 * no separate uncached path here.
 */
void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}
225 
/*
 * bzero_phys - zero `bytes` bytes of physical memory at src64, going
 * through the kernel physmap virtual alias of the physical address.
 */
void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
233 
234 
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 *
 * Both the source and the destination range must lie entirely within a
 * single page; the routine panics otherwise.  The copy itself uses the
 * physmap virtual aliases of the two physical addresses.
 */
void
bcopy_phys(
	addr64_t src64,
	addr64_t dst64,
	vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("bcopy_phys alignment");
	}
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
252 
/*
 * apply_func_phys - allow a function to get a quick virtual mapping of a
 * physical page.
 *
 * Runs `func(buffer, bytes, arg)` with `buffer` set to the physmap
 * virtual alias of dst64 and returns func's result.  The range must not
 * cross a page boundary; the routine panics otherwise.
 */
int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}
271 
272 /*
273  * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
274  *           them correctly.
275  */
276 
277 void
ovbcopy(const char * from,char * to,vm_size_t bytes)278 ovbcopy(
279 	const char      *from,
280 	char            *to,
281 	vm_size_t       bytes)          /* num bytes to copy */
282 {
283 	/* Assume that bcopy copies left-to-right (low addr first). */
284 	if (from + bytes <= to || to + bytes <= from || to == from) {
285 		bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op*/
286 	} else if (from > to) {
287 		bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
288 	} else {
289 		/* to > from: overlapping, and must copy right-to-left. */
290 		from += bytes - 1;
291 		to += bytes - 1;
292 		while (bytes-- > 0) {
293 			*to-- = *from--;
294 		}
295 	}
296 }
297 
298 
299 /*
300  *  Read data from a physical address. Memory should not be cache inhibited.
301  */
302 
/*
 * Ceilings above which a timed physical read/write is reported or
 * panicked on; 0 disables the timing machinery entirely.
 * NOTE(review): compared directly against mach_absolute_time() deltas,
 * though the panic strings print "ns" — confirm units at the setter.
 */
uint64_t report_phy_read_delay;
uint64_t report_phy_write_delay;
/* When non-zero, slow accesses additionally log a backtrace. */
uint32_t report_phy_read_osbt;
uint32_t report_phy_write_osbt;

#if DEVELOPMENT || DEBUG
/* Thresholds for emitting KDBG trace events on slow accesses. */
uint64_t trace_phy_read_delay = 50 * NSEC_PER_USEC;
uint64_t trace_phy_write_delay = 50 * NSEC_PER_USEC;
extern uint64_t simulate_stretched_io;  /* test knob: inflates measured latency */
#else
uint64_t trace_phy_read_delay = 0;
uint64_t trace_phy_write_delay = 0;
#endif
316 
/*
 * ml_phys_read_data - read a 1-, 2-, 4- or 8-byte value from physical
 * address `paddr` via the kernel physmap.  Panics if paddr is outside
 * the physmap or if `size` is not one of 1/2/4/8.
 *
 * When report_phy_read_delay is non-zero the access is timed with
 * interrupts disabled; reads exceeding the report ceiling may panic or
 * log a backtrace, and reads exceeding the trace ceiling emit a KDBG
 * event.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	/* Reject addresses with no physmap alias. */
	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
	}

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so preemption doesn't inflate the measurement. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Back-date the start time to fake a long-latency access. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Re-enable interrupts before any reporting/panicking. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
397 
398 static unsigned long long
ml_phys_read_long_long(uint64_t paddr)399 ml_phys_read_long_long(uint64_t paddr)
400 {
401 	return ml_phys_read_data(paddr, 8);
402 }
403 
404 unsigned int
ml_phys_read(vm_offset_t paddr)405 ml_phys_read(vm_offset_t paddr)
406 {
407 	return (unsigned int) ml_phys_read_data(paddr, 4);
408 }
409 
410 unsigned int
ml_phys_read_word(vm_offset_t paddr)411 ml_phys_read_word(vm_offset_t paddr)
412 {
413 	return (unsigned int) ml_phys_read_data(paddr, 4);
414 }
415 
416 unsigned int
ml_phys_read_64(addr64_t paddr64)417 ml_phys_read_64(addr64_t paddr64)
418 {
419 	return (unsigned int) ml_phys_read_data(paddr64, 4);
420 }
421 
422 unsigned int
ml_phys_read_word_64(addr64_t paddr64)423 ml_phys_read_word_64(addr64_t paddr64)
424 {
425 	return (unsigned int) ml_phys_read_data(paddr64, 4);
426 }
427 
428 unsigned int
ml_phys_read_half(vm_offset_t paddr)429 ml_phys_read_half(vm_offset_t paddr)
430 {
431 	return (unsigned int) ml_phys_read_data(paddr, 2);
432 }
433 
434 unsigned int
ml_phys_read_half_64(addr64_t paddr64)435 ml_phys_read_half_64(addr64_t paddr64)
436 {
437 	return (unsigned int) ml_phys_read_data(paddr64, 2);
438 }
439 
440 unsigned int
ml_phys_read_byte(vm_offset_t paddr)441 ml_phys_read_byte(vm_offset_t paddr)
442 {
443 	return (unsigned int) ml_phys_read_data(paddr, 1);
444 }
445 
446 unsigned int
ml_phys_read_byte_64(addr64_t paddr64)447 ml_phys_read_byte_64(addr64_t paddr64)
448 {
449 	return (unsigned int) ml_phys_read_data(paddr64, 1);
450 }
451 
452 unsigned long long
ml_phys_read_double(vm_offset_t paddr)453 ml_phys_read_double(vm_offset_t paddr)
454 {
455 	return ml_phys_read_long_long(paddr);
456 }
457 
458 unsigned long long
ml_phys_read_double_64(addr64_t paddr64)459 ml_phys_read_double_64(addr64_t paddr64)
460 {
461 	return ml_phys_read_long_long(paddr64);
462 }
463 
464 
465 
466 /*
467  *  Write data to a physical address. Memory should not be cache inhibited.
468  */
469 
470 __private_extern__ void
ml_phys_write_data(uint64_t paddr,unsigned long long data,int size)471 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
472 {
473 	boolean_t istate = TRUE, timewrite = FALSE;
474 	uint64_t sabs = 0, eabs;
475 
476 	if (__improbable(!physmap_enclosed(paddr))) {
477 		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
478 	}
479 
480 	if (__improbable(report_phy_write_delay != 0)) {
481 		istate = ml_set_interrupts_enabled(FALSE);
482 		sabs = mach_absolute_time();
483 		timewrite = TRUE;
484 	}
485 #if DEVELOPMENT || DEBUG
486 	if (__improbable(timewrite && simulate_stretched_io)) {
487 		sabs -= simulate_stretched_io;
488 	}
489 #endif /* x86_64 DEVELOPMENT || DEBUG */
490 
491 	switch (size) {
492 	case 1:
493 		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
494 		break;
495 	case 2:
496 		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
497 		break;
498 	case 4:
499 		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
500 		break;
501 	case 8:
502 		*(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
503 		break;
504 	default:
505 		panic("Invalid size %d for ml_phys_write_data", size);
506 		break;
507 	}
508 
509 	if (__improbable(timewrite == TRUE)) {
510 		eabs = mach_absolute_time();
511 
512 		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
513 
514 		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
515 			(void)ml_set_interrupts_enabled(istate);
516 
517 			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
518 				panic_notify();
519 				panic("Write to physical addr 0x%llx took %llu ns, "
520 				    "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
521 				    paddr, (eabs - sabs), data, sabs, eabs,
522 				    report_phy_write_delay);
523 			}
524 
525 			if (report_phy_write_osbt) {
526 				OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
527 				    "took %lluus", (void *)paddr, data,
528 				    (eabs - sabs) / NSEC_PER_USEC);
529 			}
530 #if CONFIG_DTRACE
531 			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
532 			    uint64_t, paddr, uint32_t, size, uint64_t, data);
533 #endif /* CONFIG_DTRACE */
534 		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
535 			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
536 			    (eabs - sabs), sabs, paddr, data);
537 
538 			(void)ml_set_interrupts_enabled(istate);
539 		} else {
540 			(void)ml_set_interrupts_enabled(istate);
541 		}
542 	}
543 }
544 
545 void
ml_phys_write_byte(vm_offset_t paddr,unsigned int data)546 ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
547 {
548 	ml_phys_write_data(paddr, data, 1);
549 }
550 
551 void
ml_phys_write_byte_64(addr64_t paddr64,unsigned int data)552 ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
553 {
554 	ml_phys_write_data(paddr64, data, 1);
555 }
556 
557 void
ml_phys_write_half(vm_offset_t paddr,unsigned int data)558 ml_phys_write_half(vm_offset_t paddr, unsigned int data)
559 {
560 	ml_phys_write_data(paddr, data, 2);
561 }
562 
563 void
ml_phys_write_half_64(addr64_t paddr64,unsigned int data)564 ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
565 {
566 	ml_phys_write_data(paddr64, data, 2);
567 }
568 
569 void
ml_phys_write(vm_offset_t paddr,unsigned int data)570 ml_phys_write(vm_offset_t paddr, unsigned int data)
571 {
572 	ml_phys_write_data(paddr, data, 4);
573 }
574 
575 void
ml_phys_write_64(addr64_t paddr64,unsigned int data)576 ml_phys_write_64(addr64_t paddr64, unsigned int data)
577 {
578 	ml_phys_write_data(paddr64, data, 4);
579 }
580 
581 void
ml_phys_write_word(vm_offset_t paddr,unsigned int data)582 ml_phys_write_word(vm_offset_t paddr, unsigned int data)
583 {
584 	ml_phys_write_data(paddr, data, 4);
585 }
586 
587 void
ml_phys_write_word_64(addr64_t paddr64,unsigned int data)588 ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
589 {
590 	ml_phys_write_data(paddr64, data, 4);
591 }
592 
593 void
ml_phys_write_double(vm_offset_t paddr,unsigned long long data)594 ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
595 {
596 	ml_phys_write_data(paddr, data, 8);
597 }
598 
599 void
ml_phys_write_double_64(addr64_t paddr64,unsigned long long data)600 ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
601 {
602 	ml_phys_write_data(paddr64, data, 8);
603 }
604 
/*
 * ml_port_io_read - read a 1-, 2- or 4-byte value from x86 I/O port
 * `ioport` (in/inw/inl).  Panics on any other `size`.
 *
 * Shares the physical-read latency reporting machinery: when
 * report_phy_read_delay is non-zero the access is timed with interrupts
 * disabled and slow reads are panicked on, reported, or traced.
 */
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	/* istate is only read on paths where timeread was set, i.e. after
	 * it has been assigned below. */
	boolean_t istate, timeread = FALSE;

	if (__improbable(report_phy_read_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Back-date the start time to fake a long-latency access. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Re-enable interrupts before any reporting/panicking. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
				    ioport, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
676 
/*
 * ml_port_io_write - write the low 1, 2 or 4 bytes of `val` to x86 I/O
 * port `ioport` (out/outw/outl).  Panics on any other `size`.
 *
 * Shares the physical-write latency reporting machinery: when
 * report_phy_write_delay is non-zero the access is timed with interrupts
 * disabled and slow writes are panicked on, reported, or traced.
 */
void
ml_port_io_write(uint16_t ioport, uint32_t val, int size)
{
	uint64_t sabs, eabs;
	/* istate is only read on paths where timewrite was set, i.e. after
	 * it has been assigned below. */
	boolean_t istate, timewrite = FALSE;

	if (__improbable(report_phy_write_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		/* Back-date the start time to fake a long-latency access. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		outb(ioport, (uint8_t)val);
		break;
	case 2:
		outw(ioport, (uint16_t)val);
		break;
	case 4:
		outl(ioport, (uint32_t)val);
		break;
	default:
		panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			/* Re-enable interrupts before any reporting/panicking. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), val, sabs, eabs,
				    report_phy_write_delay);
			}

			if (report_phy_write_osbt) {
				OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%x) "
				    "took %lluus",
				    ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
			}

#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
			    (eabs - sabs), sabs, ioport, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
}
745 
746 uint8_t
ml_port_io_read8(uint16_t ioport)747 ml_port_io_read8(uint16_t ioport)
748 {
749 	return ml_port_io_read(ioport, 1);
750 }
751 
752 uint16_t
ml_port_io_read16(uint16_t ioport)753 ml_port_io_read16(uint16_t ioport)
754 {
755 	return ml_port_io_read(ioport, 2);
756 }
757 
758 uint32_t
ml_port_io_read32(uint16_t ioport)759 ml_port_io_read32(uint16_t ioport)
760 {
761 	return ml_port_io_read(ioport, 4);
762 }
763 
764 void
ml_port_io_write8(uint16_t ioport,uint8_t val)765 ml_port_io_write8(uint16_t ioport, uint8_t val)
766 {
767 	ml_port_io_write(ioport, val, 1);
768 }
769 
770 void
ml_port_io_write16(uint16_t ioport,uint16_t val)771 ml_port_io_write16(uint16_t ioport, uint16_t val)
772 {
773 	ml_port_io_write(ioport, val, 2);
774 }
775 
776 void
ml_port_io_write32(uint16_t ioport,uint32_t val)777 ml_port_io_write32(uint16_t ioport, uint32_t val)
778 {
779 	ml_port_io_write(ioport, val, 4);
780 }
781 
782 /* PCI config cycle probing
783  *
784  *
785  *      Read the memory location at physical address paddr.
786  * *Does not* recover from machine checks, unlike the PowerPC implementation.
787  * Should probably be deprecated.
788  */
789 
790 boolean_t
ml_probe_read(vm_offset_t paddr,unsigned int * val)791 ml_probe_read(vm_offset_t paddr, unsigned int *val)
792 {
793 	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
794 		return FALSE;
795 	}
796 
797 	*val = ml_phys_read(paddr);
798 
799 	return TRUE;
800 }
801 
802 /*
803  *  Read the memory location at physical address paddr.
804  *  This is a part of a device probe, so there is a good chance we will
805  *  have a machine check here. So we have to be able to handle that.
806  *  We assume that machine checks are enabled both in MSR and HIDs
807  */
808 boolean_t
ml_probe_read_64(addr64_t paddr64,unsigned int * val)809 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
810 {
811 	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
812 		return FALSE;
813 	}
814 
815 	*val = ml_phys_read_64(paddr64);
816 	return TRUE;
817 }
818 
#undef bcmp
/*
 * bcmp - compare len bytes; returns 0 when equal, non-zero otherwise.
 * The non-zero value is the number of bytes left unchecked at the first
 * mismatch (callers should treat it only as zero/non-zero).
 */
int
bcmp(
	const void      *pa,
	const void      *pb,
	size_t  len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0) {
		return 0;
	}

	while (len != 0) {
		if (*a++ != *b++) {
			break;
		}
		len--;
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if (!(len & 0x00000000FFFFFFFFULL) && (len & 0xFFFFFFFF00000000ULL)) {
		/* truncating len to int would yield 0: force non-zero */
		return 0xFFFFFFFF;
	} else {
		return (int)len;
	}
}
850 
#undef memcmp
/*
 * memcmp - compare n bytes as unsigned chars; returns <0, 0 or >0 with
 * the usual libc semantics (difference of the first mismatching bytes).
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	while (n-- != 0) {
		unsigned char ca = *a++;
		unsigned char cb = *b++;

		if (ca != cb) {
			return ca - cb;
		}
	}
	return 0;
}
866 
/*
 * memcmp_zero_ptr_aligned - return 0 iff the first `size` bytes at
 * `addr` are all zero; otherwise returns a non-zero value (the OR of
 * the 64-bit words scanned).  `addr` must be 8-byte aligned and `size`
 * a multiple of 8 — assumed from the word-wise loads; TODO confirm with
 * callers.
 */
unsigned long
memcmp_zero_ptr_aligned(const void *addr, size_t size)
{
	const uint64_t *words = (const uint64_t *)addr;
	uint64_t acc = words[0];

	_Static_assert(sizeof(unsigned long) == sizeof(uint64_t),
	    "unsigned long must be 64-bit");

	if (size >= 4 * sizeof(uint64_t)) {
		size_t n = size / sizeof(uint64_t);
		uint64_t acc1 = words[1];
		uint64_t acc2 = words[2];
		uint64_t acc3 = words[3];

		/*
		 * note: for sizes not a multiple of 32 bytes, this will load
		 * the bytes [size % 32 .. 32) twice which is ok
		 */
		for (; n > 4; n -= 4) {
			acc  |= words[n - 4];
			acc1 |= words[n - 3];
			acc2 |= words[n - 2];
			acc3 |= words[n - 1];
		}

		acc |= acc1 | acc2 | acc3;
	} else {
		if (size > 1 * sizeof(uint64_t)) {
			acc |= words[1];
			if (size > 2 * sizeof(uint64_t)) {
				acc |= words[2];
			}
		}
	}

	return acc;
}
905 
#undef memmove
/*
 * memmove - overlap-safe copy of ulen bytes from src to dst.
 * Delegates to the kernel bcopy (which handles overlap); the #undef
 * pairs with the bcopy->memmove mapping in string.h to avoid recursion
 * (see the comment near the top of this file).
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
913 
/*
 * Abstract:
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */

#undef strlen
size_t
strlen(
	const char *string)
{
	const char *p = string;

	while (*p != '\0') {
		p++;
	}
	return (size_t)(p - string);
}
932 
/*
 * fillPage - fill the physical page with page number `pa` with the
 * 32-bit pattern `fill`, via the page's physmap alias.
 */
void
fillPage(ppnum_t pa, unsigned int fill)
{
	uint64_t        src;
	int             cnt = PAGE_SIZE / sizeof(unsigned int);

	src = i386_ptob(pa);    /* page number -> physical byte address */
	memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
}
942 
/*
 * __clflush - flush the cache line containing `ptr` from all cache
 * levels.  Callers are expected to bracket sequences of flushes with
 * mfence (see the users below); the asm itself declares no memory
 * clobber.
 */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
948 
/*
 * dcache_incoherent_io_store64 - write back and invalidate the cache
 * lines covering `count` bytes of physical memory starting at `pa`,
 * via clflush on the physmap aliases.  Fenced on both sides so the
 * flushes are ordered with surrounding stores.
 */
void
dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t  linesize = cpuid_info()->cache_linesize;
	/* round the end of the range up to a line boundary */
	addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);

	mfence();

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}

	mfence();
}
964 
965 void
dcache_incoherent_io_flush64(addr64_t pa,unsigned int count)966 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
967 {
968 	return dcache_incoherent_io_store64(pa, count);
969 }
970 
/*
 * flush_dcache64 - flush `count` bytes of data cache starting at `addr`.
 * When `phys` is non-zero, `addr` is a physical address and the physmap
 * path is used; otherwise `addr` is treated as a kernel virtual address
 * and its lines are clflushed directly, bracketed by mfence.
 */
void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	} else {
		uint64_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
		mfence();
		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}
		mfence();
	}
}
987 
/*
 * invalidate_icache64 - no-op on x86: the instruction cache is coherent
 * with the data cache, so no explicit invalidation is required.
 */
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
994 
995 
/* Highest kernel virtual address; set during VM bootstrap (defined here,
 * consumed elsewhere). */
addr64_t         vm_last_addr;
997 
/* Mark the physical page `pn` modified in the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}
1003 
/* Mark the physical page `pn` referenced in the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
1009 
extern i386_cpu_info_t  cpuid_cpu_info;
/*
 * cache_flush_page_phys - clflush every cache line of the physical page
 * with page number `pa`, via its physmap alias.  Interrupts are disabled
 * across the per-line loop so the flush of the page is not interleaved
 * with other activity; mfence on both sides orders the flushes.
 */
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t       istate;
	unsigned char   *cacheline_addr;
	i386_cpu_info_t *cpuid_infop = cpuid_info();
	int             cacheline_size;
	int             cachelines_to_flush;

	cacheline_size = cpuid_infop->cache_linesize;
	if (cacheline_size == 0) {
		/* cpuid info not initialized yet - cannot size the loop */
		panic("cacheline_size=0 cpuid_infop=%p", cpuid_infop);
	}
	cachelines_to_flush = PAGE_SIZE / cacheline_size;

	mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	    cachelines_to_flush > 0;
	    cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);

	mfence();
}
1040 
1041 
#if !MACH_KDP
/*
 * kdp_register_callout - stub for kernels built without the kernel
 * debugger protocol; accepts and ignores the callout registration.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif
1049 
#if !CONFIG_VMX
/*
 * VMX stubs for kernels built without VMX support: entering VMX root
 * mode always fails, and exiting is a no-op.
 */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void
host_vmxoff(void)
{
	return;
}
#endif
1063 
/* Spinlock (and its group/attributes) serializing access to the xcpm
 * mailbox state below. */
static lck_grp_t       xcpm_lck_grp;
static lck_grp_attr_t  xcpm_lck_grp_attr;
static lck_attr_t      xcpm_lck_attr;
static lck_spin_t      xcpm_lock;

/* xcpm mailbox interface (prototypes for the definitions below). */
void xcpm_bootstrap(void);
void xcpm_mbox_lock(void);
void xcpm_mbox_unlock(void);
uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
boolean_t xcpm_is_hwp_enabled(void);
1076 
/*
 * xcpm_bootstrap - one-time initialization of the xcpm mailbox spinlock
 * and its lock group/attributes.  Must run before any mbox lock/unlock.
 */
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
1085 
/* Acquire the xcpm mailbox spinlock. */
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}
1091 
/* Release the xcpm mailbox spinlock. */
void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
1097 
/* Backing store for the xcpm mailbox commands; indexed by cmd modulo the
 * array size (see the read/write accessors below). */
static uint32_t __xcpm_state[64] = {};
1099 
/*
 * xcpm_bios_mbox_cmd_read - read the mailbox slot for `cmd` under the
 * mailbox spinlock with interrupts disabled.
 */
uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)
{
	uint32_t reg;
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
	return reg;
}
1111 
1112 uint32_t
xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)1113 xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
1114 {
1115 	return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
1116 }
1117 
/*
 * xcpm_bios_mbox_cmd_write - store `data` into the mailbox slot for
 * `cmd`, under the mailbox spinlock with interrupts disabled.
 */
void
xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
{
	uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));
	/* writes land on even slots only — NOTE(review): intent of masking
	 * off bit 0 is not evident here; confirm against the mailbox layout */
	idx &= ~0x1;

	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	__xcpm_state[idx] = data;
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
}
1130 
/* Hardware P-states are never reported as enabled by this stub. */
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}
1136 
1137