xref: /xnu-8020.101.4/osfmk/x86_64/loose_ends.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 #include <mach_assert.h>
59 
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <kern/locks.h>
67 #include <sys/errno.h>
68 #include <i386/param.h>
69 #include <i386/misc_protos.h>
70 #include <i386/panic_notify.h>
71 #include <i386/cpu_data.h>
72 #include <i386/machine_routines.h>
73 #include <i386/cpuid.h>
74 #include <i386/vmx.h>
75 #include <vm/pmap.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_fault.h>
79 
80 #include <libkern/OSAtomic.h>
81 #include <libkern/OSDebug.h>
82 #include <sys/kdebug.h>
83 
84 #if !MACH_KDP
85 #include <kdp/kdp_callout.h>
86 #endif /* !MACH_KDP */
87 
88 #include <architecture/i386/pio.h>
89 
90 #include <libkern/OSDebug.h>
91 #if CONFIG_DTRACE
92 #include <mach/sdt.h>
93 #endif
94 
95 #if 0
96 
97 #undef KERNEL_DEBUG
98 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
99 #define KDEBUG 1
100 
101 #endif
102 
103 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
104 #undef bcopy
105 
106 /* XXX - should be gone from here */
107 extern void             invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
108 extern void             flush_dcache64(addr64_t addr, unsigned count, int phys);
109 extern boolean_t        phys_page_exists(ppnum_t);
110 extern void             bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
111 extern void             pmap_set_reference(ppnum_t pn);
112 extern void             mapping_set_mod(ppnum_t pa);
113 extern void             mapping_set_ref(ppnum_t pn);
114 
115 extern void             ovbcopy(const char      *from,
116     char            *to,
117     vm_size_t       nbytes);
118 
119 
120 #define value_64bit(value)  ((value) & 0xFFFFFFFF00000000ULL)
121 #define low32(x)  ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
122 
123 #define INT_SIZE        (BYTE_SIZE * sizeof (int))
124 
125 /*
126  * Set indicated bit in bit string.
127  */
128 void
setbit(int bitno,int * s)129 setbit(int bitno, int *s)
130 {
131 	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
132 }
133 
134 /*
135  * Clear indicated bit in bit string.
136  */
137 void
clrbit(int bitno,int * s)138 clrbit(int bitno, int *s)
139 {
140 	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
141 }
142 
143 /*
144  * Test if indicated bit is set in bit string.
145  */
146 int
testbit(int bitno,int * s)147 testbit(int bitno, int *s)
148 {
149 	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
150 }
151 
152 /*
153  * Find first bit set in bit string.
154  */
155 int
ffsbit(int * s)156 ffsbit(int *s)
157 {
158 	int             offset;
159 
160 	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
161 		;
162 	}
163 	return offset + __builtin_ctz(*s);
164 }
165 
/*
 * Find-first-set: 1-based index of the least significant set bit,
 * or 0 when no bits are set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
179 
/*
 * 64-bit find-first-set: 1-based index of the least significant set bit,
 * or 0 when no bits are set.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
193 
194 /*
195  * Find last bit set in bit string.
196  */
/*
 * Find-last-set: 1-based index of the most significant set bit,
 * or 0 when no bits are set.
 */
int
fls(unsigned int mask)
{
	return (mask == 0) ? 0 : (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
206 
/*
 * 64-bit find-last-set: 1-based index of the most significant set bit,
 * or 0 when no bits are set.
 */
int
flsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
216 
217 void
bzero_phys_nc(addr64_t src64,uint32_t bytes)218 bzero_phys_nc(
219 	addr64_t src64,
220 	uint32_t bytes)
221 {
222 	bzero_phys(src64, bytes);
223 }
224 
225 void
bzero_phys(addr64_t src64,uint32_t bytes)226 bzero_phys(
227 	addr64_t src64,
228 	uint32_t bytes)
229 {
230 	bzero(PHYSMAP_PTOV(src64), bytes);
231 }
232 
233 
234 /*
235  * bcopy_phys - like bcopy but copies from/to physical addresses.
236  */
237 
238 void
bcopy_phys(addr64_t src64,addr64_t dst64,vm_size_t bytes)239 bcopy_phys(
240 	addr64_t src64,
241 	addr64_t dst64,
242 	vm_size_t bytes)
243 {
244 	/* Not necessary for K64 - but ensure we stay within a page */
245 	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
246 	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
247 		panic("bcopy_phys alignment");
248 	}
249 	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
250 }
251 
252 /*
253  * allow a function to get a quick virtual mapping of a physical page
254  */
255 
256 int
apply_func_phys(addr64_t dst64,vm_size_t bytes,int (* func)(void * buffer,vm_size_t bytes,void * arg),void * arg)257 apply_func_phys(
258 	addr64_t dst64,
259 	vm_size_t bytes,
260 	int (*func)(void * buffer, vm_size_t bytes, void * arg),
261 	void * arg)
262 {
263 	/* Not necessary for K64 - but ensure we stay within a page */
264 	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
265 		panic("apply_func_phys alignment");
266 	}
267 
268 	return func(PHYSMAP_PTOV(dst64), bytes, arg);
269 }
270 
271 /*
272  * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
273  *           them correctly.
274  */
275 
276 void
ovbcopy(const char * from,char * to,vm_size_t bytes)277 ovbcopy(
278 	const char      *from,
279 	char            *to,
280 	vm_size_t       bytes)          /* num bytes to copy */
281 {
282 	/* Assume that bcopy copies left-to-right (low addr first). */
283 	if (from + bytes <= to || to + bytes <= from || to == from) {
284 		bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op*/
285 	} else if (from > to) {
286 		bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
287 	} else {
288 		/* to > from: overlapping, and must copy right-to-left. */
289 		from += bytes - 1;
290 		to += bytes - 1;
291 		while (bytes-- > 0) {
292 			*to-- = *from--;
293 		}
294 	}
295 }
296 
297 
298 /*
299  *  Read data from a physical address. Memory should not be cache inhibited.
300  */
301 
/*
 * Thresholds (nanoseconds) above which a physical read/write is treated
 * as "slow" and reported; 0 disables the timing machinery entirely.
 */
uint64_t report_phy_read_delay;
uint64_t report_phy_write_delay;
/* When non-zero, slow accesses also emit an OSReportWithBacktrace. */
uint32_t report_phy_read_osbt;
uint32_t report_phy_write_osbt;

#if DEVELOPMENT || DEBUG
/* Lower thresholds at which a KDBG tracepoint (not a panic) is emitted. */
uint64_t trace_phy_read_delay = 50 * NSEC_PER_USEC;
uint64_t trace_phy_write_delay = 50 * NSEC_PER_USEC;
/* Test hook: inflates the measured duration of timed accesses. */
extern uint64_t simulate_stretched_io;
#else
uint64_t trace_phy_read_delay = 0;
uint64_t trace_phy_write_delay = 0;
#endif
315 
/*
 * Read a 1-, 2-, 4- or 8-byte quantity from physical address 'paddr'
 * through the physmap.  If slow-read reporting is armed
 * (report_phy_read_delay != 0), the access is timed with interrupts
 * disabled and over-threshold reads are panicked, backtraced, or traced.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	/* Refuse addresses outside the physmap window. */
	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
	}

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so the timing measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Test hook: pretend the access started earlier than it did. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Restore interrupts before the potentially slow reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
398 
399 static unsigned long long
ml_phys_read_long_long(uint64_t paddr)400 ml_phys_read_long_long(uint64_t paddr)
401 {
402 	return ml_phys_read_data(paddr, 8);
403 }
404 
405 unsigned int
ml_phys_read(vm_offset_t paddr)406 ml_phys_read(vm_offset_t paddr)
407 {
408 	return (unsigned int) ml_phys_read_data(paddr, 4);
409 }
410 
411 unsigned int
ml_phys_read_word(vm_offset_t paddr)412 ml_phys_read_word(vm_offset_t paddr)
413 {
414 	return (unsigned int) ml_phys_read_data(paddr, 4);
415 }
416 
417 unsigned int
ml_phys_read_64(addr64_t paddr64)418 ml_phys_read_64(addr64_t paddr64)
419 {
420 	return (unsigned int) ml_phys_read_data(paddr64, 4);
421 }
422 
423 unsigned int
ml_phys_read_word_64(addr64_t paddr64)424 ml_phys_read_word_64(addr64_t paddr64)
425 {
426 	return (unsigned int) ml_phys_read_data(paddr64, 4);
427 }
428 
429 unsigned int
ml_phys_read_half(vm_offset_t paddr)430 ml_phys_read_half(vm_offset_t paddr)
431 {
432 	return (unsigned int) ml_phys_read_data(paddr, 2);
433 }
434 
435 unsigned int
ml_phys_read_half_64(addr64_t paddr64)436 ml_phys_read_half_64(addr64_t paddr64)
437 {
438 	return (unsigned int) ml_phys_read_data(paddr64, 2);
439 }
440 
441 unsigned int
ml_phys_read_byte(vm_offset_t paddr)442 ml_phys_read_byte(vm_offset_t paddr)
443 {
444 	return (unsigned int) ml_phys_read_data(paddr, 1);
445 }
446 
447 unsigned int
ml_phys_read_byte_64(addr64_t paddr64)448 ml_phys_read_byte_64(addr64_t paddr64)
449 {
450 	return (unsigned int) ml_phys_read_data(paddr64, 1);
451 }
452 
453 unsigned long long
ml_phys_read_double(vm_offset_t paddr)454 ml_phys_read_double(vm_offset_t paddr)
455 {
456 	return ml_phys_read_long_long(paddr);
457 }
458 
459 unsigned long long
ml_phys_read_double_64(addr64_t paddr64)460 ml_phys_read_double_64(addr64_t paddr64)
461 {
462 	return ml_phys_read_long_long(paddr64);
463 }
464 
465 
466 
467 /*
468  *  Write data to a physical address. Memory should not be cache inhibited.
469  */
470 
471 __private_extern__ void
ml_phys_write_data(uint64_t paddr,unsigned long long data,int size)472 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
473 {
474 	boolean_t istate = TRUE, timewrite = FALSE;
475 	uint64_t sabs = 0, eabs;
476 
477 	if (__improbable(!physmap_enclosed(paddr))) {
478 		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
479 	}
480 
481 	if (__improbable(report_phy_write_delay != 0)) {
482 		istate = ml_set_interrupts_enabled(FALSE);
483 		sabs = mach_absolute_time();
484 		timewrite = TRUE;
485 	}
486 #if DEVELOPMENT || DEBUG
487 	if (__improbable(timewrite && simulate_stretched_io)) {
488 		sabs -= simulate_stretched_io;
489 	}
490 #endif /* x86_64 DEVELOPMENT || DEBUG */
491 
492 	switch (size) {
493 	case 1:
494 		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
495 		break;
496 	case 2:
497 		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
498 		break;
499 	case 4:
500 		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
501 		break;
502 	case 8:
503 		*(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
504 		break;
505 	default:
506 		panic("Invalid size %d for ml_phys_write_data", size);
507 		break;
508 	}
509 
510 	if (__improbable(timewrite == TRUE)) {
511 		eabs = mach_absolute_time();
512 
513 #if DEVELOPMENT || DEBUG
514 		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
515 #endif
516 
517 		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
518 			(void)ml_set_interrupts_enabled(istate);
519 
520 			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
521 				panic_notify();
522 				panic("Write to physical addr 0x%llx took %llu ns, "
523 				    "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
524 				    paddr, (eabs - sabs), data, sabs, eabs,
525 				    report_phy_write_delay);
526 			}
527 
528 			if (report_phy_write_osbt) {
529 				OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
530 				    "took %lluus", (void *)paddr, data,
531 				    (eabs - sabs) / NSEC_PER_USEC);
532 			}
533 #if CONFIG_DTRACE
534 			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
535 			    uint64_t, paddr, uint32_t, size, uint64_t, data);
536 #endif /* CONFIG_DTRACE */
537 		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
538 			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
539 			    (eabs - sabs), sabs, paddr, data);
540 
541 			(void)ml_set_interrupts_enabled(istate);
542 		} else {
543 			(void)ml_set_interrupts_enabled(istate);
544 		}
545 	}
546 }
547 
/*
 * Sized physical-write wrappers.  All funnel into ml_phys_write_data(),
 * which performs the access through the physmap and handles slow-write
 * reporting.  The *_64 variants differ only in taking a 64-bit address.
 */

/* 8-bit writes. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}

/* 16-bit writes. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}

/* 32-bit writes. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

/* 64-bit writes. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
607 
/*
 * Read a 1-, 2- or 4-byte value from an x86 I/O port.  Mirrors the
 * slow-access timing/reporting of ml_phys_read_data(), but performs the
 * access with inb/inw/inl instead of a physmap load.
 */
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so the timing measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Test hook: pretend the access started earlier than it did. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	/* 'istate' is assigned whenever timeread is TRUE (set together above). */
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Restore interrupts before the potentially slow reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
				    ioport, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
681 
/*
 * Write a 1-, 2- or 4-byte value to an x86 I/O port.  Mirrors the
 * slow-access timing/reporting of ml_phys_write_data(), but performs the
 * access with outb/outw/outl instead of a physmap store.
 */
void
ml_port_io_write(uint16_t ioport, uint32_t val, int size)
{
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;

	if (__improbable(report_phy_write_delay != 0)) {
		/* Disable interrupts so the timing measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		/* Test hook: pretend the access started earlier than it did. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		outb(ioport, (uint8_t)val);
		break;
	case 2:
		outw(ioport, (uint16_t)val);
		break;
	case 4:
		outl(ioport, (uint32_t)val);
		break;
	default:
		panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
		break;
	}

	/* 'istate' is assigned whenever timewrite is TRUE (set together above). */
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			/* Restore interrupts before the potentially slow reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), val, sabs, eabs,
				    report_phy_write_delay);
			}

			if (report_phy_write_osbt) {
				OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%x) "
				    "took %lluus",
				    ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
			}

#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
			    (eabs - sabs), sabs, ioport, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
}
752 
/* Sized port-I/O wrappers; all funnel into ml_port_io_read/write. */

uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return (uint8_t)ml_port_io_read(ioport, 1);
}

uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return (uint16_t)ml_port_io_read(ioport, 2);
}

uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return ml_port_io_read(ioport, 4);
}

void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, (uint32_t)val, 1);
}

void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, (uint32_t)val, 2);
}

void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
788 
789 /* PCI config cycle probing
790  *
791  *
792  *      Read the memory location at physical address paddr.
793  * *Does not* recover from machine checks, unlike the PowerPC implementation.
794  * Should probably be deprecated.
795  */
796 
797 boolean_t
ml_probe_read(vm_offset_t paddr,unsigned int * val)798 ml_probe_read(vm_offset_t paddr, unsigned int *val)
799 {
800 	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
801 		return FALSE;
802 	}
803 
804 	*val = ml_phys_read(paddr);
805 
806 	return TRUE;
807 }
808 
809 /*
810  *  Read the memory location at physical address paddr.
811  *  This is a part of a device probe, so there is a good chance we will
812  *  have a machine check here. So we have to be able to handle that.
813  *  We assume that machine checks are enabled both in MSR and HIDs
814  */
815 boolean_t
ml_probe_read_64(addr64_t paddr64,unsigned int * val)816 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
817 {
818 	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
819 		return FALSE;
820 	}
821 
822 	*val = ml_phys_read_64(paddr64);
823 	return TRUE;
824 }
825 
826 
827 #undef bcmp
/*
 * Byte-compare two buffers: returns 0 when equal, non-zero otherwise.
 * The non-zero value is the count of bytes not yet known equal, kept
 * for callers that look at more than zero/non-zero.
 */
int
bcmp(const void *pa, const void *pb, size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0) {
		return 0;
	}

	for (;;) {
		if (*a++ != *b++) {
			break;
		}
		if (--len == 0) {
			break;
		}
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if (__improbable(!(len & 0x00000000FFFFFFFFULL) && (len & 0xFFFFFFFF00000000ULL))) {
		return 0xFFFFFFFF;
	}
	return (int)len;
}
858 
859 #undef memcmp
/*
 * Compare n bytes of s1 and s2; returns the difference of the first
 * mismatching bytes (as unsigned chars), or 0 if all n bytes match.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = (const unsigned char *)s1;
	const unsigned char *p2 = (const unsigned char *)s2;

	while (n-- != 0) {
		unsigned char c1 = *p1++;
		unsigned char c2 = *p2++;

		if (c1 != c2) {
			return c1 - c2;
		}
	}
	return 0;
}
874 
/*
 * Return zero iff the 'size' bytes at 'addr' are all zero; the non-zero
 * return value is the bitwise OR of every word examined, so any set bit
 * surfaces.  'addr' must be 8-byte aligned.  p[0] is loaded
 * unconditionally, so this assumes size >= 8 and whole-word sizes —
 * NOTE(review): confirm callers never pass size < 8.
 */
unsigned long
memcmp_zero_ptr_aligned(const void *addr, size_t size)
{
	const uint64_t *p = (const uint64_t *)addr;
	uint64_t a = p[0];

	static_assert(sizeof(unsigned long) == sizeof(uint64_t));

	if (size < 4 * sizeof(uint64_t)) {
		/* Small cases (8/16/24 bytes): OR in the remaining words. */
		if (size > 1 * sizeof(uint64_t)) {
			a |= p[1];
			if (size > 2 * sizeof(uint64_t)) {
				a |= p[2];
			}
		}
	} else {
		/* Large case: four independent accumulators, folded at the end. */
		size_t count = size / sizeof(uint64_t);
		uint64_t b = p[1];
		uint64_t c = p[2];
		uint64_t d = p[3];

		/*
		 * note: for sizes not a multiple of 32 bytes, this will load
		 * the bytes [size % 32 .. 32) twice which is ok
		 */
		while (count > 4) {
			count -= 4;
			a |= p[count + 0];
			b |= p[count + 1];
			c |= p[count + 2];
			d |= p[count + 3];
		}

		a |= b | c | d;
	}

	return a;
}
913 
914 #undef memmove
/*
 * Overlap-safe copy.  bcopy here is the real routine, not the string.h
 * macro (which is #undef'd earlier in this file to avoid recursion).
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	void *ret = dst;

	bcopy(src, dst, ulen);
	return ret;
}
921 
922 /*
923  * Abstract:
924  * strlen returns the number of characters in "string" preceeding
925  * the terminating null character.
926  */
927 
928 #undef strlen
/*
 * Return the number of characters preceding the terminating NUL.
 */
size_t
strlen(const char *string)
{
	const char *p = string;

	while (*p != '\0') {
		p++;
	}
	return (size_t)(p - string);
}
940 
941 void
fillPage(ppnum_t pa,unsigned int fill)942 fillPage(ppnum_t pa, unsigned int fill)
943 {
944 	uint64_t        src;
945 	int             cnt = PAGE_SIZE / sizeof(unsigned int);
946 
947 	src = i386_ptob(pa);
948 	memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
949 }
950 
/*
 * Flush (write back and invalidate) the cache line containing 'ptr'.
 * Callers bracket loops of these with mfence() for ordering — see
 * dcache_incoherent_io_store64() and cache_flush_page_phys().
 */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
956 
957 void
dcache_incoherent_io_store64(addr64_t pa,unsigned int count)958 dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
959 {
960 	addr64_t  linesize = cpuid_info()->cache_linesize;
961 	addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);
962 
963 	mfence();
964 
965 	while (pa < bound) {
966 		__clflush(PHYSMAP_PTOV(pa));
967 		pa += linesize;
968 	}
969 
970 	mfence();
971 }
972 
973 void
dcache_incoherent_io_flush64(addr64_t pa,unsigned int count)974 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
975 {
976 	return dcache_incoherent_io_store64(pa, count);
977 }
978 
979 void
flush_dcache64(addr64_t addr,unsigned count,int phys)980 flush_dcache64(addr64_t addr, unsigned count, int phys)
981 {
982 	if (phys) {
983 		dcache_incoherent_io_flush64(addr, count);
984 	} else {
985 		uint64_t  linesize = cpuid_info()->cache_linesize;
986 		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
987 		mfence();
988 		while (addr < bound) {
989 			__clflush((void *) (uintptr_t) addr);
990 			addr += linesize;
991 		}
992 		mfence();
993 	}
994 }
995 
/*
 * Intentionally a no-op: on x86 no explicit instruction-cache
 * invalidation is performed here.
 */
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
1002 
1003 
/* Highest address of interest to the VM layer; initialized outside this file. */
addr64_t         vm_last_addr;
1005 
/* Mark physical page 'pn' modified via the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}
1011 
/* Mark physical page 'pn' referenced via the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
1017 
1018 extern i386_cpu_info_t  cpuid_cpu_info;
1019 void
cache_flush_page_phys(ppnum_t pa)1020 cache_flush_page_phys(ppnum_t pa)
1021 {
1022 	boolean_t       istate;
1023 	unsigned char   *cacheline_addr;
1024 	i386_cpu_info_t *cpuid_infop = cpuid_info();
1025 	int             cacheline_size;
1026 	int             cachelines_to_flush;
1027 
1028 	cacheline_size = cpuid_infop->cache_linesize;
1029 	if (cacheline_size == 0) {
1030 		panic("cacheline_size=0 cpuid_infop=%p", cpuid_infop);
1031 	}
1032 	cachelines_to_flush = PAGE_SIZE / cacheline_size;
1033 
1034 	mfence();
1035 
1036 	istate = ml_set_interrupts_enabled(FALSE);
1037 
1038 	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
1039 	    cachelines_to_flush > 0;
1040 	    cachelines_to_flush--, cacheline_addr += cacheline_size) {
1041 		__clflush((void *) cacheline_addr);
1042 	}
1043 
1044 	(void) ml_set_interrupts_enabled(istate);
1045 
1046 	mfence();
1047 }
1048 
1049 
1050 #if !MACH_KDP
/* No-op stub used when kernel debugger (KDP) support is compiled out. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
1056 #endif
1057 
1058 #if !CONFIG_VMX
/* Stub used when VMX support (CONFIG_VMX) is compiled out. */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}
1064 
/* Stub used when VMX support (CONFIG_VMX) is compiled out. */
void
host_vmxoff(void)
{
	return;
}
1070 #endif
1071 
/* Spinlock (and its group/attributes) guarding __xcpm_state below. */
static lck_grp_t       xcpm_lck_grp;
static lck_grp_attr_t  xcpm_lck_grp_attr;
static lck_attr_t      xcpm_lck_attr;
static lck_spin_t      xcpm_lock;

/* Prototypes for the xCPM BIOS-mailbox shim implemented below. */
void xcpm_bootstrap(void);
void xcpm_mbox_lock(void);
void xcpm_mbox_unlock(void);
uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
boolean_t xcpm_is_hwp_enabled(void);
1084 
/* One-time initialization of the xcpm mailbox spinlock. */
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
1093 
/* Acquire the mailbox spinlock; pairs with xcpm_mbox_unlock(). */
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}
1099 
/* Release the mailbox spinlock acquired by xcpm_mbox_lock(). */
void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
1105 
/* Backing storage for the emulated BIOS mailbox (64 32-bit slots). */
static uint32_t __xcpm_state[64] = {};
1107 
/*
 * Read one mailbox slot with interrupts disabled and the mailbox
 * spinlock held.
 */
uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)
{
	uint32_t reg;
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
	return reg;
}
1119 
1120 uint32_t
xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)1121 xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
1122 {
1123 	return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
1124 }
1125 
/*
 * Write one mailbox slot with interrupts disabled and the mailbox
 * spinlock held.
 */
void
xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
{
	uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));
	/*
	 * NOTE(review): writes are forced onto even-numbered slots, but the
	 * read path (xcpm_bios_mbox_cmd_unsafe_read) does not apply the same
	 * mask — presumably intentional, but worth confirming.
	 */
	idx &= ~0x1;

	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	__xcpm_state[idx] = data;
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
}
1138 
/* This shim never reports hardware-controlled P-states (HWP) as enabled. */
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}
1144 
1145