xref: /xnu-8796.141.3/osfmk/x86_64/loose_ends.c (revision 1b191cb58250d0705d8a51287127505aa4bc0789)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 #include <mach_assert.h>
59 
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/iotrace.h>
65 #include <kern/kern_types.h>
66 #include <kern/misc_protos.h>
67 #include <kern/locks.h>
68 #include <sys/errno.h>
69 #include <i386/param.h>
70 #include <i386/misc_protos.h>
71 #include <i386/panic_notify.h>
72 #include <i386/cpu_data.h>
73 #include <i386/machine_routines.h>
74 #include <i386/cpuid.h>
75 #include <i386/vmx.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_fault.h>
80 
81 #include <libkern/OSAtomic.h>
82 #include <libkern/OSDebug.h>
83 #include <sys/kdebug.h>
84 
85 #if !MACH_KDP
86 #include <kdp/kdp_callout.h>
87 #endif /* !MACH_KDP */
88 
89 #include <architecture/i386/pio.h>
90 
91 #include <libkern/OSDebug.h>
92 #if CONFIG_DTRACE
93 #include <mach/sdt.h>
94 #endif
95 
96 #if 0
97 
98 #undef KERNEL_DEBUG
99 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
100 #define KDEBUG 1
101 
102 #endif
103 
104 /* XXX - should be gone from here */
105 extern void             invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
106 extern void             flush_dcache64(addr64_t addr, unsigned count, int phys);
107 extern boolean_t        phys_page_exists(ppnum_t);
108 extern void             bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
109 extern void             pmap_set_reference(ppnum_t pn);
110 extern void             mapping_set_mod(ppnum_t pa);
111 extern void             mapping_set_ref(ppnum_t pn);
112 
113 extern void             ovbcopy(const char      *from,
114     char            *to,
115     vm_size_t       nbytes);
116 
117 
118 #define value_64bit(value)  ((value) & 0xFFFFFFFF00000000ULL)
119 #define low32(x)  ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
120 
121 #define INT_SIZE        (BYTE_SIZE * sizeof (int))
122 
123 /*
124  * Set indicated bit in bit string.
125  */
126 void
setbit(int bitno,int * s)127 setbit(int bitno, int *s)
128 {
129 	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
130 }
131 
132 /*
133  * Clear indicated bit in bit string.
134  */
135 void
clrbit(int bitno,int * s)136 clrbit(int bitno, int *s)
137 {
138 	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
139 }
140 
141 /*
142  * Test if indicated bit is set in bit string.
143  */
144 int
testbit(int bitno,int * s)145 testbit(int bitno, int *s)
146 {
147 	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
148 }
149 
150 /*
151  * Find first bit set in bit string.
152  */
153 int
ffsbit(int * s)154 ffsbit(int *s)
155 {
156 	int             offset;
157 
158 	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
159 		;
160 	}
161 	return offset + __builtin_ctz(*s);
162 }
163 
/*
 * Find first (least significant) set bit, numbering bits from 1.
 * Returns 0 when no bit is set.
 *
 * NOTE: cannot use __builtin_ffs because it generates a call to 'ffs'.
 */
int
ffs(unsigned int mask)
{
	return mask == 0 ? 0 : 1 + __builtin_ctz(mask);
}
177 
/*
 * 64-bit variant of ffs(): first set bit numbered from 1, or 0 when
 * the mask is empty.
 *
 * NOTE: cannot use __builtin_ffsll because it generates a call to 'ffsll'.
 */
int
ffsll(unsigned long long mask)
{
	return mask == 0 ? 0 : 1 + __builtin_ctzll(mask);
}
191 
/*
 * Find last (most significant) set bit, numbering bits from 1.
 * Returns 0 when no bit is set.
 */
int
fls(unsigned int mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
	}
	return 0;
}
204 
/*
 * 64-bit variant of fls(): last set bit numbered from 1, or 0 when
 * the mask is empty.
 */
int
flsll(unsigned long long mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
	}
	return 0;
}
214 
/*
 * Zero physical memory, non-cached variant.  On x86_64 this simply
 * forwards to the cached path.
 */
void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}
222 
/*
 * Zero 'bytes' bytes of physical memory starting at src64, accessed
 * through the kernel physmap window.
 */
void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
230 
231 
232 /*
233  * bcopy_phys - like bcopy but copies from/to physical addresses.
234  */
235 
236 void
bcopy_phys(addr64_t src64,addr64_t dst64,vm_size_t bytes)237 bcopy_phys(
238 	addr64_t src64,
239 	addr64_t dst64,
240 	vm_size_t bytes)
241 {
242 	/* Not necessary for K64 - but ensure we stay within a page */
243 	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
244 	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
245 		panic("bcopy_phys alignment");
246 	}
247 	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
248 }
249 
250 /*
251  * allow a function to get a quick virtual mapping of a physical page
252  */
253 
254 int
apply_func_phys(addr64_t dst64,vm_size_t bytes,int (* func)(void * buffer,vm_size_t bytes,void * arg),void * arg)255 apply_func_phys(
256 	addr64_t dst64,
257 	vm_size_t bytes,
258 	int (*func)(void * buffer, vm_size_t bytes, void * arg),
259 	void * arg)
260 {
261 	/* Not necessary for K64 - but ensure we stay within a page */
262 	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
263 		panic("apply_func_phys alignment");
264 	}
265 
266 	return func(PHYSMAP_PTOV(dst64), bytes, arg);
267 }
268 
269 /*
270  * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
271  *           them correctly.
272  */
273 
274 void
ovbcopy(const char * from,char * to,vm_size_t bytes)275 ovbcopy(
276 	const char      *from,
277 	char            *to,
278 	vm_size_t       bytes)          /* num bytes to copy */
279 {
280 	/* Assume that bcopy copies left-to-right (low addr first). */
281 	if (from + bytes <= to || to + bytes <= from || to == from) {
282 		bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op*/
283 	} else if (from > to) {
284 		bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
285 	} else {
286 		/* to > from: overlapping, and must copy right-to-left. */
287 		from += bytes - 1;
288 		to += bytes - 1;
289 		while (bytes-- > 0) {
290 			*to-- = *from--;
291 		}
292 	}
293 }
294 
295 
296 /*
297  *  Read data from a physical address. Memory should not be cache inhibited.
298  */
299 
/* Thresholds (ns): accesses slower than this panic or are reported. */
uint64_t report_phy_read_delay;
uint64_t report_phy_write_delay;
/* When nonzero, slow accesses also emit an OSReportWithBacktrace. */
uint32_t report_phy_read_osbt;
uint32_t report_phy_write_osbt;

#if DEVELOPMENT || DEBUG
/* Thresholds (ns) above which a kdebug trace record is emitted. */
uint64_t trace_phy_read_delay = 50 * NSEC_PER_USEC;
uint64_t trace_phy_write_delay = 50 * NSEC_PER_USEC;
/* Test hook: artificially stretch measured I/O latency. */
extern uint64_t simulate_stretched_io;
#else
uint64_t trace_phy_read_delay = 0;
uint64_t trace_phy_write_delay = 0;
#endif
313 
/*
 * Read a naturally-sized datum (1, 2, 4 or 8 bytes) from physical
 * address paddr through the physmap.  Memory should not be cache
 * inhibited.  When report_phy_read_delay is set, the access is timed
 * with interrupts disabled; overly slow reads panic or are
 * reported/traced per the report/trace thresholds.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	/* Only addresses covered by the physmap may be read this way. */
	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
	}

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so the latency measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	/* Test hook: shift the start time back to simulate a slow access. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Re-enable interrupts before potentially lengthy reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			/* Slow but under the report ceiling: just leave a kdebug record. */
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
394 
/* Read a 64-bit quantity from physical address paddr. */
static unsigned long long
ml_phys_read_long_long(uint64_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}
400 
/* Read a 32-bit word from physical address paddr. */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}
406 
/* Read a 32-bit word from physical address paddr. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}
412 
/* Read a 32-bit word from 64-bit physical address paddr64. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}
418 
/* Read a 32-bit word from 64-bit physical address paddr64. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}
424 
/* Read a 16-bit halfword from physical address paddr (zero-extended). */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 2);
}
430 
/* Read a 16-bit halfword from 64-bit physical address paddr64 (zero-extended). */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 2);
}
436 
/* Read one byte from physical address paddr (zero-extended). */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 1);
}
442 
/* Read one byte from 64-bit physical address paddr64 (zero-extended). */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 1);
}
448 
/* Read a 64-bit quantity from physical address paddr. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}
454 
/* Read a 64-bit quantity from 64-bit physical address paddr64. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(paddr64);
}
460 
461 
462 
463 /*
464  *  Write data to a physical address. Memory should not be cache inhibited.
465  */
466 
467 __private_extern__ void
ml_phys_write_data(uint64_t paddr,unsigned long long data,int size)468 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
469 {
470 	boolean_t istate = TRUE, timewrite = FALSE;
471 	uint64_t sabs = 0, eabs;
472 
473 	if (__improbable(!physmap_enclosed(paddr))) {
474 		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
475 	}
476 
477 	if (__improbable(report_phy_write_delay != 0)) {
478 		istate = ml_set_interrupts_enabled(FALSE);
479 		sabs = mach_absolute_time();
480 		timewrite = TRUE;
481 	}
482 #if DEVELOPMENT || DEBUG
483 	if (__improbable(timewrite && simulate_stretched_io)) {
484 		sabs -= simulate_stretched_io;
485 	}
486 #endif /* x86_64 DEVELOPMENT || DEBUG */
487 
488 	switch (size) {
489 	case 1:
490 		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
491 		break;
492 	case 2:
493 		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
494 		break;
495 	case 4:
496 		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
497 		break;
498 	case 8:
499 		*(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
500 		break;
501 	default:
502 		panic("Invalid size %d for ml_phys_write_data", size);
503 		break;
504 	}
505 
506 	if (__improbable(timewrite == TRUE)) {
507 		eabs = mach_absolute_time();
508 
509 		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
510 
511 		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
512 			(void)ml_set_interrupts_enabled(istate);
513 
514 			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
515 				panic_notify();
516 				panic("Write to physical addr 0x%llx took %llu ns, "
517 				    "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
518 				    paddr, (eabs - sabs), data, sabs, eabs,
519 				    report_phy_write_delay);
520 			}
521 
522 			if (report_phy_write_osbt) {
523 				OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
524 				    "took %lluus", (void *)paddr, data,
525 				    (eabs - sabs) / NSEC_PER_USEC);
526 			}
527 #if CONFIG_DTRACE
528 			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
529 			    uint64_t, paddr, uint32_t, size, uint64_t, data);
530 #endif /* CONFIG_DTRACE */
531 		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
532 			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
533 			    (eabs - sabs), sabs, paddr, data);
534 
535 			(void)ml_set_interrupts_enabled(istate);
536 		} else {
537 			(void)ml_set_interrupts_enabled(istate);
538 		}
539 	}
540 }
541 
/* Write one byte to physical address paddr. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}
547 
/* Write one byte to 64-bit physical address paddr64. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}
553 
/* Write a 16-bit halfword to physical address paddr. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}
559 
/* Write a 16-bit halfword to 64-bit physical address paddr64. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}
565 
/* Write a 32-bit word to physical address paddr. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}
571 
/* Write a 32-bit word to 64-bit physical address paddr64. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}
577 
/* Write a 32-bit word to physical address paddr. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}
583 
/* Write a 32-bit word to 64-bit physical address paddr64. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}
589 
/* Write a 64-bit quantity to physical address paddr. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}
595 
/* Write a 64-bit quantity to 64-bit physical address paddr64. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
601 
/*
 * Read 1, 2 or 4 bytes from an x86 I/O port.  When
 * report_phy_read_delay is set, the access is timed with interrupts
 * disabled; overly slow reads panic or are reported/traced per the
 * report/trace thresholds.
 */
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so the latency measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: shift the start time back to simulate a slow access. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	/* istate/sabs are only consumed here; timeread is set alongside them. */
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			/* Re-enable interrupts before potentially lengthy reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
				    ioport, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			/* Slow but under the report ceiling: just leave a kdebug record. */
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
673 
/*
 * Write 1, 2 or 4 bytes to an x86 I/O port.  When
 * report_phy_write_delay is set, the access is timed with interrupts
 * disabled; overly slow writes panic or are reported/traced per the
 * report/trace thresholds.
 */
void
ml_port_io_write(uint16_t ioport, uint32_t val, int size)
{
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;

	if (__improbable(report_phy_write_delay != 0)) {
		/* Disable interrupts so the latency measurement is honest. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	/* Test hook: shift the start time back to simulate a slow access. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		outb(ioport, (uint8_t)val);
		break;
	case 2:
		outw(ioport, (uint16_t)val);
		break;
	case 4:
		outl(ioport, (uint32_t)val);
		break;
	default:
		panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
		break;
	}

	/* istate/sabs are only consumed here; timewrite is set alongside them. */
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			/* Re-enable interrupts before potentially lengthy reporting. */
			(void)ml_set_interrupts_enabled(istate);

			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), val, sabs, eabs,
				    report_phy_write_delay);
			}

			if (report_phy_write_osbt) {
				OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%x) "
				    "took %lluus",
				    ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
			}

#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			/* Slow but under the report ceiling: just leave a kdebug record. */
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
			    (eabs - sabs), sabs, ioport, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
}
742 
/* Read one byte from an I/O port. */
uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return ml_port_io_read(ioport, 1);
}
748 
/* Read a 16-bit word from an I/O port. */
uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return ml_port_io_read(ioport, 2);
}
754 
/* Read a 32-bit word from an I/O port. */
uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return ml_port_io_read(ioport, 4);
}
760 
/* Write one byte to an I/O port. */
void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, val, 1);
}
766 
/* Write a 16-bit word to an I/O port. */
void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, val, 2);
}
772 
/* Write a 32-bit word to an I/O port. */
void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
778 
779 /* PCI config cycle probing
780  *
781  *
782  *      Read the memory location at physical address paddr.
783  * *Does not* recover from machine checks, unlike the PowerPC implementation.
784  * Should probably be deprecated.
785  */
786 
787 boolean_t
ml_probe_read(vm_offset_t paddr,unsigned int * val)788 ml_probe_read(vm_offset_t paddr, unsigned int *val)
789 {
790 	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
791 		return FALSE;
792 	}
793 
794 	*val = ml_phys_read(paddr);
795 
796 	return TRUE;
797 }
798 
799 /*
800  *  Read the memory location at physical address paddr.
801  *  This is a part of a device probe, so there is a good chance we will
802  *  have a machine check here. So we have to be able to handle that.
803  *  We assume that machine checks are enabled both in MSR and HIDs
804  */
805 boolean_t
ml_probe_read_64(addr64_t paddr64,unsigned int * val)806 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
807 {
808 	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
809 		return FALSE;
810 	}
811 
812 	*val = ml_phys_read_64(paddr64);
813 	return TRUE;
814 }
815 
816 void
fillPage(ppnum_t pa,unsigned int fill)817 fillPage(ppnum_t pa, unsigned int fill)
818 {
819 	uint64_t        src;
820 	int             cnt = PAGE_SIZE / sizeof(unsigned int);
821 
822 	src = i386_ptob(pa);
823 	memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
824 }
825 
/* Flush the cache line containing *ptr (x86 CLFLUSH instruction). */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
831 
832 void
dcache_incoherent_io_store64(addr64_t pa,unsigned int count)833 dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
834 {
835 	addr64_t  linesize = cpuid_info()->cache_linesize;
836 	addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);
837 
838 	mfence();
839 
840 	while (pa < bound) {
841 		__clflush(PHYSMAP_PTOV(pa));
842 		pa += linesize;
843 	}
844 
845 	mfence();
846 }
847 
848 void
dcache_incoherent_io_flush64(addr64_t pa,unsigned int count)849 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
850 {
851 	return dcache_incoherent_io_store64(pa, count);
852 }
853 
854 void
flush_dcache64(addr64_t addr,unsigned count,int phys)855 flush_dcache64(addr64_t addr, unsigned count, int phys)
856 {
857 	if (phys) {
858 		dcache_incoherent_io_flush64(addr, count);
859 	} else {
860 		uint64_t  linesize = cpuid_info()->cache_linesize;
861 		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
862 		mfence();
863 		while (addr < bound) {
864 			__clflush((void *) (uintptr_t) addr);
865 			addr += linesize;
866 		}
867 		mfence();
868 	}
869 }
870 
/* Intentionally a no-op on x86_64. */
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
877 
878 
addr64_t         vm_last_addr;   /* NOTE(review): not written in this file; presumably set during VM bootstrap — confirm */
880 
/* Mark physical page pn as modified via the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}
886 
/* Mark physical page pn as referenced via the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
892 
893 extern i386_cpu_info_t  cpuid_cpu_info;
894 void
cache_flush_page_phys(ppnum_t pa)895 cache_flush_page_phys(ppnum_t pa)
896 {
897 	boolean_t       istate;
898 	unsigned char   *cacheline_addr;
899 	i386_cpu_info_t *cpuid_infop = cpuid_info();
900 	int             cacheline_size;
901 	int             cachelines_to_flush;
902 
903 	cacheline_size = cpuid_infop->cache_linesize;
904 	if (cacheline_size == 0) {
905 		panic("cacheline_size=0 cpuid_infop=%p", cpuid_infop);
906 	}
907 	cachelines_to_flush = PAGE_SIZE / cacheline_size;
908 
909 	mfence();
910 
911 	istate = ml_set_interrupts_enabled(FALSE);
912 
913 	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
914 	    cachelines_to_flush > 0;
915 	    cachelines_to_flush--, cacheline_addr += cacheline_size) {
916 		__clflush((void *) cacheline_addr);
917 	}
918 
919 	(void) ml_set_interrupts_enabled(istate);
920 
921 	mfence();
922 }
923 
924 
925 #if !MACH_KDP
/* Stub: without MACH_KDP there is no debugger to register callouts with. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
931 #endif
932 
933 #if !CONFIG_VMX
/* Stub: VMX support is compiled out, so entering VMX root mode always fails. */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}
939 
/* Stub: VMX support is compiled out; nothing to leave. */
void
host_vmxoff(void)
{
	return;
}
945 #endif
946 
947 static lck_grp_t       xcpm_lck_grp;
948 static lck_grp_attr_t  xcpm_lck_grp_attr;
949 static lck_attr_t      xcpm_lck_attr;
950 static lck_spin_t      xcpm_lock;
951 
952 void xcpm_bootstrap(void);
953 void xcpm_mbox_lock(void);
954 void xcpm_mbox_unlock(void);
955 uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
956 uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
957 void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
958 boolean_t xcpm_is_hwp_enabled(void);
959 
/*
 * Initialize the spinlock protecting the (stubbed) xCPM mailbox state.
 * Attribute setup must precede the corresponding init calls.
 */
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
968 
/* Acquire the xCPM mailbox spinlock. */
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}
974 
/* Release the xCPM mailbox spinlock. */
void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
980 
static uint32_t __xcpm_state[64] = {};  /* backing store for the stubbed xCPM BIOS mailbox */
982 
983 uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)984 xcpm_bios_mbox_cmd_read(uint32_t cmd)
985 {
986 	uint32_t reg;
987 	boolean_t istate = ml_set_interrupts_enabled(FALSE);
988 	xcpm_mbox_lock();
989 	reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
990 	xcpm_mbox_unlock();
991 	ml_set_interrupts_enabled(istate);
992 	return reg;
993 }
994 
/*
 * Read a mailbox register without locking; cmd is reduced modulo the
 * state-table size.
 */
uint32_t
xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
{
	return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
}
1000 
/*
 * Write a mailbox register with interrupts disabled and the mailbox
 * lock held.
 */
void
xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
{
	uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));
	/*
	 * NOTE(review): writes land only on even slots while reads do not
	 * mask bit 0 — presumably commands come in read/write pairs; confirm
	 * against the xCPM mailbox consumers.
	 */
	idx &= ~0x1;

	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	__xcpm_state[idx] = data;
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
}
1013 
/* Stub: hardware-controlled P-states (HWP) are reported as disabled. */
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}
1019 
1020