1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 #include <mach_assert.h>
59
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/iotrace.h>
65 #include <kern/kern_types.h>
66 #include <kern/misc_protos.h>
67 #include <kern/locks.h>
68 #include <sys/errno.h>
69 #include <i386/param.h>
70 #include <i386/misc_protos.h>
71 #include <i386/panic_notify.h>
72 #include <i386/cpu_data.h>
73 #include <i386/machine_routines.h>
74 #include <i386/cpuid.h>
75 #include <i386/vmx.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_fault.h>
80
81 #include <libkern/OSAtomic.h>
82 #include <libkern/OSDebug.h>
83 #include <sys/kdebug.h>
84
85 #if !MACH_KDP
86 #include <kdp/kdp_callout.h>
87 #endif /* !MACH_KDP */
88
89 #include <architecture/i386/pio.h>
90
91 #include <libkern/OSDebug.h>
92 #if CONFIG_DTRACE
93 #include <mach/sdt.h>
94 #endif
95
96 #if 0
97
98 #undef KERNEL_DEBUG
99 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
100 #define KDEBUG 1
101
102 #endif
103
104 /* XXX - should be gone from here */
105 extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
106 extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
107 extern boolean_t phys_page_exists(ppnum_t);
108 extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
109 extern void pmap_set_reference(ppnum_t pn);
110 extern void mapping_set_mod(ppnum_t pa);
111 extern void mapping_set_ref(ppnum_t pn);
112
113 extern void ovbcopy(const char *from,
114 char *to,
115 vm_size_t nbytes);
116
117
118 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
119 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
120
121 #define INT_SIZE (BYTE_SIZE * sizeof (int))
122
123 /*
124 * Set indicated bit in bit string.
125 */
126 void
setbit(int bitno,int * s)127 setbit(int bitno, int *s)
128 {
129 s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
130 }
131
132 /*
133 * Clear indicated bit in bit string.
134 */
135 void
clrbit(int bitno,int * s)136 clrbit(int bitno, int *s)
137 {
138 s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
139 }
140
141 /*
142 * Test if indicated bit is set in bit string.
143 */
144 int
testbit(int bitno,int * s)145 testbit(int bitno, int *s)
146 {
147 return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
148 }
149
150 /*
151 * Find first bit set in bit string.
152 */
153 int
ffsbit(int * s)154 ffsbit(int *s)
155 {
156 int offset;
157
158 for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
159 ;
160 }
161 return offset + __builtin_ctz(*s);
162 }
163
/*
 * Find the first (least significant) set bit in a 32-bit mask.
 * Returns its 1-based position, or 0 when no bit is set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return mask ? 1 + __builtin_ctz(mask) : 0;
}
177
/*
 * Find the first (least significant) set bit in a 64-bit mask.
 * Returns its 1-based position, or 0 when no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return mask ? 1 + __builtin_ctzll(mask) : 0;
}
191
192 /*
193 * Find last bit set in bit string.
194 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/* 1-based position of the highest set bit: width minus leading zeros. */
	return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
204
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/* 1-based position of the highest set bit: width minus leading zeros. */
	return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
214
215 void
bzero_phys_nc(addr64_t src64,uint32_t bytes)216 bzero_phys_nc(
217 addr64_t src64,
218 uint32_t bytes)
219 {
220 bzero_phys(src64, bytes);
221 }
222
223 void
bzero_phys(addr64_t src64,uint32_t bytes)224 bzero_phys(
225 addr64_t src64,
226 uint32_t bytes)
227 {
228 bzero(PHYSMAP_PTOV(src64), bytes);
229 }
230
231
232 /*
233 * bcopy_phys - like bcopy but copies from/to physical addresses.
234 */
235
236 void
bcopy_phys(addr64_t src64,addr64_t dst64,vm_size_t bytes)237 bcopy_phys(
238 addr64_t src64,
239 addr64_t dst64,
240 vm_size_t bytes)
241 {
242 /* Not necessary for K64 - but ensure we stay within a page */
243 if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
244 ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
245 panic("bcopy_phys alignment");
246 }
247 bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
248 }
249
250 /*
251 * allow a function to get a quick virtual mapping of a physical page
252 */
253
254 int
apply_func_phys(addr64_t dst64,vm_size_t bytes,int (* func)(void * buffer,vm_size_t bytes,void * arg),void * arg)255 apply_func_phys(
256 addr64_t dst64,
257 vm_size_t bytes,
258 int (*func)(void * buffer, vm_size_t bytes, void * arg),
259 void * arg)
260 {
261 /* Not necessary for K64 - but ensure we stay within a page */
262 if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
263 panic("apply_func_phys alignment");
264 }
265
266 return func(PHYSMAP_PTOV(dst64), bytes, arg);
267 }
268
269 /*
270 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
271 * them correctly.
272 */
273
274 void
ovbcopy(const char * from,char * to,vm_size_t bytes)275 ovbcopy(
276 const char *from,
277 char *to,
278 vm_size_t bytes) /* num bytes to copy */
279 {
280 /* Assume that bcopy copies left-to-right (low addr first). */
281 if (from + bytes <= to || to + bytes <= from || to == from) {
282 bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/
283 } else if (from > to) {
284 bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */
285 } else {
286 /* to > from: overlapping, and must copy right-to-left. */
287 from += bytes - 1;
288 to += bytes - 1;
289 while (bytes-- > 0) {
290 *to-- = *from--;
291 }
292 }
293 }
294
295
296 /*
297 * Read data from a physical address. Memory should not be cache inhibited.
298 */
299
/*
 * Latency ceilings for timed physical accesses.  When non-zero, each
 * physical read/write is timed and an access exceeding the ceiling
 * panics or fires DTrace probes (see ml_phys_read_data/write_data).
 * NOTE(review): presumably in mach_absolute_time units, assigned via
 * the machine-timeouts mechanism elsewhere — confirm.
 */
uint64_t report_phy_read_delay;
uint64_t report_phy_write_delay;

#if DEVELOPMENT || DEBUG
/* Thresholds above which a kdebug trace event is emitted for an access. */
uint64_t trace_phy_read_delay = 50 * NSEC_PER_USEC;
uint64_t trace_phy_write_delay = 50 * NSEC_PER_USEC;
/* Test hook: inflates the apparent duration of timed accesses. */
extern uint64_t simulate_stretched_io;
#else
uint64_t trace_phy_read_delay = 0;
uint64_t trace_phy_write_delay = 0;
#endif
311
/*
 * Read `size` bytes (1, 2, 4 or 8) from physical address `paddr` and
 * return the value zero-extended to 64 bits.  Panics if `paddr` is not
 * covered by the physmap or if `size` is invalid.  When
 * report_phy_read_delay is armed, the access is timed with interrupts
 * disabled and an over-threshold read panics (if phy_read_panic) and/or
 * fires DTrace/kdebug probes.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
	}

	if (__improbable(report_phy_read_delay != 0)) {
		/* Keep interrupts off so the measurement is not skewed. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the read started earlier than it did. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* DEVELOPMENT || DEBUG */

	/* Access through the physmap with a width-exact volatile load. */
	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
		}

		if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);
		}

		/* Restore the interrupt state saved when timing began. */
		(void)ml_set_interrupts_enabled(istate);
	}

	return result;
}
384
/* 8-byte physical read; thin wrapper over ml_phys_read_data(). */
static unsigned long long
ml_phys_read_long_long(uint64_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}
390
/*
 * Width-specific physical read accessors, all layered over
 * ml_phys_read_data().  The *_64 variants take a 64-bit physical
 * address; sizes are: read/read_word = 4 bytes, read_half = 2,
 * read_byte = 1, read_double = 8.
 */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(paddr64);
}
450
451
452
453 /*
454 * Write data to a physical address. Memory should not be cache inhibited.
455 */
456
457 __private_extern__ void
ml_phys_write_data(uint64_t paddr,unsigned long long data,int size)458 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
459 {
460 boolean_t istate = TRUE, timewrite = FALSE;
461 uint64_t sabs = 0, eabs;
462
463 if (__improbable(!physmap_enclosed(paddr))) {
464 panic("%s: 0x%llx out of bounds", __FUNCTION__, paddr);
465 }
466
467 if (__improbable(report_phy_write_delay != 0)) {
468 istate = ml_set_interrupts_enabled(FALSE);
469 sabs = mach_absolute_time();
470 timewrite = TRUE;
471 }
472 #if DEVELOPMENT || DEBUG
473 if (__improbable(timewrite && simulate_stretched_io)) {
474 sabs -= simulate_stretched_io;
475 }
476 #endif /* x86_64 DEVELOPMENT || DEBUG */
477
478 switch (size) {
479 case 1:
480 *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
481 break;
482 case 2:
483 *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
484 break;
485 case 4:
486 *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
487 break;
488 case 8:
489 *(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
490 break;
491 default:
492 panic("Invalid size %d for ml_phys_write_data", size);
493 break;
494 }
495
496 if (__improbable(timewrite == TRUE)) {
497 eabs = mach_absolute_time();
498
499 iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
500
501 if (__improbable((eabs - sabs) > report_phy_write_delay)) {
502 if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
503 panic_notify();
504 panic("Write to physical addr 0x%llx took %llu ns, "
505 "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
506 paddr, (eabs - sabs), data, sabs, eabs,
507 report_phy_write_delay);
508 }
509
510 DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
511 uint64_t, paddr, uint32_t, size, uint64_t, data);
512 }
513
514 if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
515 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
516 (eabs - sabs), sabs, paddr, data);
517 }
518
519 (void)ml_set_interrupts_enabled(istate);
520 }
521 }
522
/*
 * Width-specific physical write accessors, all layered over
 * ml_phys_write_data().  The *_64 variants take a 64-bit physical
 * address; sizes are: write/write_word = 4 bytes, write_half = 2,
 * write_byte = 1, write_double = 8.
 */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
582
/*
 * Read `size` bytes (1, 2 or 4) from x86 I/O port `ioport` and return
 * the value zero-extended to 32 bits.  Panics on an invalid size.
 * When report_phy_read_delay is armed, the access is timed with
 * interrupts disabled, like ml_phys_read_data().
 */
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	/*
	 * istate is only assigned — and later restored — when timeread
	 * is set, so it is never read uninitialized.
	 */
	boolean_t istate, timeread = FALSE;

	if (__improbable(report_phy_read_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the read started earlier than it did. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    report_phy_read_delay);
			}

			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
		}

		if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);
		}

		(void)ml_set_interrupts_enabled(istate);
	}

	return result;
}
646
/*
 * Write the low `size` bytes (1, 2 or 4) of `val` to x86 I/O port
 * `ioport`.  Panics on an invalid size.  When report_phy_write_delay
 * is armed, the access is timed with interrupts disabled, like
 * ml_phys_write_data().
 */
void
ml_port_io_write(uint16_t ioport, uint32_t val, int size)
{
	uint64_t sabs, eabs;
	/*
	 * istate is only assigned — and later restored — when timewrite
	 * is set, so it is never read uninitialized.
	 */
	boolean_t istate, timewrite = FALSE;

	if (__improbable(report_phy_write_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the write started earlier than it did. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		outb(ioport, (uint8_t)val);
		break;
	case 2:
		outw(ioport, (uint16_t)val);
		break;
	case 4:
		outl(ioport, (uint32_t)val);
		break;
	default:
		panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

		iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), val, sabs, eabs,
				    report_phy_write_delay);
			}

			DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size, uint64_t, val);
		}

		if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
			    (eabs - sabs), sabs, ioport, val);
		}

		(void)ml_set_interrupts_enabled(istate);
	}
}
705
/* Width-specific port-I/O accessors over ml_port_io_read/write(). */
uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return ml_port_io_read(ioport, 1);
}

uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return ml_port_io_read(ioport, 2);
}

uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return ml_port_io_read(ioport, 4);
}

void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, val, 1);
}

void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, val, 2);
}

void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
741
742 /* PCI config cycle probing
743 *
744 *
745 * Read the memory location at physical address paddr.
746 * *Does not* recover from machine checks, unlike the PowerPC implementation.
747 * Should probably be deprecated.
748 */
749
750 boolean_t
ml_probe_read(vm_offset_t paddr,unsigned int * val)751 ml_probe_read(vm_offset_t paddr, unsigned int *val)
752 {
753 if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
754 return FALSE;
755 }
756
757 *val = ml_phys_read(paddr);
758
759 return TRUE;
760 }
761
762 /*
763 * Read the memory location at physical address paddr.
764 * This is a part of a device probe, so there is a good chance we will
765 * have a machine check here. So we have to be able to handle that.
766 * We assume that machine checks are enabled both in MSR and HIDs
767 */
768 boolean_t
ml_probe_read_64(addr64_t paddr64,unsigned int * val)769 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
770 {
771 if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
772 return FALSE;
773 }
774
775 *val = ml_phys_read_64(paddr64);
776 return TRUE;
777 }
778
779 void
fillPage(ppnum_t pa,unsigned int fill)780 fillPage(ppnum_t pa, unsigned int fill)
781 {
782 uint64_t src;
783 int cnt = PAGE_SIZE / sizeof(unsigned int);
784
785 src = i386_ptob(pa);
786 memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
787 }
788
/* Flush the cache line containing *ptr from the cache hierarchy (CLFLUSH). */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
794
795 void
dcache_incoherent_io_store64(addr64_t pa,unsigned int count)796 dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
797 {
798 addr64_t linesize = cpuid_info()->cache_linesize;
799 addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);
800
801 mfence();
802
803 while (pa < bound) {
804 __clflush(PHYSMAP_PTOV(pa));
805 pa += linesize;
806 }
807
808 mfence();
809 }
810
811 void
dcache_incoherent_io_flush64(addr64_t pa,unsigned int count)812 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
813 {
814 return dcache_incoherent_io_store64(pa, count);
815 }
816
817 void
flush_dcache64(addr64_t addr,unsigned count,int phys)818 flush_dcache64(addr64_t addr, unsigned count, int phys)
819 {
820 if (phys) {
821 dcache_incoherent_io_flush64(addr, count);
822 } else {
823 uint64_t linesize = cpuid_info()->cache_linesize;
824 addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1);
825 mfence();
826 while (addr < bound) {
827 __clflush((void *) (uintptr_t) addr);
828 addr += linesize;
829 }
830 mfence();
831 }
832 }
833
834 void
invalidate_icache64(__unused addr64_t addr,__unused unsigned count,__unused int phys)835 invalidate_icache64(__unused addr64_t addr,
836 __unused unsigned count,
837 __unused int phys)
838 {
839 }
840
841
/* NOTE(review): presumably the highest usable physical/VM address;
 * assigned outside this file — confirm against bootstrap code. */
addr64_t vm_last_addr;

/* Mark physical page pn modified, via the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

/* Mark physical page pn referenced, via the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
855
856 extern i386_cpu_info_t cpuid_cpu_info;
857 void
cache_flush_page_phys(ppnum_t pa)858 cache_flush_page_phys(ppnum_t pa)
859 {
860 boolean_t istate;
861 unsigned char *cacheline_addr;
862 i386_cpu_info_t *cpuid_infop = cpuid_info();
863 int cacheline_size;
864 int cachelines_to_flush;
865
866 cacheline_size = cpuid_infop->cache_linesize;
867 if (cacheline_size == 0) {
868 panic("cacheline_size=0 cpuid_infop=%p", cpuid_infop);
869 }
870 cachelines_to_flush = PAGE_SIZE / cacheline_size;
871
872 mfence();
873
874 istate = ml_set_interrupts_enabled(FALSE);
875
876 for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
877 cachelines_to_flush > 0;
878 cachelines_to_flush--, cacheline_addr += cacheline_size) {
879 __clflush((void *) cacheline_addr);
880 }
881
882 (void) ml_set_interrupts_enabled(istate);
883
884 mfence();
885 }
886
887
888 #if !MACH_KDP
889 void
kdp_register_callout(kdp_callout_fn_t fn,void * arg)890 kdp_register_callout(kdp_callout_fn_t fn, void *arg)
891 {
892 #pragma unused(fn,arg)
893 }
894 #endif
895
896 #if !CONFIG_VMX
897 int
host_vmxon(boolean_t exclusive __unused)898 host_vmxon(boolean_t exclusive __unused)
899 {
900 return VMX_UNSUPPORTED;
901 }
902
903 void
host_vmxoff(void)904 host_vmxoff(void)
905 {
906 return;
907 }
908 #endif
909
/* Spinlock (and its group/attributes) serializing access to the
 * simulated xCPM BIOS mailbox state below. */
static lck_grp_t xcpm_lck_grp;
static lck_grp_attr_t xcpm_lck_grp_attr;
static lck_attr_t xcpm_lck_attr;
static lck_spin_t xcpm_lock;

/* Forward declarations for the xCPM mailbox shim. */
void xcpm_bootstrap(void);
void xcpm_mbox_lock(void);
void xcpm_mbox_unlock(void);
uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
boolean_t xcpm_is_hwp_enabled(void);
922
/* Initialize the spinlock protecting the xCPM mailbox shim.
 * Attributes must be set to defaults before the group/lock init calls. */
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
931
/* Acquire the xCPM mailbox spinlock. */
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}

/* Release the xCPM mailbox spinlock. */
void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
943
/* Backing store for the simulated xCPM BIOS mailbox registers. */
static uint32_t __xcpm_state[64] = {};
945
946 uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)947 xcpm_bios_mbox_cmd_read(uint32_t cmd)
948 {
949 uint32_t reg;
950 boolean_t istate = ml_set_interrupts_enabled(FALSE);
951 xcpm_mbox_lock();
952 reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
953 xcpm_mbox_unlock();
954 ml_set_interrupts_enabled(istate);
955 return reg;
956 }
957
958 uint32_t
xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)959 xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
960 {
961 return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
962 }
963
964 void
xcpm_bios_mbox_cmd_write(uint32_t cmd,uint32_t data)965 xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
966 {
967 uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));
968 idx &= ~0x1;
969
970 boolean_t istate = ml_set_interrupts_enabled(FALSE);
971 xcpm_mbox_lock();
972 __xcpm_state[idx] = data;
973 xcpm_mbox_unlock();
974 ml_set_interrupts_enabled(istate);
975 }
976
/* HWP (hardware-controlled P-states) is never reported enabled here. */
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}
982
983