xref: /xnu-10002.41.9/osfmk/i386/machine_routines.c (revision 699cd48037512bf4380799317ca44ca453c82f57) !
1 /*
2  * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <i386/machine_routines.h>
30 #include <i386/cpuid.h>
31 #include <i386/fpu.h>
32 #include <mach/processor.h>
33 #include <kern/processor.h>
34 #include <kern/machine.h>
35 
36 #include <kern/cpu_number.h>
37 #include <kern/thread.h>
38 #include <kern/thread_call.h>
39 #include <kern/policy_internal.h>
40 
41 #include <prng/random.h>
42 #include <prng/entropy.h>
43 #include <i386/machine_cpu.h>
44 #include <i386/lapic.h>
45 #include <i386/bit_routines.h>
46 #include <i386/mp_events.h>
47 #include <i386/pmCPU.h>
48 #include <i386/trap.h>
49 #include <i386/tsc.h>
50 #include <i386/cpu_threads.h>
51 #include <i386/proc_reg.h>
52 #include <mach/vm_param.h>
53 #include <i386/pmap.h>
54 #include <i386/pmap_internal.h>
55 #include <i386/misc_protos.h>
56 #include <kern/timer_queue.h>
57 #include <vm/vm_map.h>
58 #if KPC
59 #include <kern/kpc.h>
60 #endif
61 #include <architecture/i386/pio.h>
62 #include <i386/cpu_data.h>
63 #if DEBUG
64 #define DBG(x...)       kprintf("DBG: " x)
65 #else
66 #define DBG(x...)
67 #endif
68 
69 #if MONOTONIC
70 #include <kern/monotonic.h>
71 #endif /* MONOTONIC */
72 
73 extern void     wakeup(void *);
74 
75 uint64_t        LockTimeOut;
76 uint64_t        TLBTimeOut;
77 uint64_t        LockTimeOutTSC;
78 uint32_t        LockTimeOutUsec;
79 uint64_t        MutexSpin;
80 uint64_t        low_MutexSpin;
81 int64_t         high_MutexSpin;
82 uint64_t        LastDebuggerEntryAllowance;
83 uint64_t        delay_spin_threshold;
84 
85 extern uint64_t panic_restart_timeout;
86 
87 boolean_t virtualized = FALSE;
88 
89 static SIMPLE_LOCK_DECLARE(ml_timer_evaluation_slock, 0);
90 uint32_t ml_timer_eager_evaluations;
91 uint64_t ml_timer_eager_evaluation_max;
92 static boolean_t ml_timer_evaluation_in_progress = FALSE;
93 
94 LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
95 LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
96 static int max_cpus_initialized = 0;
97 #define MAX_CPUS_SET    0x1
98 #define MAX_CPUS_WAIT   0x2
99 
100 /* IO memory map services */
101 
102 /* Map memory map IO space */
/*
 * Map a physical I/O region into the kernel's virtual address space
 * with uncached (VM_WIMG_IO) attributes.
 * Returns the kernel virtual address of the mapping.
 */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO, VM_PROT_DEFAULT, false);
}
110 
/*
 * Map a physical I/O region with write-combining (VM_WIMG_WCOMB)
 * attributes, e.g. for framebuffers.
 */
vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_WCOMB, VM_PROT_DEFAULT, false);
}
118 
/*
 * Map a physical I/O region with caller-supplied WIMG flags, marked
 * unmappable (the final `true`) so it cannot be re-exposed to user space.
 */
vm_offset_t
ml_io_map_unmappable(
	vm_offset_t             phys_addr,
	vm_size_t               size,
	unsigned int            flags)
{
	return io_map(phys_addr, size, flags, VM_PROT_DEFAULT, true);
}
127 
128 void
ml_get_bouncepool_info(vm_offset_t * phys_addr,vm_size_t * size)129 ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
130 {
131 	*phys_addr = 0;
132 	*size      = 0;
133 }
134 
135 
/*
 * Translate a physical address to its static kernel-virtual alias.
 * On x86_64 the physmap aliases all of physical memory at
 * VM_MIN_KERNEL_ADDRESS, so the translation is a simple OR.
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
#if defined(__x86_64__)
	return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
#else
	return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
#endif
}
146 
147 vm_offset_t
ml_static_slide(vm_offset_t vaddr)148 ml_static_slide(
149 	vm_offset_t vaddr)
150 {
151 	return vaddr + vm_kernel_slide;
152 }
153 
154 /*
155  * base must be page-aligned, and size must be a multiple of PAGE_SIZE
156  */
157 kern_return_t
ml_static_verify_page_protections(uint64_t base,uint64_t size,vm_prot_t prot)158 ml_static_verify_page_protections(
159 	uint64_t base, uint64_t size, vm_prot_t prot)
160 {
161 	vm_prot_t pageprot;
162 	uint64_t offset;
163 
164 	DBG("ml_static_verify_page_protections: vaddr 0x%llx sz 0x%llx prot 0x%x\n", base, size, prot);
165 
166 	/*
167 	 * base must be within the static bounds, defined to be:
168 	 * (vm_kernel_stext, kc_highest_nonlinkedit_vmaddr)
169 	 */
170 #if DEVELOPMENT || DEBUG || KASAN
171 	assert(kc_highest_nonlinkedit_vmaddr > 0 && base > vm_kernel_stext && base < kc_highest_nonlinkedit_vmaddr);
172 #else   /* On release kernels, assume this is a protection mismatch failure. */
173 	if (kc_highest_nonlinkedit_vmaddr == 0 || base < vm_kernel_stext || base >= kc_highest_nonlinkedit_vmaddr) {
174 		return KERN_FAILURE;
175 	}
176 #endif
177 
178 	for (offset = 0; offset < size; offset += PAGE_SIZE) {
179 		if (pmap_get_prot(kernel_pmap, base + offset, &pageprot) == KERN_FAILURE) {
180 			return KERN_FAILURE;
181 		}
182 		if ((pageprot & prot) != prot) {
183 			return KERN_FAILURE;
184 		}
185 	}
186 
187 	return KERN_SUCCESS;
188 }
189 
190 vm_offset_t
ml_static_unslide(vm_offset_t vaddr)191 ml_static_unslide(
192 	vm_offset_t vaddr)
193 {
194 	return vaddr - vm_kernel_slide;
195 }
196 
197 /*
198  * Reclaim memory, by virtual address, that was used in early boot that is no longer needed
199  * by the kernel.
200  */
/*
 * Release a page-aligned range of early-boot kernel memory back to the
 * VM system: unmap it from the kernel pmap and hand each managed frame
 * to vm_page_create(), fixing up the wired-page accounting afterwards.
 * Correctly skips unmapped holes and handles large (2M) pages, but will
 * assert if asked to free from the middle of a large page.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	addr64_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;
	vm_size_t map_size;

	assert(vaddr >= VM_MIN_KERNEL_ADDRESS);

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr; vaddr_cur < round_page_64(vaddr + size);) {
		/* Size of the mapping at this VA (4K or large page); 0 if unmapped. */
		map_size = pmap_query_pagesize(kernel_pmap, vaddr_cur);

		/* just skip if nothing mapped here */
		if (map_size == 0) {
			vaddr_cur += PAGE_SIZE;
			continue;
		}

		/*
		 * Can't free from the middle of a large page.
		 */
		assert((vaddr_cur & (map_size - 1)) == 0);

		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		assert(ppn != (ppnum_t)NULL);

		/* Drop the whole mapping, then return each frame to the free list. */
		pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + map_size);
		while (map_size > 0) {
			assert(pmap_valid_page(ppn));
			if (IS_MANAGED_PAGE(ppn)) {
				vm_page_create(ppn, (ppn + 1));
				freed_pages++;
			}
			map_size -= PAGE_SIZE;
			vaddr_cur += PAGE_SIZE;
			ppn++;
		}
	}
	/* Freed frames are no longer wired; adjust global wire counters. */
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	if (vm_page_wire_count_on_boot != 0) {
		assert(vm_page_wire_count_on_boot >= freed_pages);
		vm_page_wire_count_on_boot -= freed_pages;
	}
	vm_page_unlock_queues();

#if     DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}
257 
258 /* Change page protections for addresses previously loaded by efiboot */
259 kern_return_t
ml_static_protect(vm_offset_t vmaddr,vm_size_t size,vm_prot_t prot)260 ml_static_protect(vm_offset_t vmaddr, vm_size_t size, vm_prot_t prot)
261 {
262 	boolean_t NX = !!!(prot & VM_PROT_EXECUTE), ro = !!!(prot & VM_PROT_WRITE);
263 
264 	assert(prot & VM_PROT_READ);
265 
266 	pmap_mark_range(kernel_pmap, vmaddr, size, NX, ro);
267 
268 	return KERN_SUCCESS;
269 }
270 
271 /* virtual to physical on wired pages */
/*
 * Translate a kernel virtual address of a wired page to its
 * physical address via the kernel pmap.
 */
vm_offset_t
ml_vtophys(
	vm_offset_t vaddr)
{
	return (vm_offset_t)kvtophys(vaddr);
}
278 
279 /*
280  *	Routine:        ml_nofault_copy
281  *	Function:	Perform a physical mode copy if the source and
282  *			destination have valid translations in the kernel pmap.
283  *			If translations are present, they are assumed to
284  *			be wired; i.e. no attempt is made to guarantee that the
285  *			translations obtained remained valid for
286  *			the duration of the copy process.
287  */
288 
289 vm_size_t
ml_nofault_copy(vm_offset_t virtsrc,vm_offset_t virtdst,vm_size_t size)290 ml_nofault_copy(
291 	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
292 {
293 	addr64_t cur_phys_dst, cur_phys_src;
294 	uint32_t count, nbytes = 0;
295 
296 	while (size > 0) {
297 		if (!(cur_phys_src = kvtophys(virtsrc))) {
298 			break;
299 		}
300 		if (!(cur_phys_dst = kvtophys(virtdst))) {
301 			break;
302 		}
303 		if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
304 			break;
305 		}
306 		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
307 		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
308 			count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
309 		}
310 		if (count > size) {
311 			count = (uint32_t)size;
312 		}
313 
314 		bcopy_phys(cur_phys_src, cur_phys_dst, count);
315 
316 		nbytes += count;
317 		virtsrc += count;
318 		virtdst += count;
319 		size -= count;
320 	}
321 
322 	return nbytes;
323 }
324 
325 /*
326  *	Routine:        ml_validate_nofault
327  *	Function: Validate that ths address range has a valid translations
328  *			in the kernel pmap.  If translations are present, they are
329  *			assumed to be wired; i.e. no attempt is made to guarantee
330  *			that the translation persist after the check.
331  *  Returns: TRUE if the range is mapped and will not cause a fault,
332  *			FALSE otherwise.
333  */
334 
335 boolean_t
ml_validate_nofault(vm_offset_t virtsrc,vm_size_t size)336 ml_validate_nofault(
337 	vm_offset_t virtsrc, vm_size_t size)
338 {
339 	addr64_t cur_phys_src;
340 	uint32_t count;
341 
342 	while (size > 0) {
343 		if (!(cur_phys_src = kvtophys(virtsrc))) {
344 			return FALSE;
345 		}
346 		if (!pmap_valid_page(i386_btop(cur_phys_src))) {
347 			return FALSE;
348 		}
349 		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
350 		if (count > size) {
351 			count = (uint32_t)size;
352 		}
353 
354 		virtsrc += count;
355 		size -= count;
356 	}
357 
358 	return TRUE;
359 }
360 
361 /* Interrupt handling */
362 
363 /* Initialize Interrupts */
364 void
ml_init_interrupt(void)365 ml_init_interrupt(void)
366 {
367 	(void) ml_set_interrupts_enabled(TRUE);
368 }
369 
370 
371 /* Get Interrupts Enabled */
372 boolean_t
ml_get_interrupts_enabled(void)373 ml_get_interrupts_enabled(void)
374 {
375 	unsigned long flags;
376 
377 	__asm__ volatile ("pushf; pop	%0":  "=r" (flags));
378 	return (flags & EFL_IF) != 0;
379 }
380 
381 /* Set Interrupts Enabled */
/*
 * Enable or disable interrupts, returning the previous state so the
 * caller can restore it. When enabling with no preemption held and an
 * urgent AST pending, immediately takes the preemption trap.
 * Must not be asked to enable interrupts from interrupt context.
 */
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;
	boolean_t istate;

	/* Snapshot EFLAGS to capture the current interrupt state. */
	__asm__ volatile ("pushf; pop	%0"  :  "=r" (flags));

	/* Enabling interrupts while on the interrupt stack is a bug. */
	assert(get_interrupt_level() ? (enable == FALSE) : TRUE);

	istate = ((flags & EFL_IF) != 0);

	if (enable) {
		__asm__ volatile ("sti;nop");

		/*
		 * An urgent AST may have been posted while interrupts were
		 * masked; service it now via the preemption software trap.
		 */
		if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) {
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
		}
	} else {
		/* Only execute cli if we are actually transitioning off. */
		if (istate) {
			__asm__ volatile ("cli");
		}
	}

	return istate;
}
408 
409 /* Early Set Interrupts Enabled */
/*
 * Early-boot variant of ml_set_interrupts_enabled: enabling interrupts
 * this early is fatal (halts the CPU); disabling is a no-op.
 * Always reports FALSE as the previous state.
 */
boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	if (enable == TRUE) {
		kprintf("Caller attempted to enable interrupts too early in "
		    "kernel startup. Halting.\n");
		hlt();
		/*NOTREACHED*/
	}

	/* On x86, do not allow interrupts to be enabled very early */
	return FALSE;
}
423 
424 /* Check if running at interrupt context */
425 boolean_t
ml_at_interrupt_context(void)426 ml_at_interrupt_context(void)
427 {
428 	return get_interrupt_level() != 0;
429 }
430 
431 /*
432  * This answers the question
433  * "after returning from this interrupt handler with the AST_URGENT bit set,
434  * will I end up in ast_taken_user or ast_taken_kernel?"
435  *
436  * If it's called in non-interrupt context (e.g. regular syscall), it should
437  * return false.
438  *
439  * Must be called with interrupts disabled.
440  */
441 bool
ml_did_interrupt_userspace(void)442 ml_did_interrupt_userspace(void)
443 {
444 	assert(ml_get_interrupts_enabled() == false);
445 
446 	x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
447 	if (!state) {
448 		return false;
449 	}
450 
451 	uint64_t cs;
452 
453 	if (is_saved_state64(state)) {
454 		cs = saved_state64(state)->isf.cs;
455 	} else {
456 		cs = saved_state32(state)->cs;
457 	}
458 
459 	return (cs & SEL_PL) == SEL_PL_U;
460 }
461 
462 void
ml_get_power_state(boolean_t * icp,boolean_t * pidlep)463 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
464 {
465 	*icp = (get_interrupt_level() != 0);
466 	/* These will be technically inaccurate for interrupts that occur
467 	 * successively within a single "idle exit" event, but shouldn't
468 	 * matter statistically.
469 	 */
470 	*pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
471 }
472 
473 /* Generate a fake interrupt */
/* Generate a fake interrupt — not implemented on Intel; always panics. */
__dead2
void
ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}
480 
481 /*
482  * TODO: transition users of this to kernel_thread_start_priority
483  * ml_thread_policy is an unsupported KPI
484  */
485 void
ml_thread_policy(thread_t thread,__unused unsigned policy_id,unsigned policy_info)486 ml_thread_policy(
487 	thread_t thread,
488 	__unused        unsigned policy_id,
489 	unsigned policy_info)
490 {
491 	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
492 		thread_precedence_policy_data_t info;
493 		__assert_only kern_return_t kret;
494 
495 		info.importance = 1;
496 
497 		kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
498 		    (thread_policy_t)&info,
499 		    THREAD_PRECEDENCE_POLICY_COUNT);
500 		assert(kret == KERN_SUCCESS);
501 	}
502 }
503 
504 /* Initialize Interrupts */
505 void
ml_install_interrupt_handler(void * nub,int source,void * target,IOInterruptHandler handler,void * refCon)506 ml_install_interrupt_handler(
507 	void *nub,
508 	int source,
509 	void *target,
510 	IOInterruptHandler handler,
511 	void *refCon)
512 {
513 	boolean_t current_state;
514 
515 	current_state = ml_set_interrupts_enabled(FALSE);
516 
517 	PE_install_interrupt_handler(nub, source, target,
518 	    (IOInterruptHandler) handler, refCon);
519 
520 	(void) ml_set_interrupts_enabled(current_state);
521 }
522 
523 
/* Wake an idle processor by sending it an interprocessor interrupt. */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->cpu_id);
}
530 
/* Deferred idle signal — not implemented on x86; always panics. */
__dead2
void
machine_signal_idle_deferred(
	__unused processor_t processor)
{
	panic("Unimplemented");
}
538 
/* Cancel a deferred idle signal — not implemented on x86; always panics. */
__dead2
void
machine_signal_idle_cancel(
	__unused processor_t processor)
{
	panic("Unimplemented");
}
546 
/*
 * Allocate and wire up the per-cpu state for a CPU identified by its
 * local APIC id. Maps lapic_id to a logical cpu number, registers with
 * KPC, and (for non-boot CPUs) allocates topology (core) state.
 * On success *processor_out is set to the CPU's processor_t.
 * Returns KERN_FAILURE if any allocation or registration step fails.
 */
static kern_return_t
register_cpu(
	uint32_t        lapic_id,
	processor_t     *processor_out,
	boolean_t       boot_cpu )
{
	int             target_cpu;
	cpu_data_t      *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	/* cpu 0 is reserved for (and only for) the boot CPU. */
	assert((boot_cpu && (target_cpu == 0)) ||
	    (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	/* The cpu_id is not known at registration phase. Just do
	 * lapic_id for now
	 */
	this_cpu_datap->cpu_phys_number = lapic_id;

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto failed;
	}
#endif

	if (!boot_cpu) {
		cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL) {
			goto failed;
		}
	}

	/*
	 * processor_init() deferred to topology start
	 * because "slot numbers" a.k.a. logical processor numbers
	 * are not yet finalized.
	 */
	*processor_out = this_cpu_datap->cpu_processor;

	return KERN_SUCCESS;

failed:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif /* KPC */

	return KERN_FAILURE;
}
600 
601 
/*
 * Register (start == FALSE) or start (start == TRUE) a CPU identified
 * by its local APIC id. Registration allocates per-cpu state via
 * register_cpu(); the first start request triggers a one-time topology
 * sort of all registered CPUs, after which each start fixes up the
 * IOKit cpu_id, initializes remaining per-cpu structures, and brings
 * the CPU online. Panics if asked to start an unregistered lapic_id.
 */
kern_return_t
ml_processor_register(
	cpu_id_t        cpu_id,
	uint32_t        lapic_id,
	processor_t     *processor_out,
	boolean_t       boot_cpu,
	boolean_t       start )
{
	/* Persistent across calls: topology is sorted exactly once. */
	static boolean_t done_topo_sort = FALSE;
	static uint32_t num_registered = 0;

	/* Register all CPUs first, and track max */
	if (start == FALSE) {
		num_registered++;

		DBG( "registering CPU lapic id %d\n", lapic_id );

		return register_cpu( lapic_id, processor_out, boot_cpu );
	}

	/* Sort by topology before we start anything */
	if (!done_topo_sort) {
		DBG( "about to start CPUs. %d registered\n", num_registered );

		cpu_topology_sort( num_registered );
		done_topo_sort = TRUE;
	}

	/* Assign the cpu ID */
	uint32_t cpunum = -1;
	cpu_data_t  *this_cpu_datap = NULL;

	/* find cpu num and pointer */
	cpunum = ml_get_cpuid( lapic_id );

	if (cpunum == 0xFFFFFFFF) { /* never heard of it? */
		panic( "trying to start invalid/unregistered CPU %d", lapic_id );
	}

	this_cpu_datap = cpu_datap(cpunum);

	/* fix the CPU id */
	this_cpu_datap->cpu_id = cpu_id;

	/* allocate and initialize other per-cpu structures */
	if (!boot_cpu) {
		mp_cpus_call_cpu_init(cpunum);
		random_cpu_init(cpunum);
	}

	/* output arg */
	*processor_out = this_cpu_datap->cpu_processor;

	/* OK, try and start this CPU */
	return cpu_topology_start_cpu( cpunum );
}
658 
659 
660 void
ml_cpu_get_info_type(ml_cpu_info_t * cpu_infop,cluster_type_t cluster_type __unused)661 ml_cpu_get_info_type(ml_cpu_info_t *cpu_infop, cluster_type_t cluster_type __unused)
662 {
663 	boolean_t       os_supports_sse;
664 	i386_cpu_info_t *cpuid_infop;
665 
666 	if (cpu_infop == NULL) {
667 		return;
668 	}
669 
670 	/*
671 	 * Are we supporting MMX/SSE/SSE2/SSE3?
672 	 * As distinct from whether the cpu has these capabilities.
673 	 */
674 	os_supports_sse = !!(get_cr4() & CR4_OSXMM);
675 
676 	if (ml_fpu_avx_enabled()) {
677 		cpu_infop->vector_unit = 9;
678 	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) {
679 		cpu_infop->vector_unit = 8;
680 	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) {
681 		cpu_infop->vector_unit = 7;
682 	} else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) {
683 		cpu_infop->vector_unit = 6;
684 	} else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) {
685 		cpu_infop->vector_unit = 5;
686 	} else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) {
687 		cpu_infop->vector_unit = 4;
688 	} else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) {
689 		cpu_infop->vector_unit = 3;
690 	} else if (cpuid_features() & CPUID_FEATURE_MMX) {
691 		cpu_infop->vector_unit = 2;
692 	} else {
693 		cpu_infop->vector_unit = 0;
694 	}
695 
696 	cpuid_infop  = cpuid_info();
697 
698 	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;
699 
700 	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
701 	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];
702 
703 	if (cpuid_infop->cache_size[L2U] > 0) {
704 		cpu_infop->l2_settings = 1;
705 		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
706 	} else {
707 		cpu_infop->l2_settings = 0;
708 		cpu_infop->l2_cache_size = 0xFFFFFFFF;
709 	}
710 
711 	if (cpuid_infop->cache_size[L3U] > 0) {
712 		cpu_infop->l3_settings = 1;
713 		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
714 	} else {
715 		cpu_infop->l3_settings = 0;
716 		cpu_infop->l3_cache_size = 0xFFFFFFFF;
717 	}
718 }
719 
720 /*
721  *	Routine:        ml_cpu_get_info
722  *	Function: Fill out the ml_cpu_info_t structure with parameters associated
723  *	with the boot cluster.
724  */
/*
 *	Routine:        ml_cpu_get_info
 *	Function: Fill out the ml_cpu_info_t structure with parameters associated
 *	with the boot cluster. Delegates to the typed variant with the
 *	only cluster type x86 supports (CLUSTER_TYPE_SMP).
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	ml_cpu_get_info_type(ml_cpu_info, CLUSTER_TYPE_SMP);
}
730 
731 unsigned int
ml_get_cpu_number_type(cluster_type_t cluster_type __unused,bool logical,bool available)732 ml_get_cpu_number_type(cluster_type_t cluster_type __unused, bool logical, bool available)
733 {
734 	/*
735 	 * At present no supported x86 system features more than 1 CPU type. Because
736 	 * of this, the cluster_type parameter is ignored.
737 	 */
738 	if (logical && available) {
739 		return machine_info.logical_cpu;
740 	} else if (logical && !available) {
741 		return machine_info.logical_cpu_max;
742 	} else if (!logical && available) {
743 		return machine_info.physical_cpu;
744 	} else {
745 		return machine_info.physical_cpu_max;
746 	}
747 }
748 
/*
 * Copy the human-readable cluster-type name into `name`.
 * All x86 clusters report "Standard"; cluster_type is ignored.
 */
void
ml_get_cluster_type_name(cluster_type_t cluster_type __unused, char *name, size_t name_size)
{
	strlcpy(name, "Standard", name_size);
}
754 
/* Number of clusters of the given type — always 1 on x86. */
unsigned int
ml_get_cluster_number_type(cluster_type_t cluster_type __unused)
{
	/*
	 * At present no supported x86 system has more than 1 CPU type and multiple
	 * clusters.
	 */
	return 1;
}
764 
/* Bitmask of CPU types present — only CLUSTER_TYPE_SMP on x86. */
unsigned int
ml_get_cpu_types(void)
{
	return 1 << CLUSTER_TYPE_SMP;
}
770 
/* Total cluster count — always 1 on x86. */
unsigned int
ml_get_cluster_count(void)
{
	/*
	 * At present no supported x86 system has more than 1 CPU type and multiple
	 * clusters.
	 */
	return 1;
}
780 
781 static_assert(MAX_CPUS <= 256, "MAX_CPUS must fit in _COMM_PAGE_CPU_TO_CLUSTER; Increase table size if needed");
782 
783 void
ml_map_cpus_to_clusters(uint8_t * table)784 ml_map_cpus_to_clusters(uint8_t *table)
785 {
786 	for (uint16_t cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
787 		// Supported x86 systems have 1 cluster
788 		*(table + cpu_id) = (uint8_t)0;
789 	}
790 }
791 
792 int
ml_early_cpu_max_number(void)793 ml_early_cpu_max_number(void)
794 {
795 	int n = max_ncpus;
796 
797 	assert(startup_phase >= STARTUP_SUB_TUNABLES);
798 	if (max_cpus_from_firmware) {
799 		n = MIN(n, max_cpus_from_firmware);
800 	}
801 	return n - 1;
802 }
803 
/*
 * Record the number of CPUs ACPI enumerated, clamped to the kernel's
 * max_ncpus, and wake any thread blocked in ml_wait_max_cpus().
 * Only the first call takes effect (guarded by max_cpus_initialized).
 */
void
ml_set_max_cpus(unsigned int max_cpus)
{
	lck_mtx_lock(&max_cpus_lock);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take int minimum.
			 */
			machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
		}
		/* A waiter in ml_wait_max_cpus() set MAX_CPUS_WAIT; wake it. */
		if (max_cpus_initialized == MAX_CPUS_WAIT) {
			thread_wakeup((event_t) &max_cpus_initialized);
		}
		max_cpus_initialized = MAX_CPUS_SET;
	}
	lck_mtx_unlock(&max_cpus_lock);
}
825 
/*
 * Block until ml_set_max_cpus() has published the CPU count, then
 * return machine_info.max_cpus. Safe to call before or after the set.
 */
unsigned int
ml_wait_max_cpus(void)
{
	lck_mtx_lock(&max_cpus_lock);
	while (max_cpus_initialized != MAX_CPUS_SET) {
		/* Flag that someone is waiting, then sleep on the mutex. */
		max_cpus_initialized = MAX_CPUS_WAIT;
		lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
	}
	lck_mtx_unlock(&max_cpus_lock);
	return machine_info.max_cpus;
}
837 
838 void
ml_panic_trap_to_debugger(__unused const char * panic_format_str,__unused va_list * panic_args,__unused unsigned int reason,__unused void * ctx,__unused uint64_t panic_options_mask,__unused unsigned long panic_caller)839 ml_panic_trap_to_debugger(__unused const char *panic_format_str,
840     __unused va_list *panic_args,
841     __unused unsigned int reason,
842     __unused void *ctx,
843     __unused uint64_t panic_options_mask,
844     __unused unsigned long panic_caller)
845 {
846 	return;
847 }
848 
/*
 * Scale a 64-bit timeout by 2^vti, saturating at max_timeout when the
 * shift amount is out of range, the multiplication would overflow, or
 * the result would exceed the cap.
 */
static uint64_t
virtual_timeout_inflate64(unsigned int vti, uint64_t timeout, uint64_t max_timeout)
{
	/* Shifting by >= the type width is undefined; saturate instead. */
	if (vti >= 64) {
		return max_timeout;
	}

	uint64_t inflated = timeout << vti;

	/* A failed round-trip means high bits were lost: overflow. */
	if ((inflated >> vti) != timeout || inflated > max_timeout) {
		return max_timeout;
	}

	return inflated;
}
866 
/*
 * Scale a 32-bit timeout by 2^vti, saturating at max_timeout when the
 * shift amount is out of range, the multiplication would overflow, or
 * the result would exceed the cap.
 *
 * Fix: previously the "result > max_timeout" cap check present in the
 * 64-bit sibling (virtual_timeout_inflate64) was missing here. It was
 * harmless only because the sole caller passes UINT32_MAX as the cap;
 * the check is added for correctness with any max_timeout.
 */
static uint32_t
virtual_timeout_inflate32(unsigned int vti, uint32_t timeout, uint32_t max_timeout)
{
	/* Shifting by >= the type width is undefined; saturate instead. */
	if (vti >= 32) {
		return max_timeout;
	}

	/* A failed round-trip means high bits were lost: overflow. */
	if ((timeout << vti) >> vti != timeout) {
		return max_timeout;
	}

	/* Cap the inflated value, matching virtual_timeout_inflate64. */
	if ((timeout << vti) > max_timeout) {
		return max_timeout;
	}

	return timeout << vti;
}
880 
881 /*
882  * Some timeouts are later adjusted or used in calculations setting
883  * other values. In order to avoid overflow, cap the max timeout as
884  * 2^47ns (~39 hours).
885  */
886 static const uint64_t max_timeout_ns = 1ULL << 47;
887 
888 /*
889  * Inflate a timeout in absolutetime.
890  */
891 static uint64_t
virtual_timeout_inflate_abs(unsigned int vti,uint64_t timeout)892 virtual_timeout_inflate_abs(unsigned int vti, uint64_t timeout)
893 {
894 	uint64_t max_timeout;
895 	nanoseconds_to_absolutetime(max_timeout_ns, &max_timeout);
896 	return virtual_timeout_inflate64(vti, timeout, max_timeout);
897 }
898 
899 /*
900  * Inflate a value in TSC ticks.
901  */
902 static uint64_t
virtual_timeout_inflate_tsc(unsigned int vti,uint64_t timeout)903 virtual_timeout_inflate_tsc(unsigned int vti, uint64_t timeout)
904 {
905 	const uint64_t max_timeout = tmrCvt(max_timeout_ns, tscFCvtn2t);
906 	return virtual_timeout_inflate64(vti, timeout, max_timeout);
907 }
908 
909 /*
910  * Inflate a timeout in microseconds.
911  */
/*
 * Inflate a timeout in microseconds, capped at the 32-bit maximum.
 * NOTE(review): the 64-bit timeout argument is implicitly truncated to
 * 32 bits here; current callers only pass 32-bit values — confirm
 * before widening use.
 */
static uint32_t
virtual_timeout_inflate_us(unsigned int vti, uint64_t timeout)
{
	const uint32_t cap = 0xFFFFFFFFu;

	return virtual_timeout_inflate32(vti, timeout, cap);
}
918 
/* Return the raw TSC as a fast, low-quality entropy/timing sample. */
uint64_t
ml_get_timebase_entropy(void)
{
	return __builtin_ia32_rdtsc();
}
924 
925 /*
926  *	Routine:        ml_init_lock_timeout
927  *	Function:
928  */
929 static void __startup_func
ml_init_lock_timeout(void)930 ml_init_lock_timeout(void)
931 {
932 	uint64_t        abstime;
933 	uint32_t        mtxspin;
934 #if DEVELOPMENT || DEBUG
935 	uint64_t        default_timeout_ns = NSEC_PER_SEC >> 2;
936 #else
937 	uint64_t        default_timeout_ns = NSEC_PER_SEC >> 1;
938 #endif
939 	uint32_t        slto;
940 	uint32_t        prt;
941 
942 	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
943 		default_timeout_ns = slto * NSEC_PER_USEC;
944 	}
945 
946 	/*
947 	 * LockTimeOut is absolutetime, LockTimeOutTSC is in TSC ticks,
948 	 * and LockTimeOutUsec is in microseconds and it's 32-bits.
949 	 */
950 	LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
951 	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
952 	LockTimeOut = abstime;
953 	LockTimeOutTSC = tmrCvt(abstime, tscFCvtn2t);
954 
955 	/*
956 	 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
957 	 * LockTimeOut but can be overriden separately. In particular, a
958 	 * zero value inhibits the timeout-panic and cuts a trace evnt instead
959 	 * - see pmap_flush_tlbs().
960 	 */
961 	if (PE_parse_boot_argn("tlbto_us", &slto, sizeof(slto))) {
962 		default_timeout_ns = slto * NSEC_PER_USEC;
963 		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
964 		TLBTimeOut = (uint32_t) abstime;
965 	} else {
966 		TLBTimeOut = LockTimeOut;
967 	}
968 
969 #if DEVELOPMENT || DEBUG
970 	report_phy_read_delay = LockTimeOut >> 1;
971 #endif
972 	if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof(slto))) {
973 		default_timeout_ns = slto * NSEC_PER_USEC;
974 		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
975 		report_phy_read_delay = abstime;
976 	}
977 
978 	if (PE_parse_boot_argn("phywritemaxus", &slto, sizeof(slto))) {
979 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
980 		report_phy_write_delay = abstime;
981 	}
982 
983 	if (PE_parse_boot_argn("tracephyreadus", &slto, sizeof(slto))) {
984 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
985 		trace_phy_read_delay = abstime;
986 	}
987 
988 	if (PE_parse_boot_argn("tracephywriteus", &slto, sizeof(slto))) {
989 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
990 		trace_phy_write_delay = abstime;
991 	}
992 
993 	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
994 		if (mtxspin > USEC_PER_SEC >> 4) {
995 			mtxspin =  USEC_PER_SEC >> 4;
996 		}
997 		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
998 	} else {
999 		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
1000 	}
1001 	MutexSpin = (unsigned int)abstime;
1002 	low_MutexSpin = MutexSpin;
1003 	/*
1004 	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
1005 	 * real_ncpus is not set at this time
1006 	 */
1007 	high_MutexSpin = -1;
1008 
1009 	nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
1010 	if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof(prt))) {
1011 		nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
1012 	}
1013 
1014 	virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
1015 	if (virtualized) {
1016 		unsigned int vti;
1017 
1018 		if (!PE_parse_boot_argn("vti", &vti, sizeof(vti))) {
1019 			vti = 6;
1020 		}
1021 
1022 #define VIRTUAL_TIMEOUT_INFLATE_ABS(_timeout)              \
1023 MACRO_BEGIN                                                \
1024 	_timeout = virtual_timeout_inflate_abs(vti, _timeout); \
1025 MACRO_END
1026 
1027 #define VIRTUAL_TIMEOUT_INFLATE_TSC(_timeout)              \
1028 MACRO_BEGIN                                                \
1029 	_timeout = virtual_timeout_inflate_tsc(vti, _timeout); \
1030 MACRO_END
1031 #define VIRTUAL_TIMEOUT_INFLATE_US(_timeout)               \
1032 MACRO_BEGIN                                                \
1033 	_timeout = virtual_timeout_inflate_us(vti, _timeout);  \
1034 MACRO_END
1035 		/*
1036 		 * These timeout values are inflated because they cause
1037 		 * the kernel to panic when they expire.
1038 		 * (Needed when running as a guest VM as the host OS
1039 		 * may not always schedule vcpu threads in time to
1040 		 * meet the deadline implied by the narrower time
1041 		 * window used on hardware.)
1042 		 */
1043 		VIRTUAL_TIMEOUT_INFLATE_US(LockTimeOutUsec);
1044 		VIRTUAL_TIMEOUT_INFLATE_ABS(LockTimeOut);
1045 		VIRTUAL_TIMEOUT_INFLATE_TSC(LockTimeOutTSC);
1046 		VIRTUAL_TIMEOUT_INFLATE_ABS(TLBTimeOut);
1047 		VIRTUAL_TIMEOUT_INFLATE_ABS(report_phy_read_delay);
1048 		VIRTUAL_TIMEOUT_INFLATE_TSC(lock_panic_timeout);
1049 	}
1050 
1051 	interrupt_latency_tracker_setup();
1052 }
1053 STARTUP(TIMEOUTS, STARTUP_RANK_MIDDLE, ml_init_lock_timeout);
1054 
1055 /*
1056  * Threshold above which we should attempt to block
1057  * instead of spinning for clock_delay_until().
1058  */
1059 
1060 void
ml_init_delay_spin_threshold(int threshold_us)1061 ml_init_delay_spin_threshold(int threshold_us)
1062 {
1063 	nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
1064 }
1065 
1066 boolean_t
ml_delay_should_spin(uint64_t interval)1067 ml_delay_should_spin(uint64_t interval)
1068 {
1069 	return (interval < delay_spin_threshold) ? TRUE : FALSE;
1070 }
1071 
1072 TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0);
1073 
/*
 * Hook invoked on thread yield; on DEVELOPMENT/DEBUG kernels an extra
 * delay can be injected via the "yield_delay_us" boot-arg.
 */
void
ml_delay_on_yield(void)
{
#if DEVELOPMENT || DEBUG
	if (yield_delay_us != 0) {
		delay(yield_delay_us);
	}
#endif
}
1083 
1084 /*
1085  * This is called from the machine-independent layer
1086  * to perform machine-dependent info updates. Defer to cpu_thread_init().
1087  */
1088 void
ml_cpu_up(void)1089 ml_cpu_up(void)
1090 {
1091 	return;
1092 }
1093 
1094 void
ml_cpu_up_update_counts(__unused int cpu_id)1095 ml_cpu_up_update_counts(__unused int cpu_id)
1096 {
1097 	return;
1098 }
1099 
1100 /*
1101  * This is called from the machine-independent layer
1102  * to perform machine-dependent info updates.
1103  */
1104 void
ml_cpu_down(void)1105 ml_cpu_down(void)
1106 {
1107 	i386_deactivate_cpu();
1108 
1109 	return;
1110 }
1111 
1112 void
ml_cpu_down_update_counts(__unused int cpu_id)1113 ml_cpu_down_update_counts(__unused int cpu_id)
1114 {
1115 	return;
1116 }
1117 
/* Return the thread running on this CPU, via current_thread_fast(). */
thread_t
current_thread(void)
{
	return current_thread_fast();
}
1123 
1124 
/* TRUE when the CPU is operating in 64-bit mode (cpu_mode_is64bit()). */
boolean_t
ml_is64bit(void)
{
	return cpu_mode_is64bit();
}
1130 
1131 
/* TRUE when the given thread has a 64-bit address space. */
boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}
1137 
1138 
/* TRUE when the opaque saved machine state is a 64-bit state. */
boolean_t
ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}
1144 
1145 void
ml_cpu_set_ldt(int selector)1146 ml_cpu_set_ldt(int selector)
1147 {
1148 	/*
1149 	 * Avoid loading the LDT
1150 	 * if we're setting the KERNEL LDT and it's already set.
1151 	 */
1152 	if (selector == KERNEL_LDT &&
1153 	    current_cpu_datap()->cpu_ldt == KERNEL_LDT) {
1154 		return;
1155 	}
1156 
1157 	lldt(selector);
1158 	current_cpu_datap()->cpu_ldt = selector;
1159 }
1160 
/* Forward to fp_setvalid() to set/clear the FP-state-valid flag. */
void
ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}
1166 
/* Return this CPU's recorded interrupt-event timestamp (cpu_int_event_time). */
uint64_t
ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}
1172 
1173 vm_offset_t
ml_stack_remaining(void)1174 ml_stack_remaining(void)
1175 {
1176 	uintptr_t local = (uintptr_t) &local;
1177 
1178 	if (ml_at_interrupt_context() != 0) {
1179 		return local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE);
1180 	} else {
1181 		return local - current_thread()->kernel_stack;
1182 	}
1183 }
1184 
#if KASAN
vm_offset_t ml_stack_base(void);
vm_size_t ml_stack_size(void);

/* Base address of the stack currently in use (interrupt or kernel). */
vm_offset_t
ml_stack_base(void)
{
	if (ml_at_interrupt_context()) {
		return current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE;
	}
	return current_thread()->kernel_stack;
}

/* Size of the stack currently in use (interrupt or kernel). */
vm_size_t
ml_stack_size(void)
{
	return ml_at_interrupt_context() ? INTSTACK_SIZE : kernel_stack_size;
}
#endif /* KASAN */
1209 
#if CONFIG_KCOV
/* Return the kcov (coverage) data for the CPU we are executing on. */
kcov_cpu_data_t *
current_kcov_data(void)
{
	return &current_cpu_datap()->cpu_kcov_data;
}

/* Return the kcov (coverage) data for the CPU identified by cpuid. */
kcov_cpu_data_t *
cpu_kcov_data(int cpuid)
{
	return &cpu_datap(cpuid)->cpu_kcov_data;
}
#endif /* CONFIG_KCOV */
1223 
/*
 * Take a pending urgent AST once the preemption level has returned
 * to zero.  If interrupts are enabled, the preemption trap is raised
 * synchronously via a software interrupt.
 */
void
kernel_preempt_check(void)
{
	boolean_t       intr;
	unsigned long flags;

	assert(get_preemption_level() == 0);

	if (__improbable(*ast_pending() & AST_URGENT)) {
		/*
		 * can handle interrupts and preemptions
		 * at this point.  Read EFLAGS to determine whether
		 * interrupts are currently enabled.
		 */
		__asm__ volatile ("pushf; pop	%0"  :  "=r" (flags));

		intr = ((flags & EFL_IF) != 0);

		/*
		 * now cause the PRE-EMPTION trap
		 * (software interrupt T_PREEMPT), but only when
		 * interrupts are enabled.
		 */
		if (intr == TRUE) {
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
		}
	}
}
1249 
1250 boolean_t
machine_timeout_suspended(void)1251 machine_timeout_suspended(void)
1252 {
1253 	return pmap_tlb_flush_timeout || lck_spinlock_timeout_in_progress ||
1254 	       panic_active() || mp_recent_debugger_activity() ||
1255 	       ml_recent_wake();
1256 }
1257 
/* Eagerly evaluate all pending timer and thread callouts.
 * Rescans the delayed thread-call timers and asks every CPU to rescan
 * its timer queue; serialized by ml_timer_evaluation_slock.
 */
void
ml_timer_evaluate(void)
{
	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_START, 0, 0, 0, 0, 0);

	uint64_t te_end, te_start = mach_absolute_time();
	simple_lock(&ml_timer_evaluation_slock, LCK_GRP_NULL);
	/* Visible through ml_timer_forced_evaluation() while the rescan runs. */
	ml_timer_evaluation_in_progress = TRUE;
	thread_call_delayed_timer_rescan_all();
	/* Broadcast an asynchronous timer-queue rescan request to every CPU. */
	mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
	ml_timer_evaluation_in_progress = FALSE;
	/* Statistics: total eager evaluations and the longest one observed. */
	ml_timer_eager_evaluations++;
	te_end = mach_absolute_time();
	ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
	simple_unlock(&ml_timer_evaluation_slock);

	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
1278 
/* TRUE while ml_timer_evaluate() is performing its forced rescan. */
boolean_t
ml_timer_forced_evaluation(void)
{
	return ml_timer_evaluation_in_progress;
}
1284 
/* Accumulate GPU time (nanoseconds) onto the current thread's counter. */
void
ml_gpu_stat_update(uint64_t gpu_ns_delta)
{
	current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
}
1290 
/* Return the GPU nanoseconds accumulated by thread t. */
uint64_t
ml_gpu_stat(thread_t t)
{
	return t->machine.thread_gpu_ns;
}
1296 
1297 int plctrace_enabled = 0;
1298 
/* Out-of-line wrapper around disable_preemption_internal(). */
void
_disable_preemption(void)
{
	disable_preemption_internal();
}
1304 
/* Out-of-line wrapper around enable_preemption_internal(). */
void
_enable_preemption(void)
{
	enable_preemption_internal();
}
1310 
/* Disable plctrace recording by clearing the global gate. */
void
plctrace_disable(void)
{
	plctrace_enabled = 0;
}
1316 
1317 static boolean_t ml_quiescing;
1318 
/* Record the system quiescing state (read back by ml_is_quiescing()). */
void
ml_set_is_quiescing(boolean_t quiescing)
{
	ml_quiescing = quiescing;
}
1324 
/* Return the last quiescing state set via ml_set_is_quiescing(). */
boolean_t
ml_is_quiescing(void)
{
	return ml_quiescing;
}
1330 
/*
 * Size of memory reserved by the booter; always zero on x86, where no
 * booter-reserved region exists.
 */
uint64_t
ml_get_booter_memory_size(void)
{
	return 0;
}
1336 
/* Machine-dependent lockdown: write-protect const data (x86_64_protect_data_const()). */
void
machine_lockdown(void)
{
	x86_64_protect_data_const();
}
1342 
/* Whether a CPU may be taken offline; always permitted on x86, for any reason. */
bool
ml_cpu_can_exit(__unused int cpu_id, __unused processor_reason_t reason)
{
	return true;
}
1348 
/* No machine-dependent work is needed on x86 when a CPU state transition begins. */
void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}
1353 
/* No machine-dependent work is needed on x86 when a CPU state transition ends. */
void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}
1358 
/* No machine-dependent work is needed on x86 at the start of a CPU loop. */
void
ml_cpu_begin_loop(void)
{
}
1363 
/* No machine-dependent work is needed on x86 at the end of a CPU loop. */
void
ml_cpu_end_loop(void)
{
}
1368 
/*
 * Report machine-reserved VM regions.  x86 reserves none, so the out
 * parameter is cleared and a count of zero is returned regardless of
 * the address-space width.
 */
size_t
ml_get_vm_reserved_regions(bool vm_is64bit, const struct vm_reserved_region **regions)
{
#pragma unused(vm_is64bit)
	assert(regions != NULL);
	*regions = NULL;

	return 0;
}
1378 
/* No machine-dependent work is needed on x86 to power-enable a CPU. */
void
ml_cpu_power_enable(__unused int cpu_id)
{
}
1383 
/* No machine-dependent work is needed on x86 to power-disable a CPU. */
void
ml_cpu_power_disable(__unused int cpu_id)
{
}
1388 
/*
 * Identify the page-protection hardware type.
 * Not supported on x86, so always report type 0.
 */
int
ml_page_protection_type(void)
{
	return 0;
}
1394 
1395 bool
ml_addr_in_non_xnu_stack(__unused uintptr_t addr)1396 ml_addr_in_non_xnu_stack(__unused uintptr_t addr)
1397 {
1398 	/* There are no non-XNU stacks on x86 systems. */
1399 	return false;
1400 }
1401