xref: /xnu-10063.121.3/osfmk/i386/machine_routines.c (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <i386/machine_routines.h>
30 #include <i386/cpuid.h>
31 #include <i386/fpu.h>
32 #include <mach/processor.h>
33 #include <kern/processor.h>
34 #include <kern/machine.h>
35 
36 #include <kern/cpu_number.h>
37 #include <kern/thread.h>
38 #include <kern/thread_call.h>
39 #include <kern/policy_internal.h>
40 
41 #include <prng/random.h>
42 #include <prng/entropy.h>
43 #include <i386/machine_cpu.h>
44 #include <i386/lapic.h>
45 #include <i386/bit_routines.h>
46 #include <i386/mp_events.h>
47 #include <i386/pmCPU.h>
48 #include <i386/trap_internal.h>
49 #include <i386/tsc.h>
50 #include <i386/cpu_threads.h>
51 #include <i386/proc_reg.h>
52 #include <mach/vm_param.h>
53 #include <i386/pmap.h>
54 #include <i386/pmap_internal.h>
55 #include <i386/misc_protos.h>
56 #include <kern/timer_queue.h>
57 #include <vm/vm_map.h>
58 #include <kern/monotonic.h>
59 #include <kern/kpc.h>
60 #include <architecture/i386/pio.h>
61 #include <i386/cpu_data.h>
62 #if DEBUG
63 #define DBG(x...)       kprintf("DBG: " x)
64 #else
65 #define DBG(x...)
66 #endif
67 
68 extern void     wakeup(void *);
69 
70 uint64_t        LockTimeOut;
71 uint64_t        TLBTimeOut;
72 uint64_t        LockTimeOutTSC;
73 uint32_t        LockTimeOutUsec;
74 uint64_t        MutexSpin;
75 uint64_t        low_MutexSpin;
76 int64_t         high_MutexSpin;
77 uint64_t        LastDebuggerEntryAllowance;
78 uint64_t        delay_spin_threshold;
79 
80 extern uint64_t panic_restart_timeout;
81 
82 boolean_t virtualized = FALSE;
83 
84 static SIMPLE_LOCK_DECLARE(ml_timer_evaluation_slock, 0);
85 uint32_t ml_timer_eager_evaluations;
86 uint64_t ml_timer_eager_evaluation_max;
87 static boolean_t ml_timer_evaluation_in_progress = FALSE;
88 
89 LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
90 LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
91 static int max_cpus_initialized = 0;
92 #define MAX_CPUS_SET    0x1
93 #define MAX_CPUS_WAIT   0x2
94 
95 /* IO memory map services */
96 
97 /* Map memory-mapped IO space */
98 vm_offset_t
99 ml_io_map(
100 	vm_offset_t phys_addr,
101 	vm_size_t size)
102 {
103 	return io_map(phys_addr, size, VM_WIMG_IO, VM_PROT_DEFAULT, false);
104 }
105 
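/* Map memory-mapped IO space with write-combining caching (VM_WIMG_WCOMB). */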
106 vm_offset_t
107 ml_io_map_wcomb(
108 	vm_offset_t phys_addr,
109 	vm_size_t size)
110 {
111 	return io_map(phys_addr, size, VM_WIMG_WCOMB, VM_PROT_DEFAULT, false);
112 }
113 
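/*
 * Like ml_io_map(), but the caching attributes are caller-supplied via
 * 'flags' and the resulting mapping is marked unmappable (the final
 * 'true' argument to io_map()).
 */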
114 vm_offset_t
115 ml_io_map_unmappable(
116 	vm_offset_t             phys_addr,
117 	vm_size_t               size,
118 	unsigned int            flags)
119 {
120 	return io_map(phys_addr, size, flags, VM_PROT_DEFAULT, true);
121 }
122 
123 void
124 ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
125 {
126 	*phys_addr = 0;
127 	*size      = 0;
128 }
129 
130 
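/* Return the static kernel virtual address that maps a physical address. */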
131 vm_offset_t
132 ml_static_ptovirt(
133 	vm_offset_t paddr)
134 {
135 #if defined(__x86_64__)
136 	return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
137 #else
138 	return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
139 #endif
140 }
141 
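/* Convert an unslid (link-time) kernel address to its slid runtime address. */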
142 vm_offset_t
143 ml_static_slide(
144 	vm_offset_t vaddr)
145 {
146 	return vaddr + vm_kernel_slide;
147 }
148 
149 /*
150  * base must be page-aligned, and size must be a multiple of PAGE_SIZE
151  */
152 kern_return_t
153 ml_static_verify_page_protections(
154 	uint64_t base, uint64_t size, vm_prot_t prot)
155 {
156 	vm_prot_t pageprot;
157 	uint64_t offset;
158 
159 	DBG("ml_static_verify_page_protections: vaddr 0x%llx sz 0x%llx prot 0x%x\n", base, size, prot);
160 
161 	/*
162 	 * base must be within the static bounds, defined to be:
163 	 * (vm_kernel_stext, kc_highest_nonlinkedit_vmaddr)
164 	 */
165 #if DEVELOPMENT || DEBUG || KASAN
166 	assert(kc_highest_nonlinkedit_vmaddr > 0 && base > vm_kernel_stext && base < kc_highest_nonlinkedit_vmaddr);
167 #else   /* On release kernels, assume this is a protection mismatch failure. */
168 	if (kc_highest_nonlinkedit_vmaddr == 0 || base < vm_kernel_stext || base >= kc_highest_nonlinkedit_vmaddr) {
169 		return KERN_FAILURE;
170 	}
171 #endif
172 
173 	for (offset = 0; offset < size; offset += PAGE_SIZE) {
174 		if (pmap_get_prot(kernel_pmap, base + offset, &pageprot) == KERN_FAILURE) {
175 			return KERN_FAILURE;
176 		}
177 		if ((pageprot & prot) != prot) {
178 			return KERN_FAILURE;
179 		}
180 	}
181 
182 	return KERN_SUCCESS;
183 }
184 
185 vm_offset_t
186 ml_static_unslide(
187 	vm_offset_t vaddr)
188 {
189 	return vaddr - vm_kernel_slide;
190 }
191 
192 /*
193  * Reclaim memory, by virtual address, that was used in early boot and is no longer needed
194  * by the kernel.
195  */
196 void
197 ml_static_mfree(
198 	vm_offset_t vaddr,
199 	vm_size_t size)
200 {
201 	addr64_t vaddr_cur;
202 	ppnum_t ppn;
203 	uint32_t freed_pages = 0;
204 	vm_size_t map_size;
205 
206 	assert(vaddr >= VM_MIN_KERNEL_ADDRESS);
207 
208 	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
209 
210 	for (vaddr_cur = vaddr; vaddr_cur < round_page_64(vaddr + size);) {
211 		map_size = pmap_query_pagesize(kernel_pmap, vaddr_cur);
212 
213 		/* just skip if nothing mapped here */
214 		if (map_size == 0) {
215 			vaddr_cur += PAGE_SIZE;
216 			continue;
217 		}
218 
219 		/*
220 		 * Can't free from the middle of a large page.
221 		 */
222 		assert((vaddr_cur & (map_size - 1)) == 0);
223 
224 		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
225 		assert(ppn != (ppnum_t)NULL);
226 
227 		pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + map_size);
228 		while (map_size > 0) {
229 			assert(pmap_valid_page(ppn));
230 			if (IS_MANAGED_PAGE(ppn)) {
231 				vm_page_create(ppn, (ppn + 1));
232 				freed_pages++;
233 			}
234 			map_size -= PAGE_SIZE;
235 			vaddr_cur += PAGE_SIZE;
236 			ppn++;
237 		}
238 	}
239 	vm_page_lockspin_queues();
240 	vm_page_wire_count -= freed_pages;
241 	vm_page_wire_count_initial -= freed_pages;
242 	if (vm_page_wire_count_on_boot != 0) {
243 		assert(vm_page_wire_count_on_boot >= freed_pages);
244 		vm_page_wire_count_on_boot -= freed_pages;
245 	}
246 	vm_page_unlock_queues();
247 
248 #if     DEBUG
249 	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
250 #endif
251 }
252 
253 /* Change page protections for addresses previously loaded by efiboot */
254 kern_return_t
255 ml_static_protect(vm_offset_t vmaddr, vm_size_t size, vm_prot_t prot)
256 {
257 	boolean_t NX = !!!(prot & VM_PROT_EXECUTE), ro = !!!(prot & VM_PROT_WRITE);
258 
259 	assert(prot & VM_PROT_READ);
260 
261 	pmap_mark_range(kernel_pmap, vmaddr, size, NX, ro);
262 
263 	return KERN_SUCCESS;
264 }
265 
266 /* virtual to physical on wired pages */
267 vm_offset_t
268 ml_vtophys(
269 	vm_offset_t vaddr)
270 {
271 	return (vm_offset_t)kvtophys(vaddr);
272 }
273 
274 /*
275  *	Routine:        ml_nofault_copy
276  *	Function:	Perform a physical mode copy if the source and
277  *			destination have valid translations in the kernel pmap.
278  *			If translations are present, they are assumed to
279  *			be wired; i.e. no attempt is made to guarantee that the
280  *			translations obtained remain valid for
281  *			the duration of the copy process.
282  */
283 
284 vm_size_t
285 ml_nofault_copy(
286 	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
287 {
288 	addr64_t cur_phys_dst, cur_phys_src;
289 	uint32_t count, nbytes = 0;
290 
291 	while (size > 0) {
292 		if (!(cur_phys_src = kvtophys(virtsrc))) {
293 			break;
294 		}
295 		if (!(cur_phys_dst = kvtophys(virtdst))) {
296 			break;
297 		}
298 		if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
299 			break;
300 		}
301 		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
302 		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
303 			count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
304 		}
305 		if (count > size) {
306 			count = (uint32_t)size;
307 		}
308 
309 		bcopy_phys(cur_phys_src, cur_phys_dst, count);
310 
311 		nbytes += count;
312 		virtsrc += count;
313 		virtdst += count;
314 		size -= count;
315 	}
316 
317 	return nbytes;
318 }
319 
320 /*
321  *	Routine:        ml_validate_nofault
322  *	Function: Validate that this address range has valid translations
323  *			in the kernel pmap.  If translations are present, they are
324  *			assumed to be wired; i.e. no attempt is made to guarantee
325  *			that the translations persist after the check.
326  *  Returns: TRUE if the range is mapped and will not cause a fault,
327  *			FALSE otherwise.
328  */
329 
330 boolean_t
331 ml_validate_nofault(
332 	vm_offset_t virtsrc, vm_size_t size)
333 {
334 	addr64_t cur_phys_src;
335 	uint32_t count;
336 
337 	while (size > 0) {
338 		if (!(cur_phys_src = kvtophys(virtsrc))) {
339 			return FALSE;
340 		}
341 		if (!pmap_valid_page(i386_btop(cur_phys_src))) {
342 			return FALSE;
343 		}
344 		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
345 		if (count > size) {
346 			count = (uint32_t)size;
347 		}
348 
349 		virtsrc += count;
350 		size -= count;
351 	}
352 
353 	return TRUE;
354 }
355 
356 /* Interrupt handling */
357 
358 /* Initialize Interrupts */
359 void
360 ml_init_interrupt(void)
361 {
362 	(void) ml_set_interrupts_enabled(TRUE);
363 }
364 
365 
366 /* Get Interrupts Enabled */
367 boolean_t
368 ml_get_interrupts_enabled(void)
369 {
370 	unsigned long flags;
371 
372 	__asm__ volatile ("pushf; pop	%0":  "=r" (flags));
373 	return (flags & EFL_IF) != 0;
374 }
375 
376 /* Set Interrupts Enabled */
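/*
 * Returns the previous interrupt-enable state so that callers can save and
 * restore it, e.g. (illustrative pattern, as used elsewhere in this file):
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(istate);
 */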
377 boolean_t
378 ml_set_interrupts_enabled(boolean_t enable)
379 {
380 	unsigned long flags;
381 	boolean_t istate;
382 
383 	__asm__ volatile ("pushf; pop	%0"  :  "=r" (flags));
384 
385 	assert(get_interrupt_level() ? (enable == FALSE) : TRUE);
386 
387 	istate = ((flags & EFL_IF) != 0);
388 
389 	if (enable) {
390 		__asm__ volatile ("sti;nop");
391 
392 		if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) {
393 			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
394 		}
395 	} else {
396 		if (istate) {
397 			__asm__ volatile ("cli");
398 		}
399 	}
400 
401 	return istate;
402 }
403 
404 /* Early Set Interrupts Enabled */
405 boolean_t
406 ml_early_set_interrupts_enabled(boolean_t enable)
407 {
408 	if (enable == TRUE) {
409 		kprintf("Caller attempted to enable interrupts too early in "
410 		    "kernel startup. Halting.\n");
411 		hlt();
412 		/*NOTREACHED*/
413 	}
414 
415 	/* On x86, do not allow interrupts to be enabled very early */
416 	return FALSE;
417 }
418 
419 /* Check if running at interrupt context */
420 boolean_t
421 ml_at_interrupt_context(void)
422 {
423 	return get_interrupt_level() != 0;
424 }
425 
426 /*
427  * This answers the question
428  * "after returning from this interrupt handler with the AST_URGENT bit set,
429  * will I end up in ast_taken_user or ast_taken_kernel?"
430  *
431  * If it's called in non-interrupt context (e.g. regular syscall), it should
432  * return false.
433  *
434  * Must be called with interrupts disabled.
435  */
436 bool
437 ml_did_interrupt_userspace(void)
438 {
439 	assert(ml_get_interrupts_enabled() == false);
440 
441 	x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
442 	if (!state) {
443 		return false;
444 	}
445 
446 	uint64_t cs;
447 
448 	if (is_saved_state64(state)) {
449 		cs = saved_state64(state)->isf.cs;
450 	} else {
451 		cs = saved_state32(state)->cs;
452 	}
453 
454 	return (cs & SEL_PL) == SEL_PL_U;
455 }
456 
457 void
458 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
459 {
460 	*icp = (get_interrupt_level() != 0);
461 	/* These will be technically inaccurate for interrupts that occur
462 	 * successively within a single "idle exit" event, but shouldn't
463 	 * matter statistically.
464 	 */
465 	*pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
466 }
467 
468 /* Generate a fake interrupt */
469 __dead2
470 void
471 ml_cause_interrupt(void)
472 {
473 	panic("ml_cause_interrupt not defined yet on Intel");
474 }
475 
476 /*
477  * TODO: transition users of this to kernel_thread_start_priority
478  * ml_thread_policy is an unsupported KPI
479  */
480 void
481 ml_thread_policy(
482 	thread_t thread,
483 	__unused        unsigned policy_id,
484 	unsigned policy_info)
485 {
486 	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
487 		thread_precedence_policy_data_t info;
488 		__assert_only kern_return_t kret;
489 
490 		info.importance = 1;
491 
492 		kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
493 		    (thread_policy_t)&info,
494 		    THREAD_PRECEDENCE_POLICY_COUNT);
495 		assert(kret == KERN_SUCCESS);
496 	}
497 }
498 
499 /* Install an interrupt handler */
500 void
501 ml_install_interrupt_handler(
502 	void *nub,
503 	int source,
504 	void *target,
505 	IOInterruptHandler handler,
506 	void *refCon)
507 {
508 	boolean_t current_state;
509 
510 	current_state = ml_set_interrupts_enabled(FALSE);
511 
512 	PE_install_interrupt_handler(nub, source, target,
513 	    (IOInterruptHandler) handler, refCon);
514 
515 	(void) ml_set_interrupts_enabled(current_state);
516 }
517 
518 
519 void
520 machine_signal_idle(
521 	processor_t processor)
522 {
523 	cpu_interrupt(processor->cpu_id);
524 }
525 
526 __dead2
527 void
528 machine_signal_idle_deferred(
529 	__unused processor_t processor)
530 {
531 	panic("Unimplemented");
532 }
533 
534 __dead2
535 void
536 machine_signal_idle_cancel(
537 	__unused processor_t processor)
538 {
539 	panic("Unimplemented");
540 }
541 
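/*
 * Allocate per-CPU data for the CPU with the given LAPIC ID and record its
 * LAPIC-to-cpu-number mapping; the CPU is not started here.
 */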
542 static kern_return_t
543 register_cpu(
544 	uint32_t        lapic_id,
545 	processor_t     *processor_out,
546 	boolean_t       boot_cpu )
547 {
548 	int             target_cpu;
549 	cpu_data_t      *this_cpu_datap;
550 
551 	this_cpu_datap = cpu_data_alloc(boot_cpu);
552 	if (this_cpu_datap == NULL) {
553 		return KERN_FAILURE;
554 	}
555 	target_cpu = this_cpu_datap->cpu_number;
556 	assert((boot_cpu && (target_cpu == 0)) ||
557 	    (!boot_cpu && (target_cpu != 0)));
558 
559 	lapic_cpu_map(lapic_id, target_cpu);
560 
561 	/* The cpu_id is not known during the registration phase. Just use the
562 	 * lapic_id for now.
563 	 */
564 	this_cpu_datap->cpu_phys_number = lapic_id;
565 
566 #if CONFIG_CPU_COUNTERS
567 	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
568 		goto failed;
569 	}
570 #endif /* CONFIG_CPU_COUNTERS */
571 
572 	if (!boot_cpu) {
573 		cpu_thread_alloc(this_cpu_datap->cpu_number);
574 		if (this_cpu_datap->lcpu.core == NULL) {
575 			goto failed;
576 		}
577 	}
578 
579 	/*
580 	 * processor_init() deferred to topology start
581 	 * because "slot numbers" a.k.a. logical processor numbers
582 	 * are not yet finalized.
583 	 */
584 	*processor_out = this_cpu_datap->cpu_processor;
585 
586 	return KERN_SUCCESS;
587 
588 failed:
589 #if CONFIG_CPU_COUNTERS
590 	kpc_unregister_cpu(this_cpu_datap);
591 #endif /* CONFIG_CPU_COUNTERS */
592 
593 	return KERN_FAILURE;
594 }
595 
596 kern_return_t
597 ml_processor_register(
598 	cpu_id_t        cpu_id,
599 	uint32_t        lapic_id,
600 	processor_t     *processor_out,
601 	boolean_t       boot_cpu,
602 	boolean_t       start )
603 {
604 	static boolean_t done_topo_sort = FALSE;
605 	static uint32_t num_registered = 0;
606 
607 	/* Register all CPUs first, and track max */
608 	if (start == FALSE) {
609 		num_registered++;
610 
611 		DBG( "registering CPU lapic id %d\n", lapic_id );
612 
613 		return register_cpu( lapic_id, processor_out, boot_cpu );
614 	}
615 
616 	/* Sort by topology before we start anything */
617 	if (!done_topo_sort) {
618 		DBG( "about to start CPUs. %d registered\n", num_registered );
619 
620 		cpu_topology_sort( num_registered );
621 		done_topo_sort = TRUE;
622 	}
623 
624 	/* Assign the cpu ID */
625 	uint32_t cpunum = -1;
626 	cpu_data_t  *this_cpu_datap = NULL;
627 
628 	/* find cpu num and pointer */
629 	cpunum = ml_get_cpuid( lapic_id );
630 
631 	if (cpunum == 0xFFFFFFFF) { /* never heard of it? */
632 		panic( "trying to start invalid/unregistered CPU %d", lapic_id );
633 	}
634 
635 	this_cpu_datap = cpu_datap(cpunum);
636 
637 	/* fix the CPU id */
638 	this_cpu_datap->cpu_id = cpu_id;
639 
640 	/* allocate and initialize other per-cpu structures */
641 	if (!boot_cpu) {
642 		mp_cpus_call_cpu_init(cpunum);
643 		random_cpu_init(cpunum);
644 	}
645 
646 	/* output arg */
647 	*processor_out = this_cpu_datap->cpu_processor;
648 
649 	/* OK, try and start this CPU */
650 	return cpu_topology_start_cpu( cpunum );
651 }
652 
653 
654 void
655 ml_cpu_get_info_type(ml_cpu_info_t *cpu_infop, cluster_type_t cluster_type __unused)
656 {
657 	boolean_t       os_supports_sse;
658 	i386_cpu_info_t *cpuid_infop;
659 
660 	if (cpu_infop == NULL) {
661 		return;
662 	}
663 
664 	/*
665 	 * Are we supporting MMX/SSE/SSE2/SSE3?
666 	 * As distinct from whether the cpu has these capabilities.
667 	 */
668 	os_supports_sse = !!(get_cr4() & CR4_OSXMM);
669 
670 	if (ml_fpu_avx_enabled()) {
671 		cpu_infop->vector_unit = 9;
672 	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) {
673 		cpu_infop->vector_unit = 8;
674 	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) {
675 		cpu_infop->vector_unit = 7;
676 	} else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) {
677 		cpu_infop->vector_unit = 6;
678 	} else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) {
679 		cpu_infop->vector_unit = 5;
680 	} else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) {
681 		cpu_infop->vector_unit = 4;
682 	} else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) {
683 		cpu_infop->vector_unit = 3;
684 	} else if (cpuid_features() & CPUID_FEATURE_MMX) {
685 		cpu_infop->vector_unit = 2;
686 	} else {
687 		cpu_infop->vector_unit = 0;
688 	}
689 
690 	cpuid_infop  = cpuid_info();
691 
692 	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;
693 
694 	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
695 	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];
696 
697 	if (cpuid_infop->cache_size[L2U] > 0) {
698 		cpu_infop->l2_settings = 1;
699 		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
700 	} else {
701 		cpu_infop->l2_settings = 0;
702 		cpu_infop->l2_cache_size = 0xFFFFFFFF;
703 	}
704 
705 	if (cpuid_infop->cache_size[L3U] > 0) {
706 		cpu_infop->l3_settings = 1;
707 		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
708 	} else {
709 		cpu_infop->l3_settings = 0;
710 		cpu_infop->l3_cache_size = 0xFFFFFFFF;
711 	}
712 }
713 
714 /*
715  *	Routine:        ml_cpu_get_info
716  *	Function: Fill out the ml_cpu_info_t structure with parameters associated
717  *	with the boot cluster.
718  */
719 void
720 ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
721 {
722 	ml_cpu_get_info_type(ml_cpu_info, CLUSTER_TYPE_SMP);
723 }
724 
725 unsigned int
726 ml_get_cpu_number_type(cluster_type_t cluster_type __unused, bool logical, bool available)
727 {
728 	/*
729 	 * At present no supported x86 system features more than 1 CPU type. Because
730 	 * of this, the cluster_type parameter is ignored.
731 	 */
732 	if (logical && available) {
733 		return machine_info.logical_cpu;
734 	} else if (logical && !available) {
735 		return machine_info.logical_cpu_max;
736 	} else if (!logical && available) {
737 		return machine_info.physical_cpu;
738 	} else {
739 		return machine_info.physical_cpu_max;
740 	}
741 }
742 
743 void
744 ml_get_cluster_type_name(cluster_type_t cluster_type __unused, char *name, size_t name_size)
745 {
746 	strlcpy(name, "Standard", name_size);
747 }
748 
749 unsigned int
750 ml_get_cluster_number_type(cluster_type_t cluster_type __unused)
751 {
752 	/*
753 	 * At present no supported x86 system has more than 1 CPU type and multiple
754 	 * clusters.
755 	 */
756 	return 1;
757 }
758 
759 unsigned int
760 ml_get_cpu_types(void)
761 {
762 	return 1 << CLUSTER_TYPE_SMP;
763 }
764 
765 unsigned int
766 ml_get_cluster_count(void)
767 {
768 	/*
769 	 * At present no supported x86 system has more than 1 CPU type and multiple
770 	 * clusters.
771 	 */
772 	return 1;
773 }
774 
775 static_assert(MAX_CPUS <= 256, "MAX_CPUS must fit in _COMM_PAGE_CPU_TO_CLUSTER; Increase table size if needed");
776 
777 void
778 ml_map_cpus_to_clusters(uint8_t *table)
779 {
780 	for (uint16_t cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
781 		// Supported x86 systems have 1 cluster
782 		*(table + cpu_id) = (uint8_t)0;
783 	}
784 }
785 
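/*
 * Return the highest possible CPU number (one less than the maximum CPU
 * count), bounded by the firmware-reported count when one is available.
 */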
786 int
787 ml_early_cpu_max_number(void)
788 {
789 	int n = max_ncpus;
790 
791 	assert(startup_phase >= STARTUP_SUB_TUNABLES);
792 	if (max_cpus_from_firmware) {
793 		n = MIN(n, max_cpus_from_firmware);
794 	}
795 	return n - 1;
796 }
797 
798 void
799 ml_set_max_cpus(unsigned int max_cpus)
800 {
801 	lck_mtx_lock(&max_cpus_lock);
802 	if (max_cpus_initialized != MAX_CPUS_SET) {
803 		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
804 			/*
805 			 * Note: max_cpus is the number of enabled processors
806 			 * that ACPI found; max_ncpus is the maximum number
807 			 * that the kernel supports or that the "cpus="
808 	 * boot-arg has set. Here we take the minimum of the two.
809 			 */
810 			machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
811 		}
812 		if (max_cpus_initialized == MAX_CPUS_WAIT) {
813 			thread_wakeup((event_t) &max_cpus_initialized);
814 		}
815 		max_cpus_initialized = MAX_CPUS_SET;
816 	}
817 	lck_mtx_unlock(&max_cpus_lock);
818 }
819 
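/* Block until ml_set_max_cpus() has run, then return the resulting count. */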
820 unsigned int
821 ml_wait_max_cpus(void)
822 {
823 	lck_mtx_lock(&max_cpus_lock);
824 	while (max_cpus_initialized != MAX_CPUS_SET) {
825 		max_cpus_initialized = MAX_CPUS_WAIT;
826 		lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
827 	}
828 	lck_mtx_unlock(&max_cpus_lock);
829 	return machine_info.max_cpus;
830 }
831 
832 void
833 ml_panic_trap_to_debugger(__unused const char *panic_format_str,
834     __unused va_list *panic_args,
835     __unused unsigned int reason,
836     __unused void *ctx,
837     __unused uint64_t panic_options_mask,
838     __unused unsigned long panic_caller)
839 {
840 	return;
841 }
842 
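/*
 * Scale a timeout by 2^vti for virtualized environments, returning
 * max_timeout if the shift would overflow or exceed the cap.
 */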
843 static uint64_t
844 virtual_timeout_inflate64(unsigned int vti, uint64_t timeout, uint64_t max_timeout)
845 {
846 	if (vti >= 64) {
847 		return max_timeout;
848 	}
849 
850 	if ((timeout << vti) >> vti != timeout) {
851 		return max_timeout;
852 	}
853 
854 	if ((timeout << vti) > max_timeout) {
855 		return max_timeout;
856 	}
857 
858 	return timeout << vti;
859 }
860 
861 static uint32_t
862 virtual_timeout_inflate32(unsigned int vti, uint32_t timeout, uint32_t max_timeout)
863 {
864 	if (vti >= 32) {
865 		return max_timeout;
866 	}
867 
868 	if ((timeout << vti) >> vti != timeout) {
869 		return max_timeout;
870 	}
871 
872 	return timeout << vti;
873 }
874 
875 /*
876  * Some timeouts are later adjusted or used in calculations setting
877  * other values. In order to avoid overflow, cap the max timeout at
878  * 2^47ns (~39 hours).
879  */
880 static const uint64_t max_timeout_ns = 1ULL << 47;
881 
882 /*
883  * Inflate a timeout in absolutetime.
884  */
885 static uint64_t
886 virtual_timeout_inflate_abs(unsigned int vti, uint64_t timeout)
887 {
888 	uint64_t max_timeout;
889 	nanoseconds_to_absolutetime(max_timeout_ns, &max_timeout);
890 	return virtual_timeout_inflate64(vti, timeout, max_timeout);
891 }
892 
893 /*
894  * Inflate a value in TSC ticks.
895  */
896 static uint64_t
897 virtual_timeout_inflate_tsc(unsigned int vti, uint64_t timeout)
898 {
899 	const uint64_t max_timeout = tmrCvt(max_timeout_ns, tscFCvtn2t);
900 	return virtual_timeout_inflate64(vti, timeout, max_timeout);
901 }
902 
903 /*
904  * Inflate a timeout in microseconds.
905  */
906 static uint32_t
907 virtual_timeout_inflate_us(unsigned int vti, uint64_t timeout)
908 {
909 	const uint32_t max_timeout = ~0;
910 	return virtual_timeout_inflate32(vti, timeout, max_timeout);
911 }
912 
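/* Return a cheap source of timing entropy: the raw TSC value. */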
913 uint64_t
914 ml_get_timebase_entropy(void)
915 {
916 	return __builtin_ia32_rdtsc();
917 }
918 
919 /*
920  *	Routine:        ml_init_lock_timeout
921  *	Function:
922  */
923 static void __startup_func
924 ml_init_lock_timeout(void)
925 {
926 	uint64_t        abstime;
927 	uint32_t        mtxspin;
928 #if DEVELOPMENT || DEBUG
929 	uint64_t        default_timeout_ns = NSEC_PER_SEC >> 2;
930 #else
931 	uint64_t        default_timeout_ns = NSEC_PER_SEC >> 1;
932 #endif
933 	uint32_t        slto;
934 	uint32_t        prt;
935 
936 	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
937 		default_timeout_ns = slto * NSEC_PER_USEC;
938 	}
939 
940 	/*
941 	 * LockTimeOut is in absolutetime units, LockTimeOutTSC is in TSC ticks,
942 	 * and LockTimeOutUsec is in microseconds (a 32-bit value).
943 	 */
944 	LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
945 	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
946 	LockTimeOut = abstime;
947 	LockTimeOutTSC = tmrCvt(abstime, tscFCvtn2t);
948 
949 	/*
950 	 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
951 	 * LockTimeOut but can be overridden separately. In particular, a
952 	 * zero value inhibits the timeout-panic and cuts a trace event instead
953 	 * - see pmap_flush_tlbs().
954 	 */
955 	if (PE_parse_boot_argn("tlbto_us", &slto, sizeof(slto))) {
956 		default_timeout_ns = slto * NSEC_PER_USEC;
957 		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
958 		TLBTimeOut = (uint32_t) abstime;
959 	} else {
960 		TLBTimeOut = LockTimeOut;
961 	}
962 
963 #if DEVELOPMENT || DEBUG
964 	report_phy_read_delay = LockTimeOut >> 1;
965 #endif
966 	if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof(slto))) {
967 		default_timeout_ns = slto * NSEC_PER_USEC;
968 		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
969 		report_phy_read_delay = abstime;
970 	}
971 
972 	if (PE_parse_boot_argn("phywritemaxus", &slto, sizeof(slto))) {
973 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
974 		report_phy_write_delay = abstime;
975 	}
976 
977 	if (PE_parse_boot_argn("tracephyreadus", &slto, sizeof(slto))) {
978 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
979 		trace_phy_read_delay = abstime;
980 	}
981 
982 	if (PE_parse_boot_argn("tracephywriteus", &slto, sizeof(slto))) {
983 		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
984 		trace_phy_write_delay = abstime;
985 	}
986 
987 	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
988 		if (mtxspin > USEC_PER_SEC >> 4) {
989 			mtxspin =  USEC_PER_SEC >> 4;
990 		}
991 		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
992 	} else {
993 		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
994 	}
995 	MutexSpin = (unsigned int)abstime;
996 	low_MutexSpin = MutexSpin;
997 	/*
998 	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
999 	 * real_ncpus is not set at this time
1000 	 */
1001 	high_MutexSpin = -1;
1002 
1003 	nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
1004 	if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof(prt))) {
1005 		nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
1006 	}
1007 
1008 	virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
1009 	if (virtualized) {
1010 		unsigned int vti;
1011 
1012 		if (!PE_parse_boot_argn("vti", &vti, sizeof(vti))) {
1013 			vti = 6;
1014 		}
1015 
1016 #define VIRTUAL_TIMEOUT_INFLATE_ABS(_timeout)              \
1017 MACRO_BEGIN                                                \
1018 	_timeout = virtual_timeout_inflate_abs(vti, _timeout); \
1019 MACRO_END
1020 
1021 #define VIRTUAL_TIMEOUT_INFLATE_TSC(_timeout)              \
1022 MACRO_BEGIN                                                \
1023 	_timeout = virtual_timeout_inflate_tsc(vti, _timeout); \
1024 MACRO_END
1025 #define VIRTUAL_TIMEOUT_INFLATE_US(_timeout)               \
1026 MACRO_BEGIN                                                \
1027 	_timeout = virtual_timeout_inflate_us(vti, _timeout);  \
1028 MACRO_END
1029 		/*
1030 		 * These timeout values are inflated because they cause
1031 		 * the kernel to panic when they expire.
1032 		 * (Needed when running as a guest VM as the host OS
1033 		 * may not always schedule vcpu threads in time to
1034 		 * meet the deadline implied by the narrower time
1035 		 * window used on hardware.)
1036 		 */
1037 		VIRTUAL_TIMEOUT_INFLATE_US(LockTimeOutUsec);
1038 		VIRTUAL_TIMEOUT_INFLATE_ABS(LockTimeOut);
1039 		VIRTUAL_TIMEOUT_INFLATE_TSC(LockTimeOutTSC);
1040 		VIRTUAL_TIMEOUT_INFLATE_ABS(TLBTimeOut);
1041 		VIRTUAL_TIMEOUT_INFLATE_ABS(report_phy_read_delay);
1042 		VIRTUAL_TIMEOUT_INFLATE_TSC(lock_panic_timeout);
1043 	}
1044 
1045 	interrupt_latency_tracker_setup();
1046 }
1047 STARTUP(TIMEOUTS, STARTUP_RANK_MIDDLE, ml_init_lock_timeout);
1048 
1049 /*
1050  * Threshold above which we should attempt to block
1051  * instead of spinning for clock_delay_until().
1052  */
1053 
1054 void
1055 ml_init_delay_spin_threshold(int threshold_us)
1056 {
1057 	nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
1058 }
1059 
1060 boolean_t
1061 ml_delay_should_spin(uint64_t interval)
1062 {
1063 	return (interval < delay_spin_threshold) ? TRUE : FALSE;
1064 }
1065 
1066 TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0);
1067 
1068 void
1069 ml_delay_on_yield(void)
1070 {
1071 #if DEVELOPMENT || DEBUG
1072 	if (yield_delay_us) {
1073 		delay(yield_delay_us);
1074 	}
1075 #endif
1076 }
1077 
1078 /*
1079  * This is called from the machine-independent layer
1080  * to perform machine-dependent info updates. Defer to cpu_thread_init().
1081  */
1082 void
1083 ml_cpu_up(void)
1084 {
1085 	return;
1086 }
1087 
1088 void
1089 ml_cpu_up_update_counts(__unused int cpu_id)
1090 {
1091 	return;
1092 }
1093 
1094 /*
1095  * This is called from the machine-independent layer
1096  * to perform machine-dependent info updates.
1097  */
1098 void
1099 ml_cpu_down(void)
1100 {
1101 	i386_deactivate_cpu();
1102 
1103 	return;
1104 }
1105 
1106 void
1107 ml_cpu_down_update_counts(__unused int cpu_id)
1108 {
1109 	return;
1110 }
1111 
1112 thread_t
1113 current_thread(void)
1114 {
1115 	return current_thread_fast();
1116 }
1117 
1118 
1119 boolean_t
1120 ml_is64bit(void)
1121 {
1122 	return cpu_mode_is64bit();
1123 }
1124 
1125 
1126 boolean_t
1127 ml_thread_is64bit(thread_t thread)
1128 {
1129 	return thread_is_64bit_addr(thread);
1130 }
1131 
1132 
1133 boolean_t
1134 ml_state_is64bit(void *saved_state)
1135 {
1136 	return is_saved_state64(saved_state);
1137 }
1138 
1139 void
1140 ml_cpu_set_ldt(int selector)
1141 {
1142 	/*
1143 	 * Avoid loading the LDT
1144 	 * if we're setting the KERNEL LDT and it's already set.
1145 	 */
1146 	if (selector == KERNEL_LDT &&
1147 	    current_cpu_datap()->cpu_ldt == KERNEL_LDT) {
1148 		return;
1149 	}
1150 
1151 	lldt(selector);
1152 	current_cpu_datap()->cpu_ldt = selector;
1153 }
1154 
1155 void
1156 ml_fp_setvalid(boolean_t value)
1157 {
1158 	fp_setvalid(value);
1159 }
1160 
1161 uint64_t
1162 ml_cpu_int_event_time(void)
1163 {
1164 	return current_cpu_datap()->cpu_int_event_time;
1165 }
1166 
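/*
 * Return the number of stack bytes remaining below the current frame,
 * using the interrupt stack bounds when called at interrupt context.
 */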
1167 vm_offset_t
1168 ml_stack_remaining(void)
1169 {
1170 	uintptr_t local = (uintptr_t) &local;
1171 
1172 	if (ml_at_interrupt_context() != 0) {
1173 		return local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE);
1174 	} else {
1175 		return local - current_thread()->kernel_stack;
1176 	}
1177 }
1178 
1179 #if KASAN
1180 vm_offset_t ml_stack_base(void);
1181 vm_size_t ml_stack_size(void);
1182 
1183 vm_offset_t
1184 ml_stack_base(void)
1185 {
1186 	if (ml_at_interrupt_context()) {
1187 		return current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE;
1188 	} else {
1189 		return current_thread()->kernel_stack;
1190 	}
1191 }
1192 
1193 vm_size_t
1194 ml_stack_size(void)
1195 {
1196 	if (ml_at_interrupt_context()) {
1197 		return INTSTACK_SIZE;
1198 	} else {
1199 		return kernel_stack_size;
1200 	}
1201 }
1202 #endif
1203 
1204 #if CONFIG_KCOV
1205 kcov_cpu_data_t *
1206 current_kcov_data(void)
1207 {
1208 	return &current_cpu_datap()->cpu_kcov_data;
1209 }
1210 
1211 kcov_cpu_data_t *
1212 cpu_kcov_data(int cpuid)
1213 {
1214 	return &cpu_datap(cpuid)->cpu_kcov_data;
1215 }
1216 #endif /* CONFIG_KCOV */
1217 
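/*
 * Take a preemption interrupt now if an urgent AST is pending and interrupts
 * are enabled; the preemption level must be zero.
 */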
1218 void
1219 kernel_preempt_check(void)
1220 {
1221 	boolean_t       intr;
1222 	unsigned long flags;
1223 
1224 	assert(get_preemption_level() == 0);
1225 
1226 	if (__improbable(*ast_pending() & AST_URGENT)) {
1227 		/*
1228 		 * can handle interrupts and preemptions
1229 		 * at this point
1230 		 */
1231 		__asm__ volatile ("pushf; pop	%0"  :  "=r" (flags));
1232 
1233 		intr = ((flags & EFL_IF) != 0);
1234 
1235 		/*
1236 		 * now cause the PRE-EMPTION trap
1237 		 */
1238 		if (intr == TRUE) {
1239 			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
1240 		}
1241 	}
1242 }
1243 
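/*
 * Return TRUE when watchdog-style timeouts should be suppressed, e.g. during
 * TLB-flush or spinlock timeout handling, an active panic, recent debugger
 * activity, or shortly after wake.
 */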
1244 boolean_t
1245 machine_timeout_suspended(void)
1246 {
1247 	return pmap_tlb_flush_timeout || lck_spinlock_timeout_in_progress ||
1248 	       panic_active() || mp_recent_debugger_activity() ||
1249 	       ml_recent_wake();
1250 }
1251 
1252 /* Eagerly evaluate all pending timer and thread callouts
1253  */
1254 void
1255 ml_timer_evaluate(void)
1256 {
1257 	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_START, 0, 0, 0, 0, 0);
1258 
1259 	uint64_t te_end, te_start = mach_absolute_time();
1260 	simple_lock(&ml_timer_evaluation_slock, LCK_GRP_NULL);
1261 	ml_timer_evaluation_in_progress = TRUE;
1262 	thread_call_delayed_timer_rescan_all();
1263 	mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
1264 	ml_timer_evaluation_in_progress = FALSE;
1265 	ml_timer_eager_evaluations++;
1266 	te_end = mach_absolute_time();
1267 	ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
1268 	simple_unlock(&ml_timer_evaluation_slock);
1269 
1270 	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_END, 0, 0, 0, 0, 0);
1271 }
1272 
1273 boolean_t
1274 ml_timer_forced_evaluation(void)
1275 {
1276 	return ml_timer_evaluation_in_progress;
1277 }
1278 
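/* Accumulate and report per-thread GPU time, in nanoseconds. */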
1279 void
1280 ml_gpu_stat_update(uint64_t gpu_ns_delta)
1281 {
1282 	current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
1283 }
1284 
1285 uint64_t
1286 ml_gpu_stat(thread_t t)
1287 {
1288 	return t->machine.thread_gpu_ns;
1289 }
1290 
1291 int plctrace_enabled = 0;
1292 
1293 void
1294 _disable_preemption(void)
1295 {
1296 	disable_preemption_internal();
1297 }
1298 
1299 void
1300 _enable_preemption(void)
1301 {
1302 	enable_preemption_internal();
1303 }
1304 
1305 void
1306 plctrace_disable(void)
1307 {
1308 	plctrace_enabled = 0;
1309 }
1310 
1311 static boolean_t ml_quiescing;
1312 
1313 void
1314 ml_set_is_quiescing(boolean_t quiescing)
1315 {
1316 	ml_quiescing = quiescing;
1317 }
1318 
1319 boolean_t
1320 ml_is_quiescing(void)
1321 {
1322 	return ml_quiescing;
1323 }
1324 
1325 uint64_t
1326 ml_get_booter_memory_size(void)
1327 {
1328 	return 0;
1329 }
1330 
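/*
 * Machine-dependent lockdown: make the kernel's const data read-only via
 * x86_64_protect_data_const().
 */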
1331 void
1332 machine_lockdown(void)
1333 {
1334 	x86_64_protect_data_const();
1335 }
1336 
1337 bool
1338 ml_cpu_can_exit(__unused int cpu_id, __unused processor_reason_t reason)
1339 {
1340 	return true;
1341 }
1342 
1343 void
1344 ml_cpu_begin_state_transition(__unused int cpu_id)
1345 {
1346 }
1347 
1348 void
1349 ml_cpu_end_state_transition(__unused int cpu_id)
1350 {
1351 }
1352 
1353 void
1354 ml_cpu_begin_loop(void)
1355 {
1356 }
1357 
1358 void
1359 ml_cpu_end_loop(void)
1360 {
1361 }
1362 
1363 size_t
1364 ml_get_vm_reserved_regions(bool vm_is64bit, const struct vm_reserved_region **regions)
1365 {
1366 #pragma unused(vm_is64bit)
1367 	assert(regions != NULL);
1368 
1369 	*regions = NULL;
1370 	return 0;
1371 }
1372 
1373 void
1374 ml_cpu_power_enable(__unused int cpu_id)
1375 {
1376 }
1377 
1378 void
1379 ml_cpu_power_disable(__unused int cpu_id)
1380 {
1381 }
1382 
1383 int
1384 ml_page_protection_type(void)
1385 {
1386 	return 0; // not supported on x86
1387 }
1388 
1389 bool
1390 ml_addr_in_non_xnu_stack(__unused uintptr_t addr)
1391 {
1392 	/* There are no non-XNU stacks on x86 systems. */
1393 	return false;
1394 }
1395 
1396 /**
1397  * Explicitly preallocates a floating point save area.
1398  */
1399 void
1400 ml_fp_save_area_prealloc(void)
1401 {
1402 	fpnoextflt();
1403 }
1404