/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
#include <i386/seg.h>

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

#include <san/kcov_stksz.h>


/*
 * Maps state flavor to number of words in the state:
 */
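/*
 * (A "word" here is a 32-bit natural_t: each _COUNT constant below is
 * defined in <mach/i386/thread_status.h> as sizeof(the state struct) /
 * sizeof(int), which is how Mach sizes thread state in
 * mach_msg_type_number_t units.)
 */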
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
	[x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
	[x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE] = x86_AVX_STATE_COUNT,
	[x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
	[x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT
};

ZONE_DECLARE(iss_zone, "x86_64 saved state",
    sizeof(x86_saved_state_t), ZC_NONE);

ZONE_DECLARE(ids_zone, "x86_64 debug state",
    sizeof(x86_debug_state64_t), ZC_NONE);

/* Forward */

extern void Thread_continue(void);
extern void Load_context(
	thread_t thread) __attribute__((noreturn));

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, void *ts, boolean_t full);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, void *ts, boolean_t full);

/*
 * Don't let an illegal value for the lower 32 bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
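/*
 * For reference (summary of the DR7 layout from the Intel SDM, vol. 3;
 * not documented in this file): bits 0-7 are the per-breakpoint local
 * and global enables (L0,G0 .. L3,G3); bits 16-31 hold a 2-bit R/W
 * field plus a 2-bit LEN field for each of the four debug registers.
 * The shifting masks below walk those 4-bit (R/W, LEN) groups.
 */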
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be the pattern
	 * "10B" to indicate I/O reads and writes
	 */
	if (!(get_cr4() & CR4_DE)) {
		for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
		    i++, mask1 <<= 4, mask2 <<= 4) {
			if ((*dr7d & mask1) == mask2) {
				return FALSE;
			}
		}
	}

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++) {
		if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
			return FALSE;
		}
	}

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;     /* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);  /* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);  /* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);  /* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);  /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7d & 0x2) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 2)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 4)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 6)) {
		return FALSE;
	}

	return TRUE;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7)) {
		return FALSE;
	}

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
		return FALSE;
	}

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1) {
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 2)) {
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 4)) {
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 6)) {
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

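		/*
		 * Allocate optimistically before taking the PCB lock,
		 * then re-check under the lock; if another thread won the
		 * race, the extra allocation is freed.  (Descriptive note;
		 * set_debug_state64() below uses the same pattern.)
		 */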
		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}


	copy_debug_state32(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
			    HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state64(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

static void
machine_rsb_stuff(void)
{
#define RSB_STUFF_SPACE_REQD    (256 + 16)      /* 256 bytes plus a buffer of another 16 for misc. */

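	/*
	 * What the inline assembly below does (explanatory note based on
	 * the standard RSB-stuffing pattern): every `call` pushes an entry
	 * onto the CPU's Return Stack Buffer.  The RSBST macro expands
	 * recursively 16 times and each expansion issues two calls, so 32
	 * RSB entries end up pointing at benign pause/lfence capture
	 * loops; a mispredicted speculative `ret` can then only land in
	 * one of those loops.  The final `addq` pops all 16 * 2 return
	 * addresses (8 bytes each) off the real stack in one step.
	 */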
	asm volatile (
	    ".macro RSBST from=0, to=15\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " .if \\to - \\from \n"
	    " RSBST \"(\\from + 1)\", \\to \n"
	    " .endif \n"
	    ".endmacro \n"
	    "\n"
	    "L_rsbst:\n"
	    " RSBST \n"
	    " addq $(16 * 2 * 8), %%rsp\n"
	    ::: "memory", "cc");
}

static inline void
pmap_switch_context(thread_t ot, thread_t nt, int cnum)
{
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
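		/*
		 * Only stuff the RSB when the new thread asked for it
		 * (MTHR_RSBST) and there is room left on the kernel stack
		 * for the 16 * 2 return addresses the stuffing sequence
		 * pushes (RSB_STUFF_SPACE_REQD); otherwise skip the
		 * mitigation rather than risk running out of stack.
		 * (Note inferred from the check below.)
		 */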
		if (__improbable((nt->machine.mthr_do_segchk & MTHR_RSBST) &&
		    (current_kernel_stack_depth() + RSB_STUFF_SPACE_REQD) < kernel_stack_size)) {
			machine_rsb_stuff();
		}
	}
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

#if KPC
	kpc_off_cpu(old);
#endif /* KPC */

	/*
	 * Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t depth = current_kernel_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	return Switch_context(old, continuation, new);
}

boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.specFlags & OnProc;
}

thread_t
machine_processor_shutdown(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_switch_context(thread, NULL);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return Shutdown_context(thread, doshutdown, processor);
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
void
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread()) {
			clear_fpu();
		}
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
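
/*
 * Example (illustrative sketch, not part of the kernel sources): from user
 * space the "external" flavors above are reached via thread_get_state() /
 * thread_set_state().  Reading the general-purpose registers of a 64-bit
 * thread might look like the following, where thread_port is assumed to be
 * a send right to the target thread and error handling is elided.  (The
 * __-prefixed field names are the userspace spellings of the structure
 * members used in this file.)
 *
 *	#include <stdio.h>
 *	#include <mach/mach.h>
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS) {
 *		printf("rip=0x%llx rsp=0x%llx\n", ts.__rip, ts.__rsp);
 *	}
 *
 * The generic x86_THREAD_STATE flavor works for either address width; the
 * kernel reports the sub-flavor it actually filled in through the tsh
 * header (see machine_thread_get_state() below).
 */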


static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
	    ts->ss,
	    ts->ds,
	    ts->es,
	    ts->fs,
	    ts->gs)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
		saved_state->cs = SYSENTER_TF_CS;
	}

	return KERN_SUCCESS;
}

static int
set_thread_state64(thread_t thread, void *state, int full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
		if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		ts = (x86_thread_state64_t *)state;
		// In this case, ts->cs exists but is ignored, and
		// CS is always set to USER_CS below instead.
	}

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;

	if (full == FALSE) {
		saved_state->isf.cs = USER64_CS;
	} else {
		saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
		saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
		saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
		saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
		machine_thread_set_tsd_base(thread,
		    ((x86_thread_full_state64_t *)ts)->gsbase);
	}

	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return KERN_SUCCESS;
}


static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, void *state, boolean_t full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
	} else {
		ts = (x86_thread_state64_t *)state;
	}

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;

	if (full == TRUE) {
		((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
		((x86_thread_full_state64_t *)state)->es = saved_state->es;
		((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
		((x86_thread_full_state64_t *)state)->gsbase =
		    thread->machine.cthread_self;
	}

	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;

		/*
		 * Refuse to allow 64-bit processes to set
		 * 32-bit state.
		 */
		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
		    state->ss,
		    state->ds,
		    state->es,
		    state->fs,
		    state->gs)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
			state->cs = SYSENTER_TF_CS;
		}

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;

		if (count != x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;

		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
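		/*
		 * Concretely (values from <mach/i386/thread_status.h>):
		 * x86_AVX_STATE - 1 == x86_AVX_STATE64 and
		 * x86_AVX_STATE - 2 == x86_AVX_STATE32, and likewise for
		 * the x86_AVX512_STATE triple, which is what makes the
		 * flavor - 1 / flavor - 2 arithmetic below work for both
		 * case labels.
		 */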
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			           (thread_state_t)&state->ufs.as64,
			           flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			           (thread_state_t)&state->ufs.as32,
			           flavor - 2);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, FALSE);
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (count != x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, TRUE);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (count != x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
		    state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act) && get_threadtask(thr_act)->i386_ldt != 0) {
			return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
		    state->tsh.count == x86_THREAD_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else {
			return KERN_INVALID_ARGUMENT;
		}
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

mach_vm_address_t
machine_thread_pc(thread_t thr_act)
{
	if (thread_is_64bit_addr(thr_act)) {
		return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
	} else {
		return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
	}
}

void
machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
{
	pal_register_cache_state(thr_act, DIRTY);

	if (thread_is_64bit_addr(thr_act)) {
		if (!IS_USERADDR64_CANONICAL(pc)) {
			pc = 0;
		}
		USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
	} else {
		USER_REGS32(thr_act)->eip = (uint32_t)pc;
	}
}


/*
 * thread_getstatus:
 *
 *	Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_15:
	{
		if (*count < 7) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;
		tstate[6] = x86_PAGEIN_STATE;

		*count = 7;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (*count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (*count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;
		kern_return_t kret;

		if (*count < x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit_addr(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return kret;
	}

	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;
		thread_state_t fstate;

		if (*count < _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		if (thread_is_64bit_addr(thr_act)) {
			flavor -= 1;    /* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;    /* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, FALSE);
		break;
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (*count < x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_FULL_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, TRUE);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t *state;

		if (*count < x86_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;

		if (*count < x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit_addr(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	case x86_PAGEIN_STATE:
	{
		if (*count < x86_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_pagein_state_t *state = (void *)tstate;

		state->__pagein_error = thr_act->t_pagein_error;

		*count = x86_PAGEIN_STATE_COUNT;
		break;
	}

	case x86_INSTRUCTION_STATE:
	{
		if (*count < x86_INSTRUCTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_instruction_state_t *state = (void *)tstate;
		x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;

		if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
#if DEVELOPMENT || DEBUG
			extern int insnstream_force_cacheline_mismatch;
#endif
			size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
			    ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
			if (byte_count > 0) {
				bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);
			}
			state->insn_offset = src_state->insn_offset;
			state->insn_stream_valid_bytes = byte_count;
#if DEVELOPMENT || DEBUG
			state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
			insnstream_force_cacheline_mismatch = 0;        /* One-shot, reset after use */

			if (state->out_of_synch) {
				bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			} else {
				bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			}
#else
			state->out_of_synch = src_state->out_of_synch;
#endif
			*count = x86_INSTRUCTION_STATE_COUNT;
		} else {
			*count = 0;
		}
		break;
	}

	case x86_LAST_BRANCH_STATE:
	{
		if (last_branch_enabled_modes != LBR_ENABLED_USERMODE || *count < x86_LAST_BRANCH_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Callers to this function are assumed to be from user space and the LBR values will be filtered accordingly */
		if (i386_filtered_lbr_state_to_mach_thread_state(thr_act, (last_branch_state_t *)tstate, true) < 0) {
			*count = 0;
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_LAST_BRANCH_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
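	/*
	 * (cpu_int_state is the save area captured at interrupt entry on
	 * this CPU, so it is only meaningful for the thread that was
	 * interrupted here -- hence the current_thread() check below.
	 * Explanatory note inferred from that check.)
	 */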
	if (thread != current_thread() || int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
	task_t task = get_threadtask(thread);

	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, task, false);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64Bit_addr(task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
2024
2025
2026
2027 /*
2028 * This is used to set the current thr_act/thread
2029 * when starting up a new processor
2030 */
2031 void
machine_set_current_thread(thread_t thread)2032 machine_set_current_thread(thread_t thread)
2033 {
2034 current_cpu_datap()->cpu_active_thread = thread;
2035 }
2036
2037
2038 /*
2039 * Perform machine-dependent per-thread initializations
2040 */
2041 void
machine_thread_init(void)2042 machine_thread_init(void)
2043 {
2044 fpu_module_init();
2045 }
2046
2047 /*
2048 * machine_thread_template_init: Initialize machine-specific portion of
2049 * the thread template.
2050 */
2051 void
machine_thread_template_init(thread_t thr_template)2052 machine_thread_template_init(thread_t thr_template)
2053 {
2054 assert(fpu_default != UNDEFINED);
2055
2056 THREAD_TO_PCB(thr_template)->xstate = fpu_default;
2057 }
2058
2059 user_addr_t
get_useraddr(void)2060 get_useraddr(void)
2061 {
2062 thread_t thr_act = current_thread();
2063
2064 if (thread_is_64bit_addr(thr_act)) {
2065 x86_saved_state64_t *iss64;
2066
2067 iss64 = USER_REGS64(thr_act);
2068
2069 return iss64->isf.rip;
2070 } else {
2071 x86_saved_state32_t *iss32;
2072
2073 iss32 = USER_REGS32(thr_act);
2074
2075 return iss32->eip;
2076 }
2077 }
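
/*
 * Sketch of a hypothetical caller (not part of this file): a sampler
 * running on the current thread could record the user-space PC it was
 * interrupted at, in the correct width for the thread's address mode:
 *
 *	user_addr_t pc = get_useraddr();
 *
 * The value comes from the thread's saved user register state, so it is
 * only meaningful for a thread that has user state to report.
 */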

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0,
	    0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;

	return stack;
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t                thread,
	vm_offset_t             stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	thread_initialize_kernel_state(thread);

	statep = STACK_IKS(stack);

	/*
	 * Reset the state of the thread to resume from a continuation,
	 * including resetting the stack and frame pointer to avoid backtracers
	 * seeing this temporary state and attempting to walk the defunct stack.
	 */
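	/*
	 * k_rip points at the Thread_continue trampoline, which by
	 * convention transfers control to the continuation stashed in
	 * k_rbx (here, thread_continue); k_rsp is reset to the saved
	 * kernel state region at the top of the new stack.
	 */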
	statep->k_rbp = (uint64_t) 0;
	statep->k_rip = (uint64_t) Thread_continue;
	statep->k_rbx = (uint64_t) thread_continue;
	statep->k_rsp = (uint64_t) STACK_IKS(stack);

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

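	/*
	 * Notify the hypervisor plug-in, if one has claimed the outgoing
	 * thread as a vCPU, that the thread is being preempted; the
	 * matching dispatch callback below fires when the incoming
	 * thread goes on-core.
	 */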
#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

	kpc_off_cpu(old);

	stack = old->kernel_stack;
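	/*
	 * If the outgoing thread was running on its reserved stack, trade
	 * reserved stacks so the stack being handed off remains a reserved
	 * one and each thread still owns exactly one reserved stack.
	 */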
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(old, old->kernel_stack);
#endif
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary because the
	 * old stack is already initialized.
	 */
	new->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}


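/*
 * Per-ABI context bundles captured by act_thread_csave() and restored
 * by act_thread_catt(): the saved general-register, floating-point,
 * and debug state for one thread.
 */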
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};


void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = kalloc_data(sizeof(struct x86_act_context64), Z_WAITOK);

		if (ic64 == (struct x86_act_context64 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
		    (thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE64,
		    (thread_state_t)&ic64->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		return ic64;
	} else {
		struct x86_act_context32 *ic32;

		ic32 = kalloc_data(sizeof(struct x86_act_context32), Z_WAITOK);

		if (ic32 == (struct x86_act_context32 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
		    (thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE32,
		    (thread_state_t)&ic32->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		return ic32;
	}
}
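
/*
 * Typical pairing (a sketch; real callers live elsewhere in the kernel):
 *
 *	void *ctx = act_thread_csave();
 *	...temporarily run with a different user context...
 *	act_thread_catt(ctx);
 *
 * act_thread_catt() both restores the captured state and frees the
 * context, so the pointer must not be reused after the call.
 */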

void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL) {
		return;
	}

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
			    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree_data(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
			    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree_data(ic32, sizeof(struct x86_act_context32));
	}
}


void
act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another. The "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
	x86_debug_state32_t     *src,
	x86_debug_state32_t     *target,
	boolean_t               all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another. The "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
	x86_debug_state64_t     *src,
	x86_debug_state64_t     *target,
	boolean_t               all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}