/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */
#include <i386/seg.h>

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

#include <san/kcov_stksz.h>


/*
 * Maps state flavor to number of words in the state:
 */
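/*
 * Note: Mach thread-state counts are expressed in natural_t (32-bit word)
 * units; each *_COUNT constant is defined in <mach/i386/thread_status.h>
 * as sizeof(the corresponding state struct) / sizeof(int).
 */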
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
	[x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
	[x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE] = x86_AVX_STATE_COUNT,
	[x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
	[x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT
};

ZONE_DEFINE_TYPE(iss_zone, "x86_64 saved state",
    x86_saved_state_t, ZC_NONE);

ZONE_DEFINE_TYPE(ids_zone, "x86_64 debug state",
    x86_debug_state64_t, ZC_NONE);

/* Forward */

extern void Thread_continue(void);
extern void Load_context(
	thread_t thread) __attribute__((noreturn));

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, void *ts, boolean_t full);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, void *ts, boolean_t full);
/*
 * Don't let an illegal value for the lower 32 bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
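/*
 * For reference, the DR7 layout (lower 32 bits) per the Intel SDM:
 *	bits  0- 7: L0/G0 .. L3/G3 local/global enable bits for DR0-DR3
 *	bits  8- 9: LE/GE exact-breakpoint enables (legacy)
 *	bit     13: GD general-detect enable
 *	bits 16-31: four R/W[n]:LEN[n] pairs, 2 bits each, selecting the
 *	            break condition and operand length for DR0-DR3
 */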
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and writes
	 */
	if (!(get_cr4() & CR4_DE)) {
		for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
		    i++, mask1 <<= 4, mask2 <<= 4) {
			if ((*dr7d & mask1) == mask2) {
				return FALSE;
			}
		}
	}

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++) {
		if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
			return FALSE;
		}
	}

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;             /* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);          /* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);          /* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);          /* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);          /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7d & 0x2) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 2)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 4)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 6)) {
		return FALSE;
	}

	return TRUE;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7)) {
		return FALSE;
	}

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
		return FALSE;
	}

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
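	/*
	 * Bits 0/2/4/6 of dr7 are the L0..L3 local-enable bits; an address
	 * register only needs validating when its breakpoint is enabled.
	 */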
	if (ds->dr7 & 0x1) {
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 2)) {
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 4)) {
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 6)) {
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

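		/*
		 * The allocation above may block, so it is done before taking
		 * the pcb spinlock; the lock only arbitrates the race with a
		 * concurrent allocator, and the loser frees its copy.
		 */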
		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}


	copy_debug_state32(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
			    HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state64(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

static void
machine_rsb_stuff(void)
{
#define RSB_STUFF_SPACE_REQD (256 + 16) /* 256 bytes plus a buffer of another 16 for misc. */

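	/*
	 * The RSBST assembler macro below expands recursively 16 times; each
	 * expansion executes two CALLs whose return addresses point at benign
	 * capture loops (pause; lfence; jmp).  That pushes 32 harmless
	 * entries into the CPU's Return Stack Buffer, displacing any
	 * attacker-seeded return predictions (a Spectre-RSB style
	 * mitigation).  The trailing addq discards the 16 * 2 * 8 bytes of
	 * return addresses the CALLs left on the stack.
	 */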
	asm volatile (
	    ".macro RSBST from=0, to=15\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " .if \\to - \\from \n"
	    " RSBST \"(\\from + 1)\", \\to \n"
	    " .endif \n"
	    ".endmacro \n"
	    "\n"
	    "L_rsbst:\n"
	    " RSBST \n"
	    " addq $(16 * 2 * 8), %%rsp\n"
	    ::: "memory", "cc");
}

static inline void
pmap_switch_context(thread_t ot, thread_t nt, int cnum)
{
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
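		/*
		 * Only stuff the RSB when enough kernel stack headroom
		 * remains for the 32 transient return addresses the stuffing
		 * sequence pushes.
		 */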
		if (__improbable((nt->machine.mthr_do_segchk & MTHR_RSBST) &&
		    (current_kernel_stack_depth() + RSB_STUFF_SPACE_REQD) < kernel_stack_size)) {
			machine_rsb_stuff();
		}
	}
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

#if KPC
	kpc_off_cpu(old);
#endif /* KPC */

	/*
	 * Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t depth = current_kernel_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	return Switch_context(old, continuation, new);
}

boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.specFlags & OnProc;
}

thread_t
machine_processor_shutdown(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_switch_context(thread, NULL);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return Shutdown_context(thread, doshutdown, processor);
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
void
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread()) {
			clear_fpu();
		}
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
 *		for 32bit tasks only
 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
 *		for 64bit tasks only
 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
 *		for 32bit tasks only
 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
 *		for 64bit tasks only
 * x86_THREAD_STATE - external set/get general register state on 32/64 bit processors
 *		for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
 *		for 32bit tasks only
 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
 *		for 64bit tasks only
 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
 *		for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *		for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *		for 64bit tasks only
 * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
 *		for either 32bit or 64bit tasks
 */
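
/*
 * Illustrative sketch (not part of this file): userspace typically reaches
 * the get/set paths below through the Mach thread_get_state() /
 * thread_set_state() RPCs, e.g. a debugger reading a 64-bit thread's
 * general registers:
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *
 * (thread_port is a placeholder name.)  On success, ts holds the register
 * snapshot assembled by machine_thread_get_state() below.
 */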


static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
	    ts->ss,
	    ts->ds,
	    ts->es,
	    ts->fs,
	    ts->gs)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
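	/*
	 * (The sysexit fast path does not restore EFLAGS, so a single-step
	 * requested via EFL_TF could otherwise be mishandled; the special
	 * SYSENTER_TF_CS selector tells the return path to use iret instead.)
	 */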
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
		saved_state->cs = SYSENTER_TF_CS;
	}

	return KERN_SUCCESS;
}

static int
set_thread_state64(thread_t thread, void *state, int full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
		if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		ts = (x86_thread_state64_t *)state;
		// In this case, ts->cs exists but is ignored, and
		// CS is always set to USER_CS below instead.
	}

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

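	/*
	 * A 64-bit user address must be canonical (bits 63:47 all equal);
	 * letting a non-canonical RIP or RSP reach the return-to-user path
	 * would fault in the kernel, so reject such values up front.
	 */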
	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;

	if (full == FALSE) {
		saved_state->isf.cs = USER64_CS;
	} else {
		saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
		saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
		saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
		saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
		machine_thread_set_tsd_base(thread,
		    ((x86_thread_full_state64_t *)ts)->gsbase);
	}

	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return KERN_SUCCESS;
}



static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, void *state, boolean_t full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
	} else {
		ts = (x86_thread_state64_t *)state;
	}

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;

	if (full == TRUE) {
		((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
		((x86_thread_full_state64_t *)state)->es = saved_state->es;
		((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
		((x86_thread_full_state64_t *)state)->gsbase =
		    thread->machine.cthread_self;
	}

	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count,
	__unused thread_state_t old_tstate,
	__unused mach_msg_type_number_t old_count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;

		/*
		 * Refuse to allow 64-bit processes to set
		 * 32-bit state.
		 */
		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
		    state->ss,
		    state->ds,
		    state->es,
		    state->fs,
		    state->gs)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
			state->cs = SYSENTER_TF_CS;
		}

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked. Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked. Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;

		if (count != x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;

		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			           (thread_state_t)&state->ufs.as64,
			           flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			           (thread_state_t)&state->ufs.as32,
			           flavor - 2);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, FALSE);
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (count != x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, TRUE);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (count != x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
		    state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act) && get_threadtask(thr_act)->i386_ldt != 0) {
			return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
		    state->tsh.count == x86_THREAD_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else {
			return KERN_INVALID_ARGUMENT;
		}
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

mach_vm_address_t
machine_thread_pc(thread_t thr_act)
{
	if (thread_is_64bit_addr(thr_act)) {
		return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
	} else {
		return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
	}
}

void
machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
{
	pal_register_cache_state(thr_act, DIRTY);

	if (thread_is_64bit_addr(thr_act)) {
		if (!IS_USERADDR64_CANONICAL(pc)) {
			pc = 0;
		}
		USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
	} else {
		USER_REGS32(thr_act)->eip = (uint32_t)pc;
	}
}


/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_15:
	{
		if (*count < 7) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;
		tstate[6] = x86_PAGEIN_STATE;

		*count = 7;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (*count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (*count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;
		kern_return_t kret;

		if (*count < x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit_addr(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return kret;
	}

	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;
		thread_state_t fstate;

		if (*count < _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		if (thread_is_64bit_addr(thr_act)) {
			flavor -= 1;    /* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;    /* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, FALSE);
		break;
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (*count < x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_FULL_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, TRUE);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t *state;

		if (*count < x86_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;

		if (*count < x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit_addr(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	case x86_PAGEIN_STATE:
	{
		if (*count < x86_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_pagein_state_t *state = (void *)tstate;

		state->__pagein_error = thr_act->t_pagein_error;

		*count = x86_PAGEIN_STATE_COUNT;
		break;
	}

	case x86_INSTRUCTION_STATE:
	{
		if (*count < x86_INSTRUCTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_instruction_state_t *state = (void *)tstate;
		x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;

		if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
#if DEVELOPMENT || DEBUG
			extern int insnstream_force_cacheline_mismatch;
#endif
			size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
			    ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
			if (byte_count > 0) {
				bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);
			}
			state->insn_offset = src_state->insn_offset;
			state->insn_stream_valid_bytes = byte_count;
#if DEVELOPMENT || DEBUG
			state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
			insnstream_force_cacheline_mismatch = 0;        /* One-shot, reset after use */

			if (state->out_of_synch) {
				bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			} else {
				bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			}
#else
			state->out_of_synch = src_state->out_of_synch;
#endif
			*count = x86_INSTRUCTION_STATE_COUNT;
		} else {
			*count = 0;
		}
		break;
	}

	case x86_LAST_BRANCH_STATE:
	{
		if (last_branch_enabled_modes != LBR_ENABLED_USERMODE || *count < x86_LAST_BRANCH_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Callers to this function are assumed to be from user space and the LBR values will be filtered accordingly */
		if (i386_filtered_lbr_state_to_mach_thread_state(thr_act, (last_branch_state_t *)tstate, true) < 0) {
			*count = 0;
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_LAST_BRANCH_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
1998
1999
void
machine_thread_switch_addrmode(thread_t thread)
{
	task_t task = get_threadtask(thread);

	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, task, false);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64Bit_addr(task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}



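/*
 * (cpu_active_thread lives in the per-cpu data, which on x86 is
 * reached via %gs-relative addressing, so the corresponding reader,
 * current_thread(), is essentially a single %gs-based load.  A
 * minimal sketch of that reader, under this assumption:
 *
 *	thread_t
 *	current_thread(void)
 *	{
 *		return current_cpu_datap()->cpu_active_thread;
 *	}
 * )
 */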
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	fpu_module_init();
}

/*
 * machine_thread_template_init: Initialize machine-specific portion of
 * the thread template.
 */
void
machine_thread_template_init(thread_t thr_template)
{
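	/*
	 * fpu_default is the default extended-state (xstate) flavor,
	 * presumably established during fpu_module_init() from the CPU's
	 * capabilities; the assert below guards against consulting it
	 * before that initialization has run.
	 */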
	assert(fpu_default != UNDEFINED);

	THREAD_TO_PCB(thr_template)->xstate = fpu_default;
}

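/*
 * Return the user-space instruction pointer of the current thread,
 * i.e. the PC at which it will resume in user mode, read from its
 * saved user register state (used, for example, by profiling paths).
 */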
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thr_act);

		return iss64->isf.rip;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thr_act);

		return iss32->eip;
	}
}

/*
 * detach and return a kernel stack from a thread
 */

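/*
 * (Detach/attach are presumably driven by the scheduler's stack-cache
 * machinery: a thread blocked on a continuation can give up its kernel
 * stack here and have one re-attached, via machine_stack_attach() below,
 * before it next runs.)
 */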
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0,
	    0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;

	return stack;
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t        thread,
	vm_offset_t     stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	thread_initialize_kernel_state(thread);

	statep = STACK_IKS(stack);

	/*
	 * Reset the state of the thread to resume from a continuation,
	 * including resetting the stack and frame pointer to avoid backtracers
	 * seeing this temporary state and attempting to walk the defunct stack.
	 */
	statep->k_rbp = (uint64_t) 0;
	statep->k_rip = (uint64_t) Thread_continue;
	statep->k_rbx = (uint64_t) thread_continue;
	statep->k_rsp = (uint64_t) STACK_IKS(stack);
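
	/*
	 * When this thread is next switched to, the context-switch path
	 * restores k_rsp/k_rbx and jumps to k_rip.  Thread_continue is
	 * the assembly trampoline that is assumed to hand control to the
	 * continuation stashed in k_rbx (thread_continue here), roughly
	 * (illustrative sketch, not the actual listing):
	 *
	 *	Thread_continue:
	 *		movq	%rax, %rdi	// previous thread as first argument
	 *		xorq	%rbp, %rbp	// terminate backtraces here
	 *		call	*%rbx		// invoke the continuation
	 */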

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

	kpc_off_cpu(old);

	stack = old->kernel_stack;
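	/*
	 * The reserved ("emergency") stack reservation follows the stack
	 * actually in use: if the outgoing thread was running on its
	 * reserved stack, exchange reservations so that the incoming
	 * thread, which inherits that stack below, also owns it.
	 */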
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(old, old->kernel_stack);
#endif
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);
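
	/*
	 * At this point the incoming thread is, machine-wise, current on
	 * this CPU: FPU context, address space and pcb have all been
	 * switched.  The remaining steps publish it as the active thread
	 * and reset its (handed-off) kernel state.
	 */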

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}




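/*
 * Containers for a complete snapshot of a thread's machine context:
 * general-purpose, floating-point and debug state, captured together
 * by act_thread_csave() and reinstalled by act_thread_catt() below.
 */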
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};


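/*
 * A hypothetical save/restore pairing (caller names illustrative, not
 * from this file); note that while the debug state is captured here,
 * act_thread_catt() as written reinstalls only the saved and float
 * state:
 *
 *	void *ctx = act_thread_csave();	// NULL on allocation/copy failure
 *	...				// work that may clobber the state
 *	act_thread_catt(ctx);		// reinstall and free; NULL is a no-op
 */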
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = kalloc_data(sizeof(struct x86_act_context64), Z_WAITOK);

		if (ic64 == (struct x86_act_context64 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
		    (thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE64,
		    (thread_state_t)&ic64->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		return ic64;
	} else {
		struct x86_act_context32 *ic32;

		ic32 = kalloc_data(sizeof(struct x86_act_context32), Z_WAITOK);

		if (ic32 == (struct x86_act_context32 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
		    (thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE32,
		    (thread_state_t)&ic32->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		return ic32;
	}
}


void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL) {
		return;
	}

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
			    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree_data(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
			    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree_data(ic32, sizeof(struct x86_act_context32));
	}
}


void
act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
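/*
 * (On x86, DR4 and DR5 are legacy aliases of DR6 and DR7 when CR4.DE
 * is clear, and accessing them raises #UD when CR4.DE is set -- so they
 * are copied for completeness at most, never loaded into hardware.)
 */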
void
copy_debug_state32(
	x86_debug_state32_t *src,
	x86_debug_state32_t *target,
	boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
	x86_debug_state64_t *src,
	x86_debug_state64_t *target,
	boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
