/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>         /* LAPIC_PMC_SWI_VECTOR */
#include <i386/seg.h>

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

#include <san/kcov_stksz.h>


/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
	[x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
	[x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE] = x86_AVX_STATE_COUNT,
	[x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
	[x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT
};
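
/*
 * The counts above are in units of natural_t (32-bit words).  The set/get
 * handlers later in this file validate caller-supplied counts against this
 * table, e.g.:
 *
 *	if (count != _MachineStateCount[flavor]) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */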

ZONE_DEFINE_TYPE(iss_zone, "x86_64 saved state",
    x86_saved_state_t, ZC_NONE);

ZONE_DEFINE_TYPE(ids_zone, "x86_64 debug state",
    x86_debug_state64_t, ZC_NONE);

/* Forward */

extern void Thread_continue(void);
extern void Load_context(
	thread_t thread) __attribute__((noreturn));

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, void *ts, boolean_t full);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, void *ts, boolean_t full);

/*
 * Don't let an illegal value for the lower 32 bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behavior and can lead to an unexpected
 * TRCTRAP.
 */
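/*
 * For reference, the DR7 fields checked and normalized here
 * (Intel SDM, Vol. 3, "Debug Registers"):
 *
 *	bits  0-7	L0/G0 .. L3/G3 per-breakpoint local/global enables
 *			(the global G0-G3 bits are refused below)
 *	bits  8-9	LE/GE
 *	bit   10	reserved, must be 1 (forced to 1 below)
 *	bits 11-12, 14-15	reserved (forced to 0 below); bit 13 is GD
 *	bits 16-31	R/Wn and LENn, two bits each, for breakpoints 0-3
 */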
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be the pattern
	 * "10B" to indicate I/O reads and writes
	 */
	if (!(get_cr4() & CR4_DE)) {
		for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
		    i++, mask1 <<= 4, mask2 <<= 4) {
			if ((*dr7d & mask1) == mask2) {
				return FALSE;
			}
		}
	}

	/*
	 * If we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++) {
		if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
			return FALSE;
		}
	}

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;     /* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);  /* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);  /* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);  /* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);  /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7d & 0x2) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 2)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 4)) {
		return FALSE;
	}

	if (*dr7d & (0x2 << 6)) {
		return FALSE;
	}

	return TRUE;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7)) {
		return FALSE;
	}

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
		return FALSE;
	}

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1) {
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 2)) {
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 4)) {
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	if (ds->dr7 & (0x1 << 6)) {
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
			return FALSE;
		}
	}

	/* For x86-64, we must ensure the upper 32 bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}


	copy_debug_state32(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc_flags(ids_zone, Z_WAITOK | Z_ZERO);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
			    HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock, LCK_GRP_NULL);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state64(ds, pcb->ids, FALSE);

	return KERN_SUCCESS;
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else {
		bzero(ds, sizeof *ds);
	}
}

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

static void
machine_rsb_stuff(void)
{
#define RSB_STUFF_SPACE_REQD (256 + 16) /* 256 bytes plus a buffer of another 16 for misc. */

	asm volatile (
	    ".macro RSBST from=0, to=15\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " call 1f\n"
	    "2:\n"
	    " pause\n"
	    " lfence\n"
	    " jmp 2b\n"
	    "1:\n"
	    " .if \\to - \\from \n"
	    " RSBST \"(\\from + 1)\", \\to \n"
	    " .endif \n"
	    ".endmacro \n"
	    "\n"
	    "L_rsbst:\n"
	    " RSBST \n"
	    " addq $(16 * 2 * 8), %%rsp\n"
	    ::: "memory", "cc");
}
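
/*
 * Stack accounting for machine_rsb_stuff(): the RSBST macro expands
 * recursively 16 times and each expansion executes two CALLs, so 32 return
 * addresses (32 * 8 = 256 bytes) are pushed to fill the Return Stack Buffer;
 * the trailing addq of (16 * 2 * 8) then reclaims exactly that much stack.
 * This is why RSB_STUFF_SPACE_REQD is 256 bytes plus a small buffer.
 */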

static inline void
pmap_switch_context(thread_t ot, thread_t nt, int cnum)
{
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
		if (__improbable((nt->machine.mthr_do_segchk & MTHR_RSBST) &&
		    (current_kernel_stack_depth() + RSB_STUFF_SPACE_REQD) < kernel_stack_size)) {
			machine_rsb_stuff();
		}
	}
}
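
/*
 * Note on the headroom check above: because machine_rsb_stuff() transiently
 * pushes its stuffing sequence onto the kernel stack before restoring %rsp,
 * stuffing is only attempted when at least RSB_STUFF_SPACE_REQD bytes
 * remain on the current kernel stack.
 */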

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

#if KPC
	kpc_off_cpu(old);
#endif /* KPC */

	/*
	 * Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t depth = current_kernel_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	return Switch_context(old, continuation, new);
}

boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.specFlags & OnProc;
}

boolean_t
machine_thread_on_core_allow_invalid(thread_t thread)
{
	extern int _copyin_atomic32(const char *src, uint32_t *dst);
	uint32_t flags;

	/*
	 * Utilize the fact that the thread zone is sequestered, which means
	 * that this kernel-to-kernel copyin can't read data
	 * from anything but a thread, zeroed or freed memory.
	 */
	assert(get_preemption_level() > 0);
	thread = pgz_decode_allow_invalid(thread, ZONE_ID_THREAD);
	if (thread == THREAD_NULL) {
		return false;
	}
	thread_require(thread);
	if (_copyin_atomic32((void *)&thread->machine.specFlags, &flags) == 0) {
		return flags & OnProc;
	}
	return false;
}

thread_t
machine_processor_shutdown(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_switch_context(thread, NULL);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return Shutdown_context(thread, doshutdown, processor);
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
void
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread()) {
			clear_fpu();
		}
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
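
/*
 * For illustration, a typical user-space consumer of these flavors calls
 * the Mach thread_get_state() interface (a sketch; error handling and the
 * __-prefixed userspace field names are elided):
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(mach_thread_self(),
 *	    x86_THREAD_STATE64, (thread_state_t)&ts, &count);
 *
 * Such a call arrives at machine_thread_get_state() below with the same
 * flavor, state buffer, and count.
 */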


static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
	    ts->ss,
	    ts->ds,
	    ts->es,
	    ts->fs,
	    ts->gs)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
		saved_state->cs = SYSENTER_TF_CS;
	}

	return KERN_SUCCESS;
}

static int
set_thread_state64(thread_t thread, void *state, int full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
		if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		ts = (x86_thread_state64_t *)state;
		// In this case, ts->cs exists but is ignored, and
		// CS is always set to USER_CS below instead.
	}

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip)) {
		return KERN_INVALID_ARGUMENT;
	}

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;

	if (full == FALSE) {
		saved_state->isf.cs = USER64_CS;
	} else {
		saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
		saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
		saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
		saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
		machine_thread_set_tsd_base(thread,
		    ((x86_thread_full_state64_t *)ts)->gsbase);
	}

	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return KERN_SUCCESS;
}



static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t *saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, void *state, boolean_t full)
{
	x86_thread_state64_t *ts;
	x86_saved_state64_t *saved_state;

	if (full == TRUE) {
		ts = &((x86_thread_full_state64_t *)state)->ss64;
	} else {
		ts = (x86_thread_state64_t *)state;
	}

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;

	if (full == TRUE) {
		((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
		((x86_thread_full_state64_t *)state)->es = saved_state->es;
		((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
		((x86_thread_full_state64_t *)state)->gsbase =
		    thread->machine.cthread_self;
	}

	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count,
	__unused thread_state_t old_tstate,
	__unused mach_msg_type_number_t old_count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;

		/*
		 * Refuse to allow 64-bit processes to set
		 * 32-bit state.
		 */
		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
		    state->ss,
		    state->ds,
		    state->es,
		    state->fs,
		    state->gs)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
			state->cs = SYSENTER_TF_CS;
		}

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip)) {
			return KERN_INVALID_ARGUMENT;
		}

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;

		if (count != x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;

		if (count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
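		/*
		 * For example (values per mach/i386/thread_status.h):
		 * x86_AVX_STATE - 1 == x86_AVX_STATE64 and
		 * x86_AVX_STATE - 2 == x86_AVX_STATE32; the AVX512 triplet
		 * is laid out the same way.
		 */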
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			    (thread_state_t)&state->ufs.as64,
			    flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
			    (thread_state_t)&state->ufs.as32,
			    flavor - 2);
		}
		return KERN_INVALID_ARGUMENT;
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, FALSE);
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (count != x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		return set_thread_state64(thr_act, tstate, TRUE);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (count != x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
		    state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act) && get_threadtask(thr_act)->i386_ldt != 0) {
			return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
		    state->tsh.count == x86_THREAD_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else {
			return KERN_INVALID_ARGUMENT;
		}
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

mach_vm_address_t
machine_thread_pc(thread_t thr_act)
{
	if (thread_is_64bit_addr(thr_act)) {
		return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
	} else {
		return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
	}
}

void
machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
{
	pal_register_cache_state(thr_act, DIRTY);

	if (thread_is_64bit_addr(thr_act)) {
		if (!IS_USERADDR64_CANONICAL(pc)) {
			pc = 0;
		}
		USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
	} else {
		USER_REGS32(thr_act)->eip = (uint32_t)pc;
	}
}


/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_15:
	{
		if (*count < 7) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;
		tstate[6] = x86_PAGEIN_STATE;

		*count = 7;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (*count < x86_SAVED_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (*count < x86_SAVED_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t *state;
		kern_return_t kret;

		if (*count < x86_FLOAT_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit_addr(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return kret;
	}

	case x86_AVX_STATE32:
	case x86_AVX512_STATE32:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	case x86_AVX512_STATE64:
	{
		if (*count != _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	case x86_AVX512_STATE:
	{
		x86_avx_state_t *state;
		thread_state_t fstate;

		if (*count < _MachineStateCount[flavor]) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		if (thread_is_64bit_addr(thr_act)) {
			flavor -= 1;    /* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;    /* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, FALSE);
		break;
	}

	case x86_THREAD_FULL_STATE64:
	{
		if (*count < x86_THREAD_FULL_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* If this process does not have a custom LDT, return failure */
		if (get_threadtask(thr_act)->i386_ldt == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_THREAD_FULL_STATE64_COUNT;

		get_thread_state64(thr_act, tstate, TRUE);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64, FALSE);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t *state;

		if (*count < x86_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_addr(thr_act)) {
			return KERN_INVALID_ARGUMENT;
		}

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;

		if (*count < x86_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit_addr(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	case x86_PAGEIN_STATE:
	{
		if (*count < x86_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_pagein_state_t *state = (void *)tstate;

		state->__pagein_error = thr_act->t_pagein_error;

		*count = x86_PAGEIN_STATE_COUNT;
		break;
	}

	case x86_INSTRUCTION_STATE:
	{
		if (*count < x86_INSTRUCTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		x86_instruction_state_t *state = (void *)tstate;
		x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;

		if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
#if DEVELOPMENT || DEBUG
			extern int insnstream_force_cacheline_mismatch;
#endif
			size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
			    ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
			if (byte_count > 0) {
				bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);
			}
			state->insn_offset = src_state->insn_offset;
			state->insn_stream_valid_bytes = byte_count;
#if DEVELOPMENT || DEBUG
			state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
			insnstream_force_cacheline_mismatch = 0;        /* One-shot, reset after use */

			if (state->out_of_synch) {
				bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
				    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			} else {
				bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);
			}
#else
			state->out_of_synch = src_state->out_of_synch;
#endif
			*count = x86_INSTRUCTION_STATE_COUNT;
		} else {
			*count = 0;
		}
		break;
	}

	case x86_LAST_BRANCH_STATE:
	{
		if (last_branch_enabled_modes != LBR_ENABLED_USERMODE || *count < x86_LAST_BRANCH_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Callers to this function are assumed to be from user space and the LBR values will be filtered accordingly */
		if (i386_filtered_lbr_state_to_mach_thread_state(thr_act, (last_branch_state_t *)tstate, true) < 0) {
			*count = 0;
			return KERN_INVALID_ARGUMENT;
		}

		*count = x86_LAST_BRANCH_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t *state;
		x86_saved_state32_t *saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t *state;
		x86_saved_state64_t *saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
2020
2021
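/*
 * Re-create a thread's machine state when its task switches between
 * 32-bit and 64-bit address modes: the state save areas are reset and
 * the FPU state is adjusted to match the new addressing mode.
 */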
void
machine_thread_switch_addrmode(thread_t thread)
{
	task_t task = get_threadtask(thread);

	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, task, false);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64Bit_addr(task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}



/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	fpu_module_init();
}

/*
 * machine_thread_template_init: Initialize machine-specific portion of
 * the thread template.
 */
void
machine_thread_template_init(thread_t thr_template)
{
	assert(fpu_default != UNDEFINED);

	THREAD_TO_PCB(thr_template)->xstate = fpu_default;
}

user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thr_act);

		return iss64->isf.rip;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thr_act);

		return iss32->eip;
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0,
	    0);

	stack = thread->kernel_stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, stack);
#endif
	thread->kernel_stack = 0;

	return stack;
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t                thread,
	vm_offset_t             stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority,
	    thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(thread, 0);
#endif
	thread_initialize_kernel_state(thread);

	statep = STACK_IKS(stack);

	/*
	 * Reset the state of the thread to resume from a continuation,
	 * including resetting the stack and frame pointer to avoid backtracers
	 * seeing this temporary state and attempting to walk the defunct stack.
	 */
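	/*
	 * k_rip is set to the Thread_continue assembly trampoline, which
	 * invokes the continuation handler staged in k_rbx (here
	 * thread_continue) on the fresh stack.
	 */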
	statep->k_rbp = (uint64_t) 0;
	statep->k_rip = (uint64_t) Thread_continue;
	statep->k_rbx = (uint64_t) thread_continue;
	statep->k_rsp = (uint64_t) STACK_IKS(stack);

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if HYPERVISOR
	if (old->hv_thread_target) {
		hv_callbacks.preempt(old->hv_thread_target);
	}
#endif

	kpc_off_cpu(old);

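	/*
	 * If the outgoing thread was running on its reserved stack,
	 * exchange stack reservations so the stack being handed off
	 * remains the reserved stack of its new owner.
	 */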
	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(old, old->kernel_stack);
#endif
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;
#if CONFIG_STKSZ
	kcov_stksz_set_thread_stack(new, 0);
#endif

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	if (new->hv_thread_target) {
		hv_callbacks.dispatch(new->hv_thread_target);
	}
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}



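/*
 * Bundles of user thread state captured by act_thread_csave() and
 * reinstalled by act_thread_catt(): the register save area plus the
 * floating-point and debug state, in 32-bit and 64-bit flavors.
 */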
struct x86_act_context32 {
	x86_saved_state32_t     ss;
	x86_float_state32_t     fs;
	x86_debug_state32_t     ds;
};

struct x86_act_context64 {
	x86_saved_state64_t     ss;
	x86_float_state64_t     fs;
	x86_debug_state64_t     ds;
};


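/*
 * Snapshot the current thread's saved register, floating-point and
 * debug state into a freshly allocated context. Returns NULL if the
 * allocation or any of the state copies fails; the context is consumed
 * (restored and freed) by act_thread_catt().
 */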
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = kalloc_data(sizeof(struct x86_act_context64), Z_WAITOK);

		if (ic64 == (struct x86_act_context64 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
		    (thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE64,
		    (thread_state_t)&ic64->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic64, sizeof(struct x86_act_context64));
			return (void *)0;
		}
		return ic64;
	} else {
		struct x86_act_context32 *ic32;

		ic32 = kalloc_data(sizeof(struct x86_act_context32), Z_WAITOK);

		if (ic32 == (struct x86_act_context32 *)NULL) {
			return (void *)0;
		}

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
		    (thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
		    x86_DEBUG_STATE32,
		    (thread_state_t)&ic32->ds,
		    &val);
		if (kret != KERN_SUCCESS) {
			kfree_data(ic32, sizeof(struct x86_act_context32));
			return (void *)0;
		}
		return ic32;
	}
}
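/*
 * Reinstall a context captured by act_thread_csave() into the current
 * thread and free it. Only the register and floating-point state are
 * restored; the captured debug state is not reinstalled here.
 */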
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL) {
		return;
	}

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
		    (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
			    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree_data(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
		    (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
			    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree_data(ic32, sizeof(struct x86_act_context32));
	}
}
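/*
 * Counterpart to act_thread_csave() for discarding a captured context
 * without restoring it; currently a no-op, since act_thread_catt()
 * frees the context itself.
 */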
void
act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
	x86_debug_state32_t *src,
	x86_debug_state32_t *target,
	boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
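/*
 * Illustrative (hypothetical) caller: code installing user-supplied
 * debug registers would pass FALSE so dr4/dr5 are never copied through:
 *
 *	x86_debug_state32_t *thread_ds;   // per-thread debug save area
 *	copy_debug_state32(&user_ds, thread_ds, FALSE);
 */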

/*
 * Duplicate one x86_debug_state64_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
	x86_debug_state64_t *src,
	x86_debug_state64_t *target,
	boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}