1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/vm_attributes.h>
30 #include <mach/vm_param.h>
31 #include <libsa/types.h>
32
33 #include <kdp/kdp_core.h>
34 #include <kdp/kdp_internal.h>
35 #include <kdp/ml/i386/kdp_x86_common.h>
36 #include <mach-o/loader.h>
37 #include <mach/thread_status.h>
38 #include <i386/thread.h>
39
40 int kdp_dump_trap(int type, x86_saved_state64_t *regs);
41
/*
 * Thread-state flavors written into each LC_THREAD command of the core
 * file.  Each entry pairs a flavor constant with its payload size in
 * 32-bit words; only the 64-bit general-purpose register state is
 * captured here.
 */
static const x86_state_hdr_t thread_flavor_array[] = {
	{x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT}
};
45
46 void
kern_collectth_state_size(uint64_t * tstate_count,uint64_t * ptstate_size)47 kern_collectth_state_size(uint64_t * tstate_count, uint64_t * ptstate_size)
48 {
49 unsigned int i;
50 uint64_t tstate_size = 0;
51
52 for (i = 0; i < sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]); i++) {
53 tstate_size += sizeof(x86_state_hdr_t) +
54 (thread_flavor_array[i].count * sizeof(int));
55 }
56
57 *tstate_count = 1;
58 *ptstate_size = sizeof(struct thread_command) + tstate_size;
59 }
60
61 void
kern_collect_userth_state_size(task_t task,uint64_t * tstate_count,uint64_t * tstate_size)62 kern_collect_userth_state_size(task_t task, uint64_t * tstate_count, uint64_t * tstate_size)
63 {
64 uint64_t per_thread_size = 0;
65 const x86_state_hdr_t * flavors = thread_flavor_array;
66 size_t num_flavors = sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]);
67
68 for (size_t i = 0; i < num_flavors; i++) {
69 per_thread_size += sizeof(x86_state_hdr_t) + flavors[i].count * sizeof(natural_t);
70 }
71
72 *tstate_count = task->thread_count;
73 *tstate_size = sizeof(struct thread_command) + per_thread_size;
74 }
75
76 void
kern_collect_userth_state(task_t task __unused,thread_t thread,void * buffer,uint64_t size)77 kern_collect_userth_state(task_t task __unused, thread_t thread, void *buffer, uint64_t size)
78 {
79 kern_return_t ret;
80 const x86_state_hdr_t * flavors = thread_flavor_array;
81 size_t num_flavors = sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]);
82
83 struct thread_command *tc = (struct thread_command *)buffer;
84 tc->cmd = LC_THREAD;
85 tc->cmdsize = (uint32_t)size;
86
87 x86_state_hdr_t *hdr = (x86_state_hdr_t *)(tc + 1);
88
89 for (size_t i = 0; i < num_flavors; i++) {
90 hdr->flavor = flavors[i].flavor;
91 hdr->count = flavors[i].count;
92 /* Ensure we can't write past the end of the buffer */
93 assert(hdr->count + sizeof(x86_state_hdr_t) + ((uintptr_t)hdr - (uintptr_t)buffer) <= size);
94 ret = machine_thread_get_state(thread, hdr->flavor, (thread_state_t)(hdr + 1), &hdr->count);
95 assert(ret == KERN_SUCCESS);
96
97 hdr = (x86_state_hdr_t *)((uintptr_t)(hdr + 1) + hdr->count * sizeof(natural_t));
98 }
99 }
100
/*
 * Fill 'buffer' (of 'size' bytes) with the LC_THREAD command for the
 * given kernel thread: a struct thread_command followed by one state
 * header and register payload per flavor in thread_flavor_array.
 * Returns early, leaving the remainder unwritten, if the buffer is too
 * small at any step.  '*iter' is always reset to NULL here.
 */
void
kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter)
{
	size_t hoffset;     /* current write offset into buffer */
	uint64_t tstate_size, tstate_count;
	unsigned int i;
	struct thread_command *tc;


	*iter = NULL;
	/*
	 * Fill in thread command structure.
	 */
	hoffset = 0;

	if (hoffset + sizeof(struct thread_command) > size) {
		return;
	}

	kern_collectth_state_size(&tstate_count, &tstate_size);
	tc = (struct thread_command *) ((uintptr_t)buffer + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t) tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]); i++) {
		/* Bounds-check before writing the flavor header. */
		if (hoffset + sizeof(x86_state_hdr_t) > size) {
			return;
		}

		*(x86_state_hdr_t *)((uintptr_t)buffer + hoffset) =
		    thread_flavor_array[i];
		hoffset += sizeof(x86_state_hdr_t);


		/* Bounds-check before writing the register payload. */
		if (hoffset + thread_flavor_array[i].count * sizeof(int) > size) {
			return;
		}

		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state().
		 */
		if (thread_flavor_array[i].flavor == x86_THREAD_STATE64) {
			x86_thread_state64_t *tstate = (x86_thread_state64_t *) ((uintptr_t)buffer + hoffset);
			vm_offset_t kstack;
			x86_saved_state64_t *cpstate = current_cpu_datap()->cpu_fatal_trap_state;

			/* Zero first so any registers we can't recover read as 0. */
			bzero(tstate, x86_THREAD_STATE64_COUNT * sizeof(int));
			if ((current_thread() == thread) && (cpstate != NULL)) {
				/*
				 * This is the thread that took the fatal trap:
				 * copy the full register snapshot saved by the
				 * trap handler on this CPU.
				 */
				tstate->rax = cpstate->rax;
				tstate->rbx = cpstate->rbx;
				tstate->rcx = cpstate->rcx;
				tstate->rdx = cpstate->rdx;
				tstate->rdi = cpstate->rdi;
				tstate->rsi = cpstate->rsi;
				tstate->rbp = cpstate->rbp;
				tstate->r8 = cpstate->r8;
				tstate->r9 = cpstate->r9;
				tstate->r10 = cpstate->r10;
				tstate->r11 = cpstate->r11;
				tstate->r12 = cpstate->r12;
				tstate->r13 = cpstate->r13;
				tstate->r14 = cpstate->r14;
				tstate->r15 = cpstate->r15;
				tstate->rip = cpstate->isf.rip;
				tstate->rsp = cpstate->isf.rsp;
				tstate->rflags = cpstate->isf.rflags;
				tstate->cs = cpstate->isf.cs;
				tstate->fs = cpstate->fs;
				tstate->gs = cpstate->gs;
			} else if ((kstack = thread->kernel_stack) != 0) {
				/*
				 * Not the panicking thread: recover only the
				 * callee-saved registers from the switch
				 * context stored on its kernel stack.
				 */
				struct x86_kernel_state *iks = STACK_IKS(kstack);
				tstate->rbx = iks->k_rbx;
				tstate->rsp = iks->k_rsp;
				tstate->rbp = iks->k_rbp;
				tstate->r12 = iks->k_r12;
				tstate->r13 = iks->k_r13;
				tstate->r14 = iks->k_r14;
				tstate->r15 = iks->k_r15;
				tstate->rip = iks->k_rip;
			}
		} else {
			/* Unrecognized flavor: emit an all-zero payload. */
			void *tstate = (void *)((uintptr_t)buffer + hoffset);

			bzero(tstate, thread_flavor_array[i].count * sizeof(int));
		}

		hoffset += thread_flavor_array[i].count * sizeof(int);
	}
}
196
197 /* Intended to be called from the kernel trap handler if an unrecoverable fault
198 * occurs during a crashdump (which shouldn't happen since we validate mappings
199 * and so on). This should be reworked to attempt some form of recovery.
200 */
201 int
kdp_dump_trap(int type,__unused x86_saved_state64_t * saved_state)202 kdp_dump_trap(
203 int type,
204 __unused x86_saved_state64_t *saved_state)
205 {
206 printf("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
207 kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0));
208 abort_panic_transfer();
209 kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
210 kdp_flag &= ~PANIC_CORE_ON_NMI;
211 kdp_flag &= ~PANIC_LOG_DUMP;
212
213 kdp_reset();
214
215 kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
216 return 0;
217 }
218
219 /*
220 * kdp_core_start_addr
221 *
222 * return the address where the kernel core file starts
223 *
224 */
225 vm_map_offset_t
kdp_core_start_addr()226 kdp_core_start_addr()
227 {
228 return VM_MIN_KERNEL_AND_KEXT_ADDRESS;
229 }
230