1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/kdebug_common.h>
24 #include <vm/vm_kern_xnu.h>
25
// Lock group shared by all kdebug spin locks (trace and triage).
LCK_GRP_DECLARE(kdebug_lck_grp, "kdebug");
// Extra debug logging toggle for kdebug itself; presumably set via a
// boot-arg or sysctl elsewhere -- confirm at use sites.
int kdbg_debug = 0;

// Per-mode control state, defined in the trace and triage modules.
extern struct kd_control kd_control_trace, kd_control_triage;
30
31 int
kdebug_storage_lock(struct kd_control * kd_ctrl_page)32 kdebug_storage_lock(struct kd_control *kd_ctrl_page)
33 {
34 int intrs_en = ml_set_interrupts_enabled(false);
35 lck_spin_lock_grp(&kd_ctrl_page->kdc_storage_lock, &kdebug_lck_grp);
36 return intrs_en;
37 }
38
39 void
kdebug_storage_unlock(struct kd_control * kd_ctrl_page,int intrs_en)40 kdebug_storage_unlock(struct kd_control *kd_ctrl_page, int intrs_en)
41 {
42 lck_spin_unlock(&kd_ctrl_page->kdc_storage_lock);
43 ml_set_interrupts_enabled(intrs_en);
44 }
45
// Boot-args controlling early (boot/wake) tracing behavior.

// Turn on boot tracing and set the number of events.
static TUNABLE(unsigned int, new_nkdbufs, "trace", 0);
// Enable wrapping during boot tracing.
TUNABLE(unsigned int, trace_wrap, "trace_wrap", 0);
// The filter description to apply to boot tracing.
static TUNABLE_STR(trace_typefilter, 256, "trace_typefilter", "");

// Turn on wake tracing and set the number of events.
TUNABLE(unsigned int, wake_nkdbufs, "trace_wake", 0);
// Write trace events to a file in the event of a panic.
TUNABLE(unsigned int, write_trace_on_panic, "trace_panic", 0);

// Obsolete leak logging system.
TUNABLE(int, log_leaks, "-l", 0);
60
/*
 * Early-boot initialization for kdebug: set up the storage locks for both
 * modes, kick off boot tracing per the "trace" boot-args, and allocate the
 * triage buffers.  Lock initialization happens first since the subsequent
 * setup paths may take the storage locks.
 */
__startup_func
void
kdebug_startup(void)
{
	lck_spin_init(&kd_control_trace.kdc_storage_lock, &kdebug_lck_grp, LCK_ATTR_NULL);
	lck_spin_init(&kd_control_triage.kdc_storage_lock, &kdebug_lck_grp, LCK_ATTR_NULL);
	// Start boot tracing with the buffer count, typefilter, and wrapping
	// behavior requested via boot-args.
	kdebug_init(new_nkdbufs, trace_typefilter,
	    (trace_wrap ? KDOPT_WRAPPING : 0) | KDOPT_ATBOOT);
	create_buffers_triage();
}
71
72 uint32_t
kdbg_cpu_count(void)73 kdbg_cpu_count(void)
74 {
75 #if defined(__x86_64__)
76 return ml_early_cpu_max_number() + 1;
77 #else // defined(__x86_64__)
78 return ml_get_cpu_count();
79 #endif // !defined(__x86_64__)
80 }
81
82 /*
83 * Both kdebug_timestamp and kdebug_using_continuous_time are known
84 * to kexts. And going forward we always want to use mach_continuous_time().
85 * So we keep these 2 routines as-is to keep the TRACE mode use outside
86 * the kernel intact. TRIAGE mode will explicitly only use mach_continuous_time()
87 * for its timestamp.
88 */
89 bool
kdebug_using_continuous_time(void)90 kdebug_using_continuous_time(void)
91 {
92 return kd_control_trace.kdc_flags & KDBG_CONTINUOUS_TIME;
93 }
94
95 uint64_t
kdebug_timestamp(void)96 kdebug_timestamp(void)
97 {
98 if (kdebug_using_continuous_time()) {
99 return mach_continuous_time();
100 } else {
101 return mach_absolute_time();
102 }
103 }
104
/*
 * Allocate the per-CPU bookkeeping, the storage regions, and the copy-out
 * buffer for a kdebug mode, and thread every storage unit onto the control
 * page's free list.
 *
 * Returns 0 on success, or ENOSPC if any allocation fails; on failure all
 * partial allocations are released via delete_buffers().
 */
int
create_buffers(
	struct kd_control *kd_ctrl_page,
	struct kd_buffer *kd_data_page,
	vm_tag_t tag)
{
	unsigned int i;
	unsigned int p_buffer_size;
	unsigned int f_buffer_size;
	unsigned int f_buffers;
	int error = 0;
	int ncpus, count_storage_units = 0;

	struct kd_bufinfo *kdbip = NULL;
	struct kd_region *kd_bufs = NULL;
	int kdb_storage_count = kd_data_page->kdb_storage_count;

	ncpus = kd_ctrl_page->alloc_cpus;

	// Zero-initialized per-CPU buffer bookkeeping.
	kdbip = kalloc_type_tag(struct kd_bufinfo, ncpus, Z_WAITOK | Z_ZERO, tag);
	if (kdbip == NULL) {
		error = ENOSPC;
		goto out;
	}
	kd_data_page->kdb_info = kdbip;

	// Full regions hold N_STORAGE_UNITS_PER_BUFFER storage units apiece;
	// any remainder goes into one extra, partial region.
	f_buffers = kdb_storage_count / N_STORAGE_UNITS_PER_BUFFER;
	kd_data_page->kdb_region_count = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (kdb_storage_count % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size) {
		kd_data_page->kdb_region_count++;
	}

	// Scratch buffer for copying events out; only allocated once.
	if (kd_ctrl_page->kdebug_kdcopybuf_size > 0 && kd_data_page->kdcopybuf == NULL) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_data_page->kdcopybuf,
		    (vm_size_t) kd_ctrl_page->kdebug_kdcopybuf_size,
		    KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}

	// Region descriptor array (address + size per region).
	kd_bufs = kalloc_type_tag(struct kd_region, kd_data_page->kdb_region_count,
	    Z_WAITOK | Z_ZERO, tag);
	if (kd_bufs == NULL) {
		error = ENOSPC;
		goto out;
	}
	kd_data_page->kd_bufs = kd_bufs;

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdr_addr,
		    (vm_size_t)f_buffer_size, KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}

		kd_bufs[i].kdr_size = f_buffer_size;
	}
	// NB: `i` is still f_buffers here, indexing the final partial region.
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdr_addr,
		    (vm_size_t)p_buffer_size, KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}

		kd_bufs[i].kdr_size = p_buffer_size;
	}

	// Push every storage unit of every region onto the free list (LIFO),
	// recounting the true total from the actual region sizes.
	count_storage_units = 0;
	for (i = 0; i < kd_data_page->kdb_region_count; i++) {
		struct kd_storage *kds;
		// Storage-unit offsets are stored in 16 bits; the asserts keep
		// the region sizes within that representation.
		uint16_t n_elements;
		static_assert(N_STORAGE_UNITS_PER_BUFFER <= UINT16_MAX);
		assert(kd_bufs[i].kdr_size <= N_STORAGE_UNITS_PER_BUFFER *
		    sizeof(struct kd_storage));

		n_elements = kd_bufs[i].kdr_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdr_addr;

		for (uint16_t n = 0; n < n_elements; n++) {
			// Link this unit to the current free-list head, then
			// make it the new head (region index + offset pair).
			kds[n].kds_next.buffer_index = kd_ctrl_page->kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page->kds_free_list.offset;

			kd_ctrl_page->kds_free_list.buffer_index = i;
			kd_ctrl_page->kds_free_list.offset = n;
		}
		count_storage_units += n_elements;
	}

	kd_data_page->kdb_storage_count = count_storage_units;

	// Reset every CPU's storage list to empty.
	for (i = 0; i < ncpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = false;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page->kdc_flags |= KDBG_BUFINIT;

	kd_ctrl_page->kdc_storage_used = 0;
out:
	if (error) {
		// Unwind any partial allocations.
		delete_buffers(kd_ctrl_page, kd_data_page);
	}

	return error;
}
217
218 void
delete_buffers(struct kd_control * kd_ctrl_page,struct kd_buffer * kd_data_page)219 delete_buffers(struct kd_control *kd_ctrl_page,
220 struct kd_buffer *kd_data_page)
221 {
222 unsigned int i;
223 int kdb_region_count = kd_data_page->kdb_region_count;
224
225 struct kd_bufinfo *kdbip = kd_data_page->kdb_info;
226 struct kd_region *kd_bufs = kd_data_page->kd_bufs;
227
228 if (kd_bufs) {
229 for (i = 0; i < kdb_region_count; i++) {
230 if (kd_bufs[i].kdr_addr) {
231 kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdr_addr, (vm_size_t)kd_bufs[i].kdr_size);
232 }
233 }
234 kfree_type(struct kd_region, kdb_region_count, kd_bufs);
235
236 kd_data_page->kd_bufs = NULL;
237 kd_data_page->kdb_region_count = 0;
238 }
239 if (kd_data_page->kdcopybuf) {
240 kmem_free(kernel_map, (vm_offset_t)kd_data_page->kdcopybuf, kd_ctrl_page->kdebug_kdcopybuf_size);
241
242 kd_data_page->kdcopybuf = NULL;
243 }
244 kd_ctrl_page->kds_free_list.raw = KDS_PTR_NULL;
245
246 if (kdbip) {
247 kfree_type(struct kd_bufinfo, kd_ctrl_page->alloc_cpus, kdbip);
248 kd_data_page->kdb_info = NULL;
249 }
250 kd_ctrl_page->kdc_coprocs = NULL;
251 kd_ctrl_page->kdebug_cpus = 0;
252 kd_ctrl_page->alloc_cpus = 0;
253 kd_ctrl_page->kdc_flags &= ~KDBG_BUFINIT;
254 }
255
/*
 * Disable event collection after running out of storage, and publish the
 * new state so observers (e.g. via the commpage) see tracing as off.
 */
static void
_register_out_of_space(struct kd_control *kd_ctrl_page)
{
	kd_ctrl_page->kdc_emit = KDEMIT_DISABLE;
	kdebug_enable = 0;
	kd_ctrl_page->enabled = 0;
	commpage_update_kdebug_state();
}
264
265 bool
kdebug_storage_alloc(struct kd_control * kd_ctrl_page,struct kd_buffer * kd_data_page,int cpu)266 kdebug_storage_alloc(
267 struct kd_control *kd_ctrl_page,
268 struct kd_buffer *kd_data_page,
269 int cpu)
270 {
271 union kds_ptr kdsp;
272 struct kd_storage *kdsp_actual, *kdsp_next_actual;
273 struct kd_bufinfo *kdbip, *kdbp, *kdbp_vict, *kdbp_try;
274 uint64_t oldest_ts, ts;
275 bool retval = true;
276 struct kd_region *kd_bufs;
277
278 int intrs_en = kdebug_storage_lock(kd_ctrl_page);
279
280 kdbp = &kd_data_page->kdb_info[cpu];
281 kd_bufs = kd_data_page->kd_bufs;
282 kdbip = kd_data_page->kdb_info;
283
284 /* If someone beat us to the allocate, return success */
285 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
286 kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp->kd_list_tail);
287
288 if (kdsp_actual->kds_bufindx < kd_ctrl_page->kdebug_events_per_storage_unit) {
289 goto out;
290 }
291 }
292
293 if ((kdsp = kd_ctrl_page->kds_free_list).raw != KDS_PTR_NULL) {
294 /*
295 * If there's a free page, grab it from the free list.
296 */
297 kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
298 kd_ctrl_page->kds_free_list = kdsp_actual->kds_next;
299
300 kd_ctrl_page->kdc_storage_used++;
301 } else {
302 /*
303 * Otherwise, we're going to lose events and repurpose the oldest
304 * storage unit we can find.
305 */
306 if (kd_ctrl_page->kdc_live_flags & KDBG_NOWRAP) {
307 _register_out_of_space(kd_ctrl_page);
308 kd_ctrl_page->kdc_live_flags |= KDBG_WRAPPED;
309 kdbp->kd_lostevents = true;
310 retval = false;
311 goto out;
312 }
313 kdbp_vict = NULL;
314 oldest_ts = UINT64_MAX;
315
316 for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page->kdebug_cpus]; kdbp_try++) {
317 if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
318 /*
319 * no storage unit to steal
320 */
321 continue;
322 }
323
324 kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp_try->kd_list_head);
325
326 if (kdsp_actual->kds_bufcnt < kd_ctrl_page->kdebug_events_per_storage_unit) {
327 /*
328 * make sure we don't steal the storage unit
329 * being actively recorded to... need to
330 * move on because we don't want an out-of-order
331 * set of events showing up later
332 */
333 continue;
334 }
335
336 /*
337 * When wrapping, steal the storage unit with the
338 * earliest timestamp on its last event, instead of the
339 * earliest timestamp on the first event. This allows a
340 * storage unit with more recent events to be preserved,
341 * even if the storage unit contains events that are
342 * older than those found in other CPUs.
343 */
344 ts = kdbg_get_timestamp(&kdsp_actual->kds_records[kd_ctrl_page->kdebug_events_per_storage_unit - 1]);
345 if (ts < oldest_ts) {
346 oldest_ts = ts;
347 kdbp_vict = kdbp_try;
348 }
349 }
350 if (kdbp_vict == NULL && kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
351 _register_out_of_space(kd_ctrl_page);
352 retval = false;
353 goto out;
354 }
355 kdsp = kdbp_vict->kd_list_head;
356 kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
357 kdbp_vict->kd_list_head = kdsp_actual->kds_next;
358
359 if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
360 kdsp_next_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp_vict->kd_list_head);
361 kdsp_next_actual->kds_lostevents = true;
362 } else {
363 kdbp_vict->kd_lostevents = true;
364 }
365
366 if (kd_ctrl_page->kdc_oldest_time < oldest_ts) {
367 kd_ctrl_page->kdc_oldest_time = oldest_ts;
368 }
369 kd_ctrl_page->kdc_live_flags |= KDBG_WRAPPED;
370 }
371
372 if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
373 kdsp_actual->kds_timestamp = kdebug_timestamp();
374 } else {
375 kdsp_actual->kds_timestamp = mach_continuous_time();
376 }
377
378 kdsp_actual->kds_next.raw = KDS_PTR_NULL;
379 kdsp_actual->kds_bufcnt = 0;
380 kdsp_actual->kds_readlast = 0;
381
382 kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
383 kdbp->kd_lostevents = false;
384 kdsp_actual->kds_bufindx = 0;
385
386 if (kdbp->kd_list_head.raw == KDS_PTR_NULL) {
387 kdbp->kd_list_head = kdsp;
388 } else {
389 POINTER_FROM_KDS_PTR(kd_bufs, kdbp->kd_list_tail)->kds_next = kdsp;
390 }
391 kdbp->kd_list_tail = kdsp;
392 out:
393 kdebug_storage_unlock(kd_ctrl_page, intrs_en);
394
395 return retval;
396 }
397