/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
22*c54f35caSApple OSS Distributions
#include <sys/kdebug_common.h>

/* Lock group shared by all kdebug spinlocks (TRACE and TRIAGE). */
LCK_GRP_DECLARE(kdebug_lck_grp, "kdebug");
/* Debug-logging knob for the kdebug subsystem itself. */
int kdbg_debug = 0;

/* Per-mode control structures; defined elsewhere in the kernel. */
extern struct kd_control kd_control_trace, kd_control_triage;
29*c54f35caSApple OSS Distributions
30*c54f35caSApple OSS Distributions int
kdebug_storage_lock(struct kd_control * kd_ctrl_page)31*c54f35caSApple OSS Distributions kdebug_storage_lock(struct kd_control *kd_ctrl_page)
32*c54f35caSApple OSS Distributions {
33*c54f35caSApple OSS Distributions int intrs_en = ml_set_interrupts_enabled(false);
34*c54f35caSApple OSS Distributions lck_spin_lock_grp(&kd_ctrl_page->kdc_storage_lock, &kdebug_lck_grp);
35*c54f35caSApple OSS Distributions return intrs_en;
36*c54f35caSApple OSS Distributions }
37*c54f35caSApple OSS Distributions
/*
 * Drop the storage spinlock and restore the interrupt state previously
 * returned by kdebug_storage_lock().  The unlock must happen before
 * interrupts are re-enabled.
 */
void
kdebug_storage_unlock(struct kd_control *kd_ctrl_page, int intrs_en)
{
	lck_spin_unlock(&kd_ctrl_page->kdc_storage_lock);
	ml_set_interrupts_enabled(intrs_en);
}
44*c54f35caSApple OSS Distributions
// Turn on boot tracing and set the number of events ("trace" boot-arg).
static TUNABLE(unsigned int, new_nkdbufs, "trace", 0);
// Enable wrapping during boot tracing ("trace_wrap" boot-arg).
TUNABLE(unsigned int, trace_wrap, "trace_wrap", 0);
// The filter description to apply to boot tracing ("trace_typefilter" boot-arg).
static TUNABLE_STR(trace_typefilter, 256, "trace_typefilter", "");

// Turn on wake tracing and set the number of events ("trace_wake" boot-arg).
TUNABLE(unsigned int, wake_nkdbufs, "trace_wake", 0);
// Write trace events to a file in the event of a panic ("trace_panic" boot-arg).
TUNABLE(unsigned int, write_trace_on_panic, "trace_panic", 0);

// Obsolete leak logging system ("-l" boot-arg).
TUNABLE(int, log_leaks, "-l", 0);
59*c54f35caSApple OSS Distributions
60*c54f35caSApple OSS Distributions void
kdebug_startup(void)61*c54f35caSApple OSS Distributions kdebug_startup(void)
62*c54f35caSApple OSS Distributions {
63*c54f35caSApple OSS Distributions lck_spin_init(&kd_control_trace.kdc_storage_lock, &kdebug_lck_grp, LCK_ATTR_NULL);
64*c54f35caSApple OSS Distributions lck_spin_init(&kd_control_triage.kdc_storage_lock, &kdebug_lck_grp, LCK_ATTR_NULL);
65*c54f35caSApple OSS Distributions kdebug_init(new_nkdbufs, trace_typefilter,
66*c54f35caSApple OSS Distributions (trace_wrap ? KDOPT_WRAPPING : 0) | KDOPT_ATBOOT);
67*c54f35caSApple OSS Distributions create_buffers_triage();
68*c54f35caSApple OSS Distributions }
69*c54f35caSApple OSS Distributions
70*c54f35caSApple OSS Distributions uint32_t
kdbg_cpu_count(void)71*c54f35caSApple OSS Distributions kdbg_cpu_count(void)
72*c54f35caSApple OSS Distributions {
73*c54f35caSApple OSS Distributions #if defined(__x86_64__)
74*c54f35caSApple OSS Distributions return ml_early_cpu_max_number() + 1;
75*c54f35caSApple OSS Distributions #else // defined(__x86_64__)
76*c54f35caSApple OSS Distributions return ml_get_cpu_count();
77*c54f35caSApple OSS Distributions #endif // !defined(__x86_64__)
78*c54f35caSApple OSS Distributions }
79*c54f35caSApple OSS Distributions
80*c54f35caSApple OSS Distributions /*
81*c54f35caSApple OSS Distributions * Both kdebug_timestamp and kdebug_using_continuous_time are known
82*c54f35caSApple OSS Distributions * to kexts. And going forward we always want to use mach_continuous_time().
83*c54f35caSApple OSS Distributions * So we keep these 2 routines as-is to keep the TRACE mode use outside
84*c54f35caSApple OSS Distributions * the kernel intact. TRIAGE mode will explicitly only use mach_continuous_time()
85*c54f35caSApple OSS Distributions * for its timestamp.
86*c54f35caSApple OSS Distributions */
87*c54f35caSApple OSS Distributions bool
kdebug_using_continuous_time(void)88*c54f35caSApple OSS Distributions kdebug_using_continuous_time(void)
89*c54f35caSApple OSS Distributions {
90*c54f35caSApple OSS Distributions return kd_control_trace.kdc_flags & KDBG_CONTINUOUS_TIME;
91*c54f35caSApple OSS Distributions }
92*c54f35caSApple OSS Distributions
93*c54f35caSApple OSS Distributions uint64_t
kdebug_timestamp(void)94*c54f35caSApple OSS Distributions kdebug_timestamp(void)
95*c54f35caSApple OSS Distributions {
96*c54f35caSApple OSS Distributions if (kdebug_using_continuous_time()) {
97*c54f35caSApple OSS Distributions return mach_continuous_time();
98*c54f35caSApple OSS Distributions } else {
99*c54f35caSApple OSS Distributions return mach_absolute_time();
100*c54f35caSApple OSS Distributions }
101*c54f35caSApple OSS Distributions }
102*c54f35caSApple OSS Distributions
/*
 * Allocate everything backing a kdebug buffer set: per-CPU bookkeeping
 * (kdb_info), the copy buffer, and the storage regions, then thread every
 * storage unit onto the control page's free list.
 *
 * Returns 0 on success or ENOSPC if any allocation fails; on failure all
 * partial allocations are released via delete_buffers().
 */
int
create_buffers(
	struct kd_control *kd_ctrl_page,
	struct kd_buffer *kd_data_page,
	vm_tag_t tag)
{
	unsigned int i;
	unsigned int p_buffer_size;
	unsigned int f_buffer_size;
	unsigned int f_buffers;
	int error = 0;
	int ncpus, count_storage_units = 0;

	struct kd_bufinfo *kdbip = NULL;
	struct kd_region *kd_bufs = NULL;
	int kdb_storage_count = kd_data_page->kdb_storage_count;

	ncpus = kd_ctrl_page->alloc_cpus;

	/* One kd_bufinfo per CPU, zero-filled. */
	kdbip = kalloc_type_tag(struct kd_bufinfo, ncpus, Z_WAITOK | Z_ZERO, tag);
	if (kdbip == NULL) {
		error = ENOSPC;
		goto out;
	}
	kd_data_page->kdb_info = kdbip;

	/* Full regions hold N_STORAGE_UNITS_PER_BUFFER units each... */
	f_buffers = kdb_storage_count / N_STORAGE_UNITS_PER_BUFFER;
	kd_data_page->kdb_region_count = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	/* ...plus one partial region for the remainder, if any. */
	p_buffer_size = (kdb_storage_count % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size) {
		kd_data_page->kdb_region_count++;
	}

	/* The copy buffer may survive across calls; only allocate it once. */
	if (kd_data_page->kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_data_page->kdcopybuf,
		    (vm_size_t) kd_ctrl_page->kdebug_kdcopybuf_size,
		    KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}

	kd_bufs = kalloc_type_tag(struct kd_region, kd_data_page->kdb_region_count,
	    Z_WAITOK | Z_ZERO, tag);
	if (kd_bufs == NULL) {
		error = ENOSPC;
		goto out;
	}
	kd_data_page->kd_bufs = kd_bufs;

	/* Back each full region with wired, zeroed kernel memory. */
	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdr_addr,
		    (vm_size_t)f_buffer_size, KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}

		kd_bufs[i].kdr_size = f_buffer_size;
	}
	/* The trailing partial region, sized to the remainder (i == f_buffers here). */
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdr_addr,
		    (vm_size_t)p_buffer_size, KMA_DATA | KMA_ZERO, tag) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}

		kd_bufs[i].kdr_size = p_buffer_size;
	}

	/*
	 * Push every storage unit onto the free list (LIFO: the list head
	 * ends up pointing at the last unit of the last region).
	 */
	count_storage_units = 0;
	for (i = 0; i < kd_data_page->kdb_region_count; i++) {
		struct kd_storage *kds;
		uint16_t n_elements;
		/* buffer_index/offset are 16-bit fields; prove they can't overflow. */
		static_assert(N_STORAGE_UNITS_PER_BUFFER <= UINT16_MAX);
		assert(kd_bufs[i].kdr_size <= N_STORAGE_UNITS_PER_BUFFER *
		    sizeof(struct kd_storage));

		n_elements = kd_bufs[i].kdr_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdr_addr;

		for (uint16_t n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page->kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page->kds_free_list.offset;

			kd_ctrl_page->kds_free_list.buffer_index = i;
			kd_ctrl_page->kds_free_list.offset = n;
		}
		count_storage_units += n_elements;
	}

	/* Record the count actually provisioned (regions are rounded per-unit). */
	kd_data_page->kdb_storage_count = count_storage_units;

	for (i = 0; i < ncpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = false;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page->kdc_flags |= KDBG_BUFINIT;

	kd_ctrl_page->kdc_storage_used = 0;
out:
	if (error) {
		/* Tears down any partial allocations recorded on kd_data_page. */
		delete_buffers(kd_ctrl_page, kd_data_page);
	}

	return error;
}
215*c54f35caSApple OSS Distributions
/*
 * Free everything create_buffers() allocated and reset the control page's
 * buffer-related state.  Safe to call with partially-initialized state
 * (it is also the error path of create_buffers()): NULL/zero fields are
 * skipped.
 */
void
delete_buffers(struct kd_control *kd_ctrl_page,
    struct kd_buffer *kd_data_page)
{
	unsigned int i;
	int kdb_region_count = kd_data_page->kdb_region_count;

	struct kd_bufinfo *kdbip = kd_data_page->kdb_info;
	struct kd_region *kd_bufs = kd_data_page->kd_bufs;

	if (kd_bufs) {
		/* Free each region's backing memory before the region array itself. */
		for (i = 0; i < kdb_region_count; i++) {
			if (kd_bufs[i].kdr_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdr_addr, (vm_size_t)kd_bufs[i].kdr_size);
			}
		}
		kfree_type(struct kd_region, kdb_region_count, kd_bufs);

		kd_data_page->kd_bufs = NULL;
		kd_data_page->kdb_region_count = 0;
	}
	if (kd_data_page->kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kd_data_page->kdcopybuf, kd_ctrl_page->kdebug_kdcopybuf_size);

		kd_data_page->kdcopybuf = NULL;
	}
	/* The free list pointed into the regions just freed; clear it. */
	kd_ctrl_page->kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		kfree_type(struct kd_bufinfo, kd_ctrl_page->alloc_cpus, kdbip);
		kd_data_page->kdb_info = NULL;
	}
	kd_ctrl_page->kdc_coprocs = NULL;
	kd_ctrl_page->kdebug_cpus = 0;
	kd_ctrl_page->alloc_cpus = 0;
	kd_ctrl_page->kdc_flags &= ~KDBG_BUFINIT;
}
253*c54f35caSApple OSS Distributions
/*
 * Attach a fresh storage unit to the tail of the given CPU's list, either
 * from the free list or — when wrapping — by stealing the oldest full unit
 * from some CPU.  Returns true on success, false when tracing had to be
 * disabled (no-wrap mode hit the end, or TRACE mode found no victim).
 * Runs under the storage spinlock with interrupts disabled.
 */
static bool
allocate_storage_unit(struct kd_control *kd_ctrl_page,
    struct kd_buffer *kd_data_page, int cpu)
{
	union kds_ptr kdsp;
	struct kd_storage *kdsp_actual, *kdsp_next_actual;
	struct kd_bufinfo *kdbip, *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t oldest_ts, ts;
	bool retval = true;
	struct kd_region *kd_bufs;

	int intrs_en = kdebug_storage_lock(kd_ctrl_page);

	kdbp = &kd_data_page->kdb_info[cpu];
	kd_bufs = kd_data_page->kd_bufs;
	kdbip = kd_data_page->kdb_info;

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < kd_ctrl_page->kdebug_events_per_storage_unit) {
			goto out;
		}
	}

	if ((kdsp = kd_ctrl_page->kds_free_list).raw != KDS_PTR_NULL) {
		/*
		 * If there's a free page, grab it from the free list.
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
		kd_ctrl_page->kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page->kdc_storage_used++;
	} else {
		/*
		 * Otherwise, we're going to lose events and repurpose the oldest
		 * storage unit we can find.
		 */
		if (kd_ctrl_page->kdc_live_flags & KDBG_NOWRAP) {
			/* No-wrap mode: stop tracing entirely rather than overwrite. */
			kd_ctrl_page->kdc_emit = KDEMIT_DISABLE;
			kd_ctrl_page->kdc_live_flags |= KDBG_WRAPPED;
			kdebug_enable = 0;
			kd_ctrl_page->enabled = 0;
			commpage_update_kdebug_state();
			kdbp->kd_lostevents = true;
			retval = false;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = UINT64_MAX;

		/* Scan every CPU's list head for the oldest full storage unit. */
		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page->kdebug_cpus]; kdbp_try++) {
			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}

			kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < kd_ctrl_page->kdebug_events_per_storage_unit) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}

			/*
			 * When wrapping, steal the storage unit with the
			 * earliest timestamp on its last event, instead of the
			 * earliest timestamp on the first event. This allows a
			 * storage unit with more recent events to be preserved,
			 * even if the storage unit contains events that are
			 * older than those found in other CPUs.
			 */
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[kd_ctrl_page->kdebug_events_per_storage_unit - 1]);
			if (ts < oldest_ts) {
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL && kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
			/* TRACE mode with nothing to steal: disable tracing. */
			kd_ctrl_page->kdc_emit = KDEMIT_DISABLE;
			kdebug_enable = 0;
			kd_ctrl_page->enabled = 0;
			commpage_update_kdebug_state();
			retval = false;
			goto out;
		}
		/*
		 * NOTE(review): in non-TRACE modes this dereferences kdbp_vict
		 * without a NULL check — presumably those modes guarantee a
		 * victim always exists; confirm against the TRIAGE setup.
		 */
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		/* Mark the victim's successor (or the CPU itself) as having lost events. */
		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
			kdsp_next_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdbp_vict->kd_list_head);
			kdsp_next_actual->kds_lostevents = true;
		} else {
			kdbp_vict->kd_lostevents = true;
		}

		/* Readers use kdc_oldest_time to skip events stolen out from under them. */
		if (kd_ctrl_page->kdc_oldest_time < oldest_ts) {
			kd_ctrl_page->kdc_oldest_time = oldest_ts;
		}
		kd_ctrl_page->kdc_live_flags |= KDBG_WRAPPED;
	}

	/* TRIAGE always stamps with continuous time; TRACE follows its flag. */
	if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
		kdsp_actual->kds_timestamp = kdebug_timestamp();
	} else {
		kdsp_actual->kds_timestamp = mach_continuous_time();
	}

	/* Reset the (new or stolen) unit and link it at this CPU's tail. */
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = false;
	kdsp_actual->kds_bufindx = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL) {
		kdbp->kd_list_head = kdsp;
	} else {
		POINTER_FROM_KDS_PTR(kd_bufs, kdbp->kd_list_tail)->kds_next = kdsp;
	}
	kdbp->kd_list_tail = kdsp;
out:
	kdebug_storage_unlock(kd_ctrl_page, intrs_en);

	return retval;
}
390*c54f35caSApple OSS Distributions
/*
 * Return the storage unit identified by kdsp_raw to the free list, but only
 * if it is still the head of the given CPU's list (it may have been stolen
 * by allocate_storage_unit() in the meantime).  Runs under the storage
 * spinlock with interrupts disabled.
 */
static void
release_storage_unit(struct kd_control *kd_ctrl_page, struct kd_buffer *kd_data_page, int cpu, uint32_t kdsp_raw)
{
	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	union kds_ptr kdsp;

	kdbp = &kd_data_page->kdb_info[cpu];

	kdsp.raw = kdsp_raw;

	int intrs_en = kdebug_storage_lock(kd_ctrl_page);

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we having nothing to do in this context
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kd_data_page->kd_bufs, kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		/* Push the unit back onto the free list (LIFO). */
		kdsp_actual->kds_next = kd_ctrl_page->kds_free_list;
		kd_ctrl_page->kds_free_list = kdsp;

		kd_ctrl_page->kdc_storage_used--;
	}

	kdebug_storage_unlock(kd_ctrl_page, intrs_en);
}
426*c54f35caSApple OSS Distributions
427*c54f35caSApple OSS Distributions bool
kdebug_disable_wrap(struct kd_control * ctl,kdebug_emit_filter_t * old_emit,kdebug_live_flags_t * old_live)428*c54f35caSApple OSS Distributions kdebug_disable_wrap(struct kd_control *ctl,
429*c54f35caSApple OSS Distributions kdebug_emit_filter_t *old_emit, kdebug_live_flags_t *old_live)
430*c54f35caSApple OSS Distributions {
431*c54f35caSApple OSS Distributions int intrs_en = kdebug_storage_lock(ctl);
432*c54f35caSApple OSS Distributions
433*c54f35caSApple OSS Distributions *old_emit = ctl->kdc_emit;
434*c54f35caSApple OSS Distributions *old_live = ctl->kdc_live_flags;
435*c54f35caSApple OSS Distributions
436*c54f35caSApple OSS Distributions bool wrapped = ctl->kdc_live_flags & KDBG_WRAPPED;
437*c54f35caSApple OSS Distributions ctl->kdc_live_flags &= ~KDBG_WRAPPED;
438*c54f35caSApple OSS Distributions ctl->kdc_live_flags |= KDBG_NOWRAP;
439*c54f35caSApple OSS Distributions
440*c54f35caSApple OSS Distributions kdebug_storage_unlock(ctl, intrs_en);
441*c54f35caSApple OSS Distributions
442*c54f35caSApple OSS Distributions return wrapped;
443*c54f35caSApple OSS Distributions }
444*c54f35caSApple OSS Distributions
445*c54f35caSApple OSS Distributions static void
_enable_wrap(struct kd_control * kd_ctrl_page,kdebug_emit_filter_t emit)446*c54f35caSApple OSS Distributions _enable_wrap(struct kd_control *kd_ctrl_page, kdebug_emit_filter_t emit)
447*c54f35caSApple OSS Distributions {
448*c54f35caSApple OSS Distributions int intrs_en = kdebug_storage_lock(kd_ctrl_page);
449*c54f35caSApple OSS Distributions kd_ctrl_page->kdc_live_flags &= ~KDBG_NOWRAP;
450*c54f35caSApple OSS Distributions if (emit) {
451*c54f35caSApple OSS Distributions kd_ctrl_page->kdc_emit = emit;
452*c54f35caSApple OSS Distributions }
453*c54f35caSApple OSS Distributions kdebug_storage_unlock(kd_ctrl_page, intrs_en);
454*c54f35caSApple OSS Distributions }
455*c54f35caSApple OSS Distributions
456*c54f35caSApple OSS Distributions __attribute__((always_inline))
457*c54f35caSApple OSS Distributions void
kernel_debug_write(struct kd_control *kd_ctrl_page,
    struct kd_buffer *kd_data_page,
    struct kd_record kd_rec)
{
	uint64_t now = 0;
	uint32_t bindx;
	kd_buf *kd;
	int cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union kds_ptr kds_raw;

	/*
	 * Record one event into the per-CPU storage chain. Preemption is
	 * disabled so this thread stays on one CPU's lock-free structures
	 * for the duration of the write.
	 */
	disable_preemption();

	if (kd_ctrl_page->enabled == 0) {
		goto out;
	}

	/* kd_rec.cpu == -1 means "attribute to the current CPU". */
	if (kd_rec.cpu == -1) {
		cpu = cpu_number();
	} else {
		cpu = kd_rec.cpu;
	}

	kdbp = &kd_data_page->kdb_info[cpu];

	bool timestamp_is_continuous = kdbp->continuous_timestamps;

	/*
	 * kd_rec.timestamp == -1 means "timestamp the event now".  Otherwise,
	 * convert the caller-provided timestamp into the timebase currently
	 * in use (continuous vs. absolute) before masking and range checks.
	 */
	if (kd_rec.timestamp != -1) {
		if (kdebug_using_continuous_time()) {
			if (!timestamp_is_continuous) {
				kd_rec.timestamp = absolutetime_to_continuoustime(kd_rec.timestamp);
			}
		} else {
			if (timestamp_is_continuous) {
				kd_rec.timestamp = continuoustime_to_absolutetime(kd_rec.timestamp);
			}
		}
		kd_rec.timestamp &= KDBG_TIMESTAMP_MASK;
		/*
		 * Drop events older than what a reader has already merged,
		 * but remember the latest such timestamp so the reader can
		 * emit a "past events" marker (see latest_past_event_timestamp
		 * consumption in kernel_debug_read).
		 */
		if (kd_rec.timestamp < kd_ctrl_page->kdc_oldest_time) {
			if (kdbp->latest_past_event_timestamp < kd_rec.timestamp) {
				kdbp->latest_past_event_timestamp = kd_rec.timestamp;
			}
			goto out;
		}
	}

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kd_data_page->kd_bufs, kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else {
		kdsp_actual = NULL;
		/* No storage unit yet -- force allocation below. */
		bindx = kd_ctrl_page->kdebug_events_per_storage_unit;
	}

	/* Current storage unit missing or full: get a fresh one and retry. */
	if (kdsp_actual == NULL || bindx >= kd_ctrl_page->kdebug_events_per_storage_unit) {
		if (allocate_storage_unit(kd_ctrl_page, kd_data_page, cpu) == false) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}

	if (kd_rec.timestamp != -1) {
		/*
		 * IOP entries can be allocated before xnu allocates and inits the buffer
		 * And, Intel uses a special 0 value as a early tracing timestamp sentinel
		 * to set the start of trace-time-start-of-interest.
		 */
		if (kd_rec.timestamp < kdsp_actual->kds_timestamp) {
			kdsp_actual->kds_timestamp = kd_rec.timestamp;
		}
		now = kd_rec.timestamp;
	} else {
		/* Timestamp source depends on mode: trace vs. triage timebase. */
		if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
			now = kdebug_timestamp() & KDBG_TIMESTAMP_MASK;
		} else {
			now = mach_continuous_time() & KDBG_TIMESTAMP_MASK;
		}
	}

	/*
	 * Claim slot bindx with a CAS on the buffer index; on contention
	 * (another writer on this CPU's chain, e.g. an IOP), re-read the
	 * tail and try again.
	 */
	if (!OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) {
		goto retry_q;
	}

	kd = &kdsp_actual->kds_records[bindx];

	if (kd_ctrl_page->kdc_flags & KDBG_DEBUGID_64) {
		/*DebugID has been passed in arg 4*/
		kd->debugid = 0;
	} else {
		kd->debugid = kd_rec.debugid;
	}

	kd->arg1 = kd_rec.arg1;
	kd->arg2 = kd_rec.arg2;
	kd->arg3 = kd_rec.arg3;
	kd->arg4 = kd_rec.arg4;
	kd->arg5 = kd_rec.arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	/* Publish the record: readers key off kds_bufcnt, not kds_bufindx. */
	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);

out:
	enable_preemption();
}
571*c54f35caSApple OSS Distributions
572*c54f35caSApple OSS Distributions // Read events from kdebug storage units into a user space buffer or file.
573*c54f35caSApple OSS Distributions //
574*c54f35caSApple OSS Distributions // This code runs while events are emitted -- storage unit allocation and
575*c54f35caSApple OSS Distributions // deallocation wll synchronize with the emitters. Only one reader per control
576*c54f35caSApple OSS Distributions // structure is allowed.
577*c54f35caSApple OSS Distributions int
kernel_debug_read(struct kd_control * kd_ctrl_page,struct kd_buffer * kd_data_page,user_addr_t buffer,size_t * number,vnode_t vp,vfs_context_t ctx,uint32_t file_version)578*c54f35caSApple OSS Distributions kernel_debug_read(struct kd_control *kd_ctrl_page,
579*c54f35caSApple OSS Distributions struct kd_buffer *kd_data_page, user_addr_t buffer, size_t *number,
580*c54f35caSApple OSS Distributions vnode_t vp, vfs_context_t ctx, uint32_t file_version)
581*c54f35caSApple OSS Distributions {
582*c54f35caSApple OSS Distributions size_t count;
583*c54f35caSApple OSS Distributions unsigned int cpu, min_cpu;
584*c54f35caSApple OSS Distributions uint64_t barrier_min = 0, barrier_max = 0, t, earliest_time;
585*c54f35caSApple OSS Distributions int error = 0;
586*c54f35caSApple OSS Distributions kd_buf *tempbuf;
587*c54f35caSApple OSS Distributions uint32_t rcursor;
588*c54f35caSApple OSS Distributions kd_buf lostevent;
589*c54f35caSApple OSS Distributions union kds_ptr kdsp;
590*c54f35caSApple OSS Distributions bool traced_retrograde = false;
591*c54f35caSApple OSS Distributions struct kd_storage *kdsp_actual;
592*c54f35caSApple OSS Distributions struct kd_bufinfo *kdbp;
593*c54f35caSApple OSS Distributions struct kd_bufinfo *min_kdbp;
594*c54f35caSApple OSS Distributions size_t tempbuf_count;
595*c54f35caSApple OSS Distributions uint32_t tempbuf_number;
596*c54f35caSApple OSS Distributions kdebug_emit_filter_t old_emit;
597*c54f35caSApple OSS Distributions uint32_t old_live_flags;
598*c54f35caSApple OSS Distributions bool out_of_events = false;
599*c54f35caSApple OSS Distributions bool wrapped = false;
600*c54f35caSApple OSS Distributions bool set_preempt = true;
601*c54f35caSApple OSS Distributions bool should_disable = false;
602*c54f35caSApple OSS Distributions
603*c54f35caSApple OSS Distributions struct kd_bufinfo *kdbip = kd_data_page->kdb_info;
604*c54f35caSApple OSS Distributions struct kd_region *kd_bufs = kd_data_page->kd_bufs;
605*c54f35caSApple OSS Distributions
606*c54f35caSApple OSS Distributions assert(number != NULL);
607*c54f35caSApple OSS Distributions count = *number / sizeof(kd_buf);
608*c54f35caSApple OSS Distributions *number = 0;
609*c54f35caSApple OSS Distributions
610*c54f35caSApple OSS Distributions if (count == 0 || !(kd_ctrl_page->kdc_flags & KDBG_BUFINIT) || kd_data_page->kdcopybuf == 0) {
611*c54f35caSApple OSS Distributions return EINVAL;
612*c54f35caSApple OSS Distributions }
613*c54f35caSApple OSS Distributions
614*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRIAGE) {
615*c54f35caSApple OSS Distributions /*
616*c54f35caSApple OSS Distributions * A corpse can be created due to 'TASK_HAS_TOO_MANY_THREADS'
617*c54f35caSApple OSS Distributions * and that can be handled by a callout thread that already
618*c54f35caSApple OSS Distributions * has the eager-preemption set.
619*c54f35caSApple OSS Distributions * So check to see if we are dealing with one such thread.
620*c54f35caSApple OSS Distributions */
621*c54f35caSApple OSS Distributions set_preempt = !(thread_is_eager_preempt(current_thread()));
622*c54f35caSApple OSS Distributions }
623*c54f35caSApple OSS Distributions
624*c54f35caSApple OSS Distributions if (set_preempt) {
625*c54f35caSApple OSS Distributions thread_set_eager_preempt(current_thread());
626*c54f35caSApple OSS Distributions }
627*c54f35caSApple OSS Distributions
628*c54f35caSApple OSS Distributions memset(&lostevent, 0, sizeof(lostevent));
629*c54f35caSApple OSS Distributions lostevent.debugid = TRACE_LOST_EVENTS;
630*c54f35caSApple OSS Distributions
631*c54f35caSApple OSS Distributions /*
632*c54f35caSApple OSS Distributions * Capture the current time. Only sort events that have occured
633*c54f35caSApple OSS Distributions * before now. Since the IOPs are being flushed here, it is possible
634*c54f35caSApple OSS Distributions * that events occur on the AP while running live tracing.
635*c54f35caSApple OSS Distributions */
636*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
637*c54f35caSApple OSS Distributions barrier_max = kdebug_timestamp() & KDBG_TIMESTAMP_MASK;
638*c54f35caSApple OSS Distributions } else {
639*c54f35caSApple OSS Distributions barrier_max = mach_continuous_time() & KDBG_TIMESTAMP_MASK;
640*c54f35caSApple OSS Distributions }
641*c54f35caSApple OSS Distributions
642*c54f35caSApple OSS Distributions /*
643*c54f35caSApple OSS Distributions * Disable wrap so storage units cannot be stolen out from underneath us
644*c54f35caSApple OSS Distributions * while merging events.
645*c54f35caSApple OSS Distributions *
646*c54f35caSApple OSS Distributions * Because we hold ktrace_lock, no other control threads can be playing
647*c54f35caSApple OSS Distributions * with kdc_flags. The code that emits new events could be running,
648*c54f35caSApple OSS Distributions * but it grabs kdc_storage_lock if it needs to acquire a new storage
649*c54f35caSApple OSS Distributions * chunk, which is where it examines kdc_flags. If it is adding to
650*c54f35caSApple OSS Distributions * the same chunk we're reading from, check for that below.
651*c54f35caSApple OSS Distributions */
652*c54f35caSApple OSS Distributions wrapped = kdebug_disable_wrap(kd_ctrl_page, &old_emit, &old_live_flags);
653*c54f35caSApple OSS Distributions
654*c54f35caSApple OSS Distributions if (count > kd_data_page->kdb_event_count) {
655*c54f35caSApple OSS Distributions count = kd_data_page->kdb_event_count;
656*c54f35caSApple OSS Distributions }
657*c54f35caSApple OSS Distributions
658*c54f35caSApple OSS Distributions if ((tempbuf_count = count) > kd_ctrl_page->kdebug_kdcopybuf_count) {
659*c54f35caSApple OSS Distributions tempbuf_count = kd_ctrl_page->kdebug_kdcopybuf_count;
660*c54f35caSApple OSS Distributions }
661*c54f35caSApple OSS Distributions
662*c54f35caSApple OSS Distributions /*
663*c54f35caSApple OSS Distributions * If the buffers have wrapped, do not emit additional lost events for the
664*c54f35caSApple OSS Distributions * oldest storage units.
665*c54f35caSApple OSS Distributions */
666*c54f35caSApple OSS Distributions if (wrapped) {
667*c54f35caSApple OSS Distributions kd_ctrl_page->kdc_live_flags &= ~KDBG_WRAPPED;
668*c54f35caSApple OSS Distributions
669*c54f35caSApple OSS Distributions for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page->kdebug_cpus; cpu++, kdbp++) {
670*c54f35caSApple OSS Distributions if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
671*c54f35caSApple OSS Distributions continue;
672*c54f35caSApple OSS Distributions }
673*c54f35caSApple OSS Distributions kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
674*c54f35caSApple OSS Distributions kdsp_actual->kds_lostevents = false;
675*c54f35caSApple OSS Distributions }
676*c54f35caSApple OSS Distributions }
677*c54f35caSApple OSS Distributions
678*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRIAGE) {
679*c54f35caSApple OSS Distributions /*
680*c54f35caSApple OSS Distributions * In TRIAGE mode we want to extract all the current
681*c54f35caSApple OSS Distributions * records regardless of where we stopped reading last
682*c54f35caSApple OSS Distributions * time so that we have the best shot at getting older
683*c54f35caSApple OSS Distributions * records for threads before the buffers are wrapped.
684*c54f35caSApple OSS Distributions * So set:-
685*c54f35caSApple OSS Distributions * a) kd_prev_timebase to 0 so we (re-)consider older records
686*c54f35caSApple OSS Distributions * b) readlast to 0 to initiate the search from the
687*c54f35caSApple OSS Distributions * 1st record.
688*c54f35caSApple OSS Distributions */
689*c54f35caSApple OSS Distributions for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page->kdebug_cpus; cpu++, kdbp++) {
690*c54f35caSApple OSS Distributions kdbp->kd_prev_timebase = 0;
691*c54f35caSApple OSS Distributions if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
692*c54f35caSApple OSS Distributions continue;
693*c54f35caSApple OSS Distributions }
694*c54f35caSApple OSS Distributions kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
695*c54f35caSApple OSS Distributions kdsp_actual->kds_readlast = 0;
696*c54f35caSApple OSS Distributions }
697*c54f35caSApple OSS Distributions }
698*c54f35caSApple OSS Distributions
699*c54f35caSApple OSS Distributions /*
700*c54f35caSApple OSS Distributions * Capture the earliest time where there are events for all CPUs and don't
701*c54f35caSApple OSS Distributions * emit events with timestamps prior.
702*c54f35caSApple OSS Distributions */
703*c54f35caSApple OSS Distributions barrier_min = kd_ctrl_page->kdc_oldest_time;
704*c54f35caSApple OSS Distributions
705*c54f35caSApple OSS Distributions while (count) {
706*c54f35caSApple OSS Distributions tempbuf = kd_data_page->kdcopybuf;
707*c54f35caSApple OSS Distributions tempbuf_number = 0;
708*c54f35caSApple OSS Distributions
709*c54f35caSApple OSS Distributions if (wrapped) {
710*c54f35caSApple OSS Distributions /*
711*c54f35caSApple OSS Distributions * Emit a lost events tracepoint to indicate that previous events
712*c54f35caSApple OSS Distributions * were lost -- the thread map cannot be trusted. A new one must
713*c54f35caSApple OSS Distributions * be taken so tools can analyze the trace in a backwards-facing
714*c54f35caSApple OSS Distributions * fashion.
715*c54f35caSApple OSS Distributions */
716*c54f35caSApple OSS Distributions kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, 0);
717*c54f35caSApple OSS Distributions *tempbuf = lostevent;
718*c54f35caSApple OSS Distributions wrapped = false;
719*c54f35caSApple OSS Distributions goto nextevent;
720*c54f35caSApple OSS Distributions }
721*c54f35caSApple OSS Distributions
722*c54f35caSApple OSS Distributions /* While space left in merged events scratch buffer. */
723*c54f35caSApple OSS Distributions while (tempbuf_count) {
724*c54f35caSApple OSS Distributions bool lostevents = false;
725*c54f35caSApple OSS Distributions int lostcpu = 0;
726*c54f35caSApple OSS Distributions earliest_time = UINT64_MAX;
727*c54f35caSApple OSS Distributions min_kdbp = NULL;
728*c54f35caSApple OSS Distributions min_cpu = 0;
729*c54f35caSApple OSS Distributions
730*c54f35caSApple OSS Distributions /* Check each CPU's buffers for the earliest event. */
731*c54f35caSApple OSS Distributions for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page->kdebug_cpus; cpu++, kdbp++) {
732*c54f35caSApple OSS Distributions /* Skip CPUs without data in their oldest storage unit. */
733*c54f35caSApple OSS Distributions if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
734*c54f35caSApple OSS Distributions next_cpu:
735*c54f35caSApple OSS Distributions continue;
736*c54f35caSApple OSS Distributions }
737*c54f35caSApple OSS Distributions /* From CPU data to buffer header to buffer. */
738*c54f35caSApple OSS Distributions kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
739*c54f35caSApple OSS Distributions
740*c54f35caSApple OSS Distributions next_event:
741*c54f35caSApple OSS Distributions /* The next event to be read from this buffer. */
742*c54f35caSApple OSS Distributions rcursor = kdsp_actual->kds_readlast;
743*c54f35caSApple OSS Distributions
744*c54f35caSApple OSS Distributions /* Skip this buffer if there are no events left. */
745*c54f35caSApple OSS Distributions if (rcursor == kdsp_actual->kds_bufindx) {
746*c54f35caSApple OSS Distributions continue;
747*c54f35caSApple OSS Distributions }
748*c54f35caSApple OSS Distributions
749*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRIAGE) {
750*c54f35caSApple OSS Distributions /*
751*c54f35caSApple OSS Distributions * TRIAGE mode record keeping doesn't (currently)
752*c54f35caSApple OSS Distributions * use lostevent markers. It also doesn't want to
753*c54f35caSApple OSS Distributions * call release_storage_unit() in this read call.
754*c54f35caSApple OSS Distributions * It expects the buffers to wrap and records reclaimed
755*c54f35caSApple OSS Distributions * in that way solely.
756*c54f35caSApple OSS Distributions */
757*c54f35caSApple OSS Distributions t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
758*c54f35caSApple OSS Distributions goto skip_record_checks;
759*c54f35caSApple OSS Distributions }
760*c54f35caSApple OSS Distributions
761*c54f35caSApple OSS Distributions /*
762*c54f35caSApple OSS Distributions * Check that this storage unit wasn't stolen and events were
763*c54f35caSApple OSS Distributions * lost. This must have happened while wrapping was disabled
764*c54f35caSApple OSS Distributions * in this function.
765*c54f35caSApple OSS Distributions */
766*c54f35caSApple OSS Distributions if (kdsp_actual->kds_lostevents) {
767*c54f35caSApple OSS Distributions lostevents = true;
768*c54f35caSApple OSS Distributions kdsp_actual->kds_lostevents = false;
769*c54f35caSApple OSS Distributions
770*c54f35caSApple OSS Distributions /*
771*c54f35caSApple OSS Distributions * The earliest event we can trust is the first one in this
772*c54f35caSApple OSS Distributions * stolen storage unit.
773*c54f35caSApple OSS Distributions */
774*c54f35caSApple OSS Distributions uint64_t lost_time =
775*c54f35caSApple OSS Distributions kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
776*c54f35caSApple OSS Distributions if (kd_ctrl_page->kdc_oldest_time < lost_time) {
777*c54f35caSApple OSS Distributions /*
778*c54f35caSApple OSS Distributions * If this is the first time we've seen lost events for
779*c54f35caSApple OSS Distributions * this gap, record its timestamp as the oldest
780*c54f35caSApple OSS Distributions * timestamp we're willing to merge for the lost events
781*c54f35caSApple OSS Distributions * tracepoint.
782*c54f35caSApple OSS Distributions */
783*c54f35caSApple OSS Distributions kd_ctrl_page->kdc_oldest_time = barrier_min = lost_time;
784*c54f35caSApple OSS Distributions lostcpu = cpu;
785*c54f35caSApple OSS Distributions }
786*c54f35caSApple OSS Distributions }
787*c54f35caSApple OSS Distributions
788*c54f35caSApple OSS Distributions t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
789*c54f35caSApple OSS Distributions
790*c54f35caSApple OSS Distributions if (t > barrier_max) {
791*c54f35caSApple OSS Distributions goto next_cpu;
792*c54f35caSApple OSS Distributions }
793*c54f35caSApple OSS Distributions if (t < kdsp_actual->kds_timestamp) {
794*c54f35caSApple OSS Distributions /*
795*c54f35caSApple OSS Distributions * This indicates the event emitter hasn't completed
796*c54f35caSApple OSS Distributions * filling in the event (becuase we're looking at the
797*c54f35caSApple OSS Distributions * buffer that the record head is using). The max barrier
798*c54f35caSApple OSS Distributions * timestamp should have saved us from seeing these kinds
799*c54f35caSApple OSS Distributions * of things, but other CPUs might be slow on the up-take.
800*c54f35caSApple OSS Distributions *
801*c54f35caSApple OSS Distributions * Bail out so we don't get out-of-order events by
802*c54f35caSApple OSS Distributions * continuing to read events from other CPUs' events.
803*c54f35caSApple OSS Distributions */
804*c54f35caSApple OSS Distributions out_of_events = true;
805*c54f35caSApple OSS Distributions break;
806*c54f35caSApple OSS Distributions }
807*c54f35caSApple OSS Distributions
808*c54f35caSApple OSS Distributions /*
809*c54f35caSApple OSS Distributions * Ignore events that have aged out due to wrapping or storage
810*c54f35caSApple OSS Distributions * unit exhaustion while merging events.
811*c54f35caSApple OSS Distributions */
812*c54f35caSApple OSS Distributions if (t < barrier_min) {
813*c54f35caSApple OSS Distributions kdsp_actual->kds_readlast++;
814*c54f35caSApple OSS Distributions if (kdsp_actual->kds_readlast >= kd_ctrl_page->kdebug_events_per_storage_unit) {
815*c54f35caSApple OSS Distributions release_storage_unit(kd_ctrl_page, kd_data_page, cpu, kdsp.raw);
816*c54f35caSApple OSS Distributions
817*c54f35caSApple OSS Distributions if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
818*c54f35caSApple OSS Distributions goto next_cpu;
819*c54f35caSApple OSS Distributions }
820*c54f35caSApple OSS Distributions kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
821*c54f35caSApple OSS Distributions }
822*c54f35caSApple OSS Distributions goto next_event;
823*c54f35caSApple OSS Distributions }
824*c54f35caSApple OSS Distributions
825*c54f35caSApple OSS Distributions /*
826*c54f35caSApple OSS Distributions * Don't worry about merging any events -- just walk through
827*c54f35caSApple OSS Distributions * the CPUs and find the latest timestamp of lost events.
828*c54f35caSApple OSS Distributions */
829*c54f35caSApple OSS Distributions if (lostevents) {
830*c54f35caSApple OSS Distributions continue;
831*c54f35caSApple OSS Distributions }
832*c54f35caSApple OSS Distributions skip_record_checks:
833*c54f35caSApple OSS Distributions if (t < earliest_time) {
834*c54f35caSApple OSS Distributions earliest_time = t;
835*c54f35caSApple OSS Distributions min_kdbp = kdbp;
836*c54f35caSApple OSS Distributions min_cpu = cpu;
837*c54f35caSApple OSS Distributions }
838*c54f35caSApple OSS Distributions }
839*c54f35caSApple OSS Distributions if (lostevents) {
840*c54f35caSApple OSS Distributions /*
841*c54f35caSApple OSS Distributions * If any lost events were hit in the buffers, emit an event
842*c54f35caSApple OSS Distributions * with the latest timestamp.
843*c54f35caSApple OSS Distributions */
844*c54f35caSApple OSS Distributions kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, lostcpu);
845*c54f35caSApple OSS Distributions *tempbuf = lostevent;
846*c54f35caSApple OSS Distributions tempbuf->arg1 = 1;
847*c54f35caSApple OSS Distributions goto nextevent;
848*c54f35caSApple OSS Distributions }
849*c54f35caSApple OSS Distributions if (min_kdbp == NULL) {
850*c54f35caSApple OSS Distributions /* All buffers ran empty. */
851*c54f35caSApple OSS Distributions out_of_events = true;
852*c54f35caSApple OSS Distributions }
853*c54f35caSApple OSS Distributions if (out_of_events) {
854*c54f35caSApple OSS Distributions break;
855*c54f35caSApple OSS Distributions }
856*c54f35caSApple OSS Distributions
857*c54f35caSApple OSS Distributions kdsp = min_kdbp->kd_list_head;
858*c54f35caSApple OSS Distributions kdsp_actual = POINTER_FROM_KDS_PTR(kd_bufs, kdsp);
859*c54f35caSApple OSS Distributions
860*c54f35caSApple OSS Distributions if (min_kdbp->latest_past_event_timestamp != 0) {
861*c54f35caSApple OSS Distributions if (kdbg_debug) {
862*c54f35caSApple OSS Distributions printf("kdebug: PAST EVENT: debugid %#8x: "
863*c54f35caSApple OSS Distributions "time %lld from CPU %u "
864*c54f35caSApple OSS Distributions "(barrier at time %lld)\n",
865*c54f35caSApple OSS Distributions kdsp_actual->kds_records[rcursor].debugid,
866*c54f35caSApple OSS Distributions t, cpu, barrier_min);
867*c54f35caSApple OSS Distributions }
868*c54f35caSApple OSS Distributions
869*c54f35caSApple OSS Distributions kdbg_set_timestamp_and_cpu(tempbuf, earliest_time, min_cpu);
870*c54f35caSApple OSS Distributions tempbuf->arg1 = (kd_buf_argtype)min_kdbp->latest_past_event_timestamp;
871*c54f35caSApple OSS Distributions tempbuf->arg2 = 0;
872*c54f35caSApple OSS Distributions tempbuf->arg3 = 0;
873*c54f35caSApple OSS Distributions tempbuf->arg4 = 0;
874*c54f35caSApple OSS Distributions tempbuf->debugid = TRACE_PAST_EVENTS;
875*c54f35caSApple OSS Distributions min_kdbp->latest_past_event_timestamp = 0;
876*c54f35caSApple OSS Distributions goto nextevent;
877*c54f35caSApple OSS Distributions }
878*c54f35caSApple OSS Distributions
879*c54f35caSApple OSS Distributions /* Copy earliest event into merged events scratch buffer. */
880*c54f35caSApple OSS Distributions *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
881*c54f35caSApple OSS Distributions kd_buf *earliest_event = tempbuf;
882*c54f35caSApple OSS Distributions if (kd_control_trace.kdc_flags & KDBG_MATCH_DISABLE) {
883*c54f35caSApple OSS Distributions kd_event_matcher *match = &kd_control_trace.disable_event_match;
884*c54f35caSApple OSS Distributions kd_event_matcher *mask = &kd_control_trace.disable_event_mask;
885*c54f35caSApple OSS Distributions if ((earliest_event->debugid & mask->kem_debugid) == match->kem_debugid &&
886*c54f35caSApple OSS Distributions (earliest_event->arg1 & mask->kem_args[0]) == match->kem_args[0] &&
887*c54f35caSApple OSS Distributions (earliest_event->arg2 & mask->kem_args[1]) == match->kem_args[1] &&
888*c54f35caSApple OSS Distributions (earliest_event->arg3 & mask->kem_args[2]) == match->kem_args[2] &&
889*c54f35caSApple OSS Distributions (earliest_event->arg4 & mask->kem_args[3]) == match->kem_args[3]) {
890*c54f35caSApple OSS Distributions should_disable = true;
891*c54f35caSApple OSS Distributions }
892*c54f35caSApple OSS Distributions }
893*c54f35caSApple OSS Distributions
894*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
895*c54f35caSApple OSS Distributions if (kdsp_actual->kds_readlast == kd_ctrl_page->kdebug_events_per_storage_unit) {
896*c54f35caSApple OSS Distributions release_storage_unit(kd_ctrl_page, kd_data_page, min_cpu, kdsp.raw);
897*c54f35caSApple OSS Distributions }
898*c54f35caSApple OSS Distributions }
899*c54f35caSApple OSS Distributions
900*c54f35caSApple OSS Distributions /*
901*c54f35caSApple OSS Distributions * Watch for out of order timestamps (from IOPs).
902*c54f35caSApple OSS Distributions */
903*c54f35caSApple OSS Distributions if (earliest_time < min_kdbp->kd_prev_timebase) {
904*c54f35caSApple OSS Distributions /*
905*c54f35caSApple OSS Distributions * If we haven't already, emit a retrograde events event.
906*c54f35caSApple OSS Distributions * Otherwise, ignore this event.
907*c54f35caSApple OSS Distributions */
908*c54f35caSApple OSS Distributions if (traced_retrograde) {
909*c54f35caSApple OSS Distributions continue;
910*c54f35caSApple OSS Distributions }
911*c54f35caSApple OSS Distributions if (kdbg_debug) {
912*c54f35caSApple OSS Distributions printf("kdebug: RETRO EVENT: debugid %#8x: "
913*c54f35caSApple OSS Distributions "time %lld from CPU %u "
914*c54f35caSApple OSS Distributions "(barrier at time %lld)\n",
915*c54f35caSApple OSS Distributions kdsp_actual->kds_records[rcursor].debugid,
916*c54f35caSApple OSS Distributions t, cpu, barrier_min);
917*c54f35caSApple OSS Distributions }
918*c54f35caSApple OSS Distributions
919*c54f35caSApple OSS Distributions kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase,
920*c54f35caSApple OSS Distributions kdbg_get_cpu(tempbuf));
921*c54f35caSApple OSS Distributions tempbuf->arg1 = tempbuf->debugid;
922*c54f35caSApple OSS Distributions tempbuf->arg2 = (kd_buf_argtype)earliest_time;
923*c54f35caSApple OSS Distributions tempbuf->arg3 = 0;
924*c54f35caSApple OSS Distributions tempbuf->arg4 = 0;
925*c54f35caSApple OSS Distributions tempbuf->debugid = TRACE_RETROGRADE_EVENTS;
926*c54f35caSApple OSS Distributions traced_retrograde = true;
927*c54f35caSApple OSS Distributions } else {
928*c54f35caSApple OSS Distributions min_kdbp->kd_prev_timebase = earliest_time;
929*c54f35caSApple OSS Distributions }
930*c54f35caSApple OSS Distributions nextevent:
931*c54f35caSApple OSS Distributions tempbuf_count--;
932*c54f35caSApple OSS Distributions tempbuf_number++;
933*c54f35caSApple OSS Distributions tempbuf++;
934*c54f35caSApple OSS Distributions
935*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE &&
936*c54f35caSApple OSS Distributions (RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE) {
937*c54f35caSApple OSS Distributions break;
938*c54f35caSApple OSS Distributions }
939*c54f35caSApple OSS Distributions }
940*c54f35caSApple OSS Distributions
941*c54f35caSApple OSS Distributions if (tempbuf_number) {
942*c54f35caSApple OSS Distributions /*
943*c54f35caSApple OSS Distributions * Remember the latest timestamp of events that we've merged so we
944*c54f35caSApple OSS Distributions * don't think we've lost events later.
945*c54f35caSApple OSS Distributions */
946*c54f35caSApple OSS Distributions uint64_t latest_time = kdbg_get_timestamp(tempbuf - 1);
947*c54f35caSApple OSS Distributions if (kd_ctrl_page->kdc_oldest_time < latest_time) {
948*c54f35caSApple OSS Distributions kd_ctrl_page->kdc_oldest_time = latest_time;
949*c54f35caSApple OSS Distributions }
950*c54f35caSApple OSS Distributions
951*c54f35caSApple OSS Distributions if (kd_ctrl_page->mode == KDEBUG_MODE_TRACE) {
952*c54f35caSApple OSS Distributions extern int kernel_debug_trace_write_to_file(user_addr_t *buffer,
953*c54f35caSApple OSS Distributions size_t *number, size_t *count, size_t tempbuf_number,
954*c54f35caSApple OSS Distributions vnode_t vp, vfs_context_t ctx, uint32_t file_version);
955*c54f35caSApple OSS Distributions error = kernel_debug_trace_write_to_file(&buffer, number,
956*c54f35caSApple OSS Distributions &count, tempbuf_number, vp, ctx, file_version);
957*c54f35caSApple OSS Distributions } else if (kd_ctrl_page->mode == KDEBUG_MODE_TRIAGE) {
958*c54f35caSApple OSS Distributions memcpy((void*)buffer, kd_data_page->kdcopybuf,
959*c54f35caSApple OSS Distributions tempbuf_number * sizeof(kd_buf));
960*c54f35caSApple OSS Distributions buffer += tempbuf_number * sizeof(kd_buf);
961*c54f35caSApple OSS Distributions } else {
962*c54f35caSApple OSS Distributions panic("kdebug: invalid kdebug mode %d", kd_ctrl_page->mode);
963*c54f35caSApple OSS Distributions }
964*c54f35caSApple OSS Distributions if (error) {
965*c54f35caSApple OSS Distributions *number = 0;
966*c54f35caSApple OSS Distributions error = EINVAL;
967*c54f35caSApple OSS Distributions break;
968*c54f35caSApple OSS Distributions }
969*c54f35caSApple OSS Distributions count -= tempbuf_number;
970*c54f35caSApple OSS Distributions *number += tempbuf_number;
971*c54f35caSApple OSS Distributions }
972*c54f35caSApple OSS Distributions if (out_of_events) {
973*c54f35caSApple OSS Distributions break;
974*c54f35caSApple OSS Distributions }
975*c54f35caSApple OSS Distributions
976*c54f35caSApple OSS Distributions if ((tempbuf_count = count) > kd_ctrl_page->kdebug_kdcopybuf_count) {
977*c54f35caSApple OSS Distributions tempbuf_count = kd_ctrl_page->kdebug_kdcopybuf_count;
978*c54f35caSApple OSS Distributions }
979*c54f35caSApple OSS Distributions }
980*c54f35caSApple OSS Distributions if ((old_live_flags & KDBG_NOWRAP) == 0) {
981*c54f35caSApple OSS Distributions _enable_wrap(kd_ctrl_page, old_emit);
982*c54f35caSApple OSS Distributions }
983*c54f35caSApple OSS Distributions
984*c54f35caSApple OSS Distributions if (set_preempt) {
985*c54f35caSApple OSS Distributions thread_clear_eager_preempt(current_thread());
986*c54f35caSApple OSS Distributions }
987*c54f35caSApple OSS Distributions
988*c54f35caSApple OSS Distributions if (should_disable) {
989*c54f35caSApple OSS Distributions kernel_debug_disable();
990*c54f35caSApple OSS Distributions }
991*c54f35caSApple OSS Distributions
992*c54f35caSApple OSS Distributions return error;
993*c54f35caSApple OSS Distributions }
994