/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__)

#include <mach/mach_types.h>
#include <IOKit/IOTypes.h>
#include <kdp/output_stages/output_stages.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)
#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30

TUNABLE(uint64_t, shmem_timeout_sec, "shmem_timeout_sec", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);
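/*
 * The handshake timeout can be overridden at boot via the "shmem_timeout_sec"
 * boot-arg; it bounds how long the kernel waits for the debugger to advance
 * the protocol before giving up.
 */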

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

/*
 * xnu shared memory hardware debugger support
 *
 * A hardware debugger can connect, read the consistent debug
 * header to determine the physical location of the handshake
 * structure and communicate using commands in the structure as
 * defined below.
 *
 * Currently used for sending compressed coredumps to
 * astris.
 */

__enum_closed_decl(xhsdci_status_t, uint32_t, {
	XHSDCI_STATUS_NONE = 0,           /* default status */
	XHSDCI_STATUS_KERNEL_BUSY = 1,    /* kernel is busy with other procedure */
	XHSDCI_STATUS_KERNEL_READY = 2,   /* kernel ready to begin command */
	XHSDCI_COREDUMP_BEGIN = 3,        /* indicates hardware debugger is ready to begin consuming coredump info */
	XHSDCI_COREDUMP_BUF_READY = 4,    /* indicates the kernel has populated the buffer */
	XHSDCI_COREDUMP_BUF_EMPTY = 5,    /* indicates hardware debugger is done consuming the current data */
	XHSDCI_COREDUMP_STATUS_DONE = 6,  /* indicates last compressed data is in buffer */
	XHSDCI_COREDUMP_ERROR = 7,        /* indicates an error was encountered */
	XHSDCI_COREDUMP_REMOTE_DONE = 8,  /* indicates that hardware debugger is done */
	XHSDCI_COREDUMP_INFO = 9,         /* announces new file available for consumption */
	XHSDCI_COREDUMP_ACK = 10,         /* remote side ack/nack for the announced file */
});
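
/*
 * Typical flow of a dump over this protocol (kernel perspective):
 *   KERNEL_READY        - kernel is spinning in panic_spin_shmcon(), waiting
 *   COREDUMP_BEGIN      - debugger asks for a dump; kern_dump() runs
 *   COREDUMP_INFO       - kernel announces the next file (name, flags)
 *   COREDUMP_ACK        - debugger accepts or rejects the announced file
 *   BUF_READY/BUF_EMPTY - buffers of compressed data are handed back and forth
 *   STATUS_DONE         - kernel has placed the last buffer
 *   REMOTE_DONE         - debugger confirms it has consumed everything
 * Either side may publish COREDUMP_ERROR to abort the exchange.
 */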

typedef union xhsdci_file_flags {
	uint64_t value;
	struct {
		bool xff_ack :1;      /* Remote side ACKed file transfer */
		bool xff_gzip :1;     /* File is gzipped */
		uint8_t xff_type :4;  /* coredump type */
	};
} xhsdci_file_flags_t;

struct xnu_hw_shmem_dbg_command_info {
	volatile xhsdci_status_t xhsdci_status;
	uint32_t xhsdci_seq_no;
	volatile uint64_t xhsdci_buf_phys_addr;
	volatile uint32_t xhsdci_buf_data_length;
	/* end of version 0 structure */
	uint64_t xhsdci_coredump_total_size_uncomp;
	uint64_t xhsdci_coredump_total_size_sent_uncomp;
	uint32_t xhsdci_page_size;
	/* end of version 1 structure */
	char xhsdci_file_name[64]; /* name of a core that XNU offers */
	xhsdci_file_flags_t xhsdci_file_flags; /* file flags */
} __attribute__((packed));

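/*
 * New fields are only appended to the structure above; the version constant
 * below is published alongside the handshake structure's physical address so
 * the remote side knows which layout it is reading.
 */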
#define CUR_XNU_HWSDCI_STRUCT_VERS 2

struct kdp_hw_shmem_dbg_buf_elm {
	vm_offset_t khsd_buf;
	uint32_t khsd_data_length;
	STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

struct shmem_stage_data {
	bool signal_done;
	uint32_t seq_no;
	uint64_t contact_deadline;
	uint64_t contact_deadline_interval;

	struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf;
	struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf;
};

static uint32_t kdp_hw_shmem_dbg_bufsize;
static struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
    STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
    STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);
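
/*
 * Buffer life cycle: each buffer starts on free_hw_shmem_dbg_bufs, is pulled
 * off as currently_filling_buf while the coredump writer copies data into it,
 * moves to hw_shmem_dbg_bufs_to_flush once full, becomes currently_flushing_buf
 * while the debugger reads it, and finally returns to the free list.
 */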


#pragma mark Shared memory protocol implementation

/*
 * Waits for the remote side to move the protocol to the expected state.
 * Checks for errors and timeouts.
 */
static kern_return_t
shmem_wait_for_state(struct shmem_stage_data *data, xhsdci_status_t status)
{
	data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

	while (hwsd_info->xhsdci_status != status) {
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
			kern_coredump_log(NULL, "%s: Detected remote side error (state %d, waiting %d)\n",
			    __func__, hwsd_info->xhsdci_status, status);
			return KERN_FAILURE;
		}

		if (mach_absolute_time() > data->contact_deadline) {
			kern_coredump_log(NULL, "%s: Timed out waiting for the reply (state %d, waiting %d)\n",
			    __func__, hwsd_info->xhsdci_status, status);
			return KERN_OPERATION_TIMED_OUT;
		}
	}

	if (hwsd_info->xhsdci_seq_no != (data->seq_no + 1)) {
		kern_coredump_log(NULL, "%s: Detected stale/invalid seq num (state %d, waiting %d). Expected: %d, received %d\n",
		    __func__, hwsd_info->xhsdci_status, status, (data->seq_no + 1), hwsd_info->xhsdci_seq_no);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Publish new state, update seq number and flush cache.
 */
static kern_return_t
shmem_set_status(struct shmem_stage_data *data, xhsdci_status_t status)
{
	data->seq_no = hwsd_info->xhsdci_seq_no;
	hwsd_info->xhsdci_seq_no = ++(data->seq_no);
	hwsd_info->xhsdci_status = status;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	return KERN_SUCCESS;
}
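
/*
 * Sequence-number handoff: shmem_set_status() adopts the remote side's last
 * published value and writes value + 1; shmem_wait_for_state() (and the
 * BUF_EMPTY path below) then expects the remote side to have bumped it by one
 * again when it replies, which is how stale or replayed updates are detected.
 */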

#pragma mark Output stage implementation

/*
 * Announces the file to be written to the other side and waits for a response.
 *
 * Return value meaning:
 * KERN_SUCCESS   - A coredump should proceed
 * KERN_NODE_DOWN - Other side is not interested
 * KERN_*         - An error occurred
 */
static kern_return_t
shmem_stage_announce(struct kdp_output_stage *stage, const char *corename, uint8_t coretype)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
	kern_return_t ret = KERN_SUCCESS;

	/* Don't signal XHSDCI_COREDUMP_STATUS_DONE unless the remote side has seen XHSDCI_COREDUMP_INFO. */
	data->signal_done = false;

	/*
	 * This is the first state after XHSDCI_COREDUMP_BEGIN is set.
	 * If that's the case then reset the sequence number to 1.
	 */
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
		data->seq_no = 1;
	}

	/* Announce new corefile to the remote side. */
	strlcpy(hwsd_info->xhsdci_file_name, corename, sizeof(hwsd_info->xhsdci_file_name));
	hwsd_info->xhsdci_file_flags.xff_gzip = true;
	hwsd_info->xhsdci_file_flags.xff_type = (coretype & 0xf);
	shmem_set_status(data, XHSDCI_COREDUMP_INFO);

	/* wait for response */
	ret = shmem_wait_for_state(data, XHSDCI_COREDUMP_ACK);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(NULL, "%s: no ACK from remote side: %d\n", __func__, ret);
		return ret;
	}

	/* Remote side has seen XHSDCI_COREDUMP_INFO so it will expect XHSDCI_COREDUMP_STATUS_DONE. */
	data->signal_done = true;

	/* Return whether transfer has been acked/nacked. */
	return (hwsd_info->xhsdci_file_flags.xff_ack) ? KERN_SUCCESS : KERN_NODE_DOWN;
}

/*
 * Whenever a new file gets transferred, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static kern_return_t
shmem_stage_reset(struct kdp_output_stage *stage, const char *corename, kern_coredump_type_t coretype)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;
	kern_return_t res = KERN_SUCCESS;

	/*
	 * Announce new file and wait for remote side's ACK.
	 */
	res = shmem_stage_announce(stage, corename, coretype);
	if (res != KERN_SUCCESS) {
		return res;
	}

	/*
	 * Proceed with the stage output reset.
	 */
	STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
		cur_elm->khsd_data_length = 0;
	}

	if (data->currently_filling_buf != NULL) {
		data->currently_filling_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_filling_buf, khsd_elms);
		data->currently_filling_buf = NULL;
	}

	if (data->currently_flushing_buf != NULL) {
		data->currently_flushing_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		data->currently_flushing_buf = NULL;
	}

	STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
		cur_elm->khsd_data_length = 0;

		STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;

	/*
	 * Do not modify sequence numbers here. This is not a message to the remote
	 * side; it only sets the initial state for the file transfer itself.
	 */
	hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;

	data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

	stage->kos_bypass = false;
	stage->kos_bytes_written = 0;

	return KERN_SUCCESS;
}

/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static kern_return_t
shmem_dbg_process_buffers(struct kdp_output_stage *stage)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
		kern_coredump_log(NULL, "%s: Detected remote error, terminating...\n", __func__);
		return kIOReturnError;
	} else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
		if (hwsd_info->xhsdci_seq_no != (data->seq_no + 1)) {
			kern_coredump_log(NULL, "%s: Detected stale/invalid seq num. Expected: %d, received %d\n",
			    __func__, (data->seq_no + 1), hwsd_info->xhsdci_seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			return kIOReturnError;
		}

		data->seq_no = hwsd_info->xhsdci_seq_no;

		if (data->currently_flushing_buf != NULL) {
			data->currently_flushing_buf->khsd_data_length = 0;
			STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		}

		data->currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
		if (data->currently_flushing_buf != NULL) {
			STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			hwsd_info->xhsdci_buf_phys_addr = kvtophys(data->currently_flushing_buf->khsd_buf);
			hwsd_info->xhsdci_buf_data_length = data->currently_flushing_buf->khsd_data_length;
			hwsd_info->xhsdci_coredump_total_size_uncomp = stage->kos_outstate->kcos_totalbytes;
			hwsd_info->xhsdci_coredump_total_size_sent_uncomp = stage->kos_outstate->kcos_bytes_written;
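			/*
			 * The data buffers are carved out of the same contiguous
			 * allocation as hwsd_info, so flushing the whole region pushes
			 * both the updated handshake fields and the buffer contents to
			 * the point of coherency before BUF_READY is signalled.
			 */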
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
			shmem_set_status(data, XHSDCI_COREDUMP_BUF_READY);
		}

		data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

		return KERN_SUCCESS;
	} else if (mach_absolute_time() > data->contact_deadline) {
		kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
		kern_coredump_log(NULL, "No contact in %llu seconds\n", shmem_timeout_sec);

		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		return kIOReturnError;
	}

	return KERN_SUCCESS;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns KERN_SUCCESS on success,
 * otherwise the error returned by shmem_dbg_process_buffers().
 */
static kern_return_t
shmem_dbg_get_buffer(struct kdp_output_stage *stage)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	assert(data->currently_filling_buf == NULL);

	while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
		ret = shmem_dbg_process_buffers(stage);
		if (ret) {
			return ret;
		}
	}

	data->currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
	STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

	assert(data->currently_filling_buf->khsd_data_length == 0);
	return ret;
}


/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static kern_return_t
shmem_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
    __unused char *corename, uint64_t length, void *panic_data)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	assert(STAILQ_NEXT(stage, kos_next) == NULL);
	assert(length < UINT32_MAX);
	uint32_t bytes_remaining = (uint32_t) length;
	uint32_t bytes_to_copy;

	/*
	 * Flush the buffers and signal that coredump is finished.
	 */
	if (request == KDP_EOF || request == KDP_SEEK) {
		assert(data->currently_filling_buf == NULL);

		/*
		 * Do not signal XHSDCI_COREDUMP_STATUS_DONE if no file transfer is in
		 * progress.
		 *
		 * If connection is already in ERROR state then avoid touching status
		 * field. Remote side is waiting for protocol restart (KERNEL_READY).
		 */
		if (!data->signal_done || hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
			return KERN_SUCCESS;
		}

		/*
		 * Wait until we've flushed all the buffers
		 * before setting the connection status to done.
		 */
		while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
		    data->currently_flushing_buf != NULL) {
			ret = shmem_dbg_process_buffers(stage);
			if (KERN_SUCCESS != ret) {
				kern_coredump_log(NULL, "(%s) shmem_dbg_process_buffers failed with error 0x%x\n", __func__, ret);
				return ret;
			}
		}

		/*
		 * If the last status we saw indicates that the buffer was
		 * empty and we didn't flush any new data since then, we expect
		 * the sequence number to still match the last we saw.
		 */
		if (hwsd_info->xhsdci_seq_no < data->seq_no) {
			kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
			    data->seq_no, hwsd_info->xhsdci_seq_no);
			return KERN_FAILURE;
		}

		kern_coredump_log(NULL, "Setting coredump status as done!\n");
		shmem_set_status(data, XHSDCI_COREDUMP_STATUS_DONE);

		/* wait for remote side to signal it is done */
		ret = shmem_wait_for_state(data, XHSDCI_COREDUMP_REMOTE_DONE);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "%s: remote is not done: %d\n", __func__, ret);
			return ret;
		}

		return ret;
	}

	assert(request == KDP_DATA);

	/*
	 * The output procedure is called with length == 0 and panic_data == NULL
	 * to flush any remaining output at the end of the coredump before
	 * we call it a final time to mark the dump as done.
	 */
	if (length == 0) {
		assert(panic_data == NULL);

		if (data->currently_filling_buf != NULL) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
			data->currently_filling_buf = NULL;
		}

		/*
		 * Move the current buffer along if possible.
		 */
		ret = shmem_dbg_process_buffers(stage);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(%s) shmem_dbg_process_buffers failed with error 0x%x\n", __func__, ret);
		}
		return ret;
	}

	while (bytes_remaining != 0) {
		/*
		 * Make sure we have a buffer to work with.
		 */
		while (data->currently_filling_buf == NULL) {
			ret = shmem_dbg_get_buffer(stage);
			if (KERN_SUCCESS != ret) {
				kern_coredump_log(NULL, "(%s) shmem_dbg_get_buffer failed with error 0x%x\n", __func__, ret);
				return ret;
			}
		}

		assert(kdp_hw_shmem_dbg_bufsize >= data->currently_filling_buf->khsd_data_length);
		bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
		    data->currently_filling_buf->khsd_data_length);
		bcopy(panic_data, (void *)(data->currently_filling_buf->khsd_buf + data->currently_filling_buf->khsd_data_length),
		    bytes_to_copy);

		data->currently_filling_buf->khsd_data_length += bytes_to_copy;

		if (data->currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
			data->currently_filling_buf = NULL;

			/*
			 * Move it along if possible.
			 */
			ret = shmem_dbg_process_buffers(stage);
			if (KERN_SUCCESS != ret) {
				kern_coredump_log(NULL, "(%s) shmem_dbg_process_buffers failed with error 0x%x\n", __func__, ret);
				return ret;
			}
		}

		stage->kos_bytes_written += bytes_to_copy;
		bytes_remaining -= bytes_to_copy;
		panic_data = (void *) ((uintptr_t)panic_data + bytes_to_copy);
	}

	return ret;
}

static void
shmem_stage_free(struct kdp_output_stage *stage)
{
	kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);

	stage->kos_data = NULL;
	stage->kos_data_size = 0;
	stage->kos_initialized = false;
}

kern_return_t
shmem_stage_initialize(struct kdp_output_stage *stage)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = NULL;

	assert(stage != NULL);
	assert(stage->kos_initialized == false);
	assert(stage->kos_data == NULL);

	if (!hwsd_info) {
		vm_offset_t kdp_core_hw_shmem_buf = 0;
		struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
		cache_info_t *cpuid_cache_info = NULL;

		/*
		 * We need to allocate physically contiguous memory since astris isn't capable
		 * of doing address translations while the CPUs are running.
		 */
		kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
		kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf,
		    kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
		    0, 0, KMA_NOFAIL | KMA_KOBJECT | KMA_DATA | KMA_PERMANENT,
		    VM_KERN_MEMORY_DIAG);
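		/*
		 * This region is allocated exactly once and intentionally never freed
		 * (KMA_PERMANENT); KMA_NOFAIL means a failure would panic rather than
		 * return, so no error handling is needed here.
		 */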

		/*
		 * Put the connection info structure at the beginning of this buffer and adjust
		 * the buffer size accordingly.
		 */
		hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
		hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
		hwsd_info->xhsdci_seq_no = 0;
		hwsd_info->xhsdci_buf_phys_addr = 0;
		hwsd_info->xhsdci_buf_data_length = 0;
		hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
		hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
		hwsd_info->xhsdci_page_size = PAGE_SIZE;

		cpuid_cache_info = cache_info();
		assert(cpuid_cache_info != NULL);

		kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
		/* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
		kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (vm_offset_t) cpuid_cache_info->c_linesz);
		kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
		kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
		/* The buffer size should be a cache-line length multiple */
		kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));
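		/*
		 * For example, with a 64-byte cache line ROUNDDOWN(4064, 64) == 4032,
		 * so each per-buffer size ends up a multiple of 4032 bytes: cache-line
		 * aligned and close to the optimal Astris read size.
		 */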

		STAILQ_INIT(&free_hw_shmem_dbg_bufs);
		STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

		for (int i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
			cur_elm = zalloc_permanent_type(typeof(*cur_elm));
			assert(cur_elm != NULL);

			cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
			cur_elm->khsd_data_length = 0;

			kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

			STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
		}

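		/*
		 * Publishing the physical address of the handshake structure and the
		 * protocol version through the consistent debug records is what lets
		 * an external debugger locate the structure (see the comment at the
		 * top of this file).
		 */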
		PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
		PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
	}

	stage->kos_data_size = sizeof(struct shmem_stage_data);

	ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
	    KMA_DATA_SHARED, VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	data = (struct shmem_stage_data*) stage->kos_data;
	data->signal_done = false;
	data->seq_no = 0;
	data->contact_deadline = 0;
	nanoseconds_to_absolutetime(shmem_timeout_sec * NSEC_PER_SEC, &(data->contact_deadline_interval));
	data->currently_filling_buf = NULL;
	data->currently_flushing_buf = NULL;

	stage->kos_funcs.kosf_reset = shmem_stage_reset;
	stage->kos_funcs.kosf_outproc = shmem_stage_outproc;
	stage->kos_funcs.kosf_free = shmem_stage_free;

	stage->kos_initialized = true;

	return KERN_SUCCESS;
}

void
shmem_mark_as_busy(void)
{
	if (hwsd_info != NULL) {
		hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
	}
}

void
shmem_unmark_as_busy(void)
{
	if (hwsd_info != NULL) {
		hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
	}
}

void
panic_spin_shmcon(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	if (hwsd_info == NULL) {
		kern_coredump_log(NULL, "handshake structure not initialized\n");
		return;
	}

	kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
	kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
	    hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

	hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
	hwsd_info->xhsdci_seq_no = 0;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

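	/*
	 * Spin here forever: each time the debugger raises XHSDCI_COREDUMP_BEGIN
	 * a dump is produced, and once it signals REMOTE_DONE (or an error is
	 * reported) the handshake is re-armed to KERNEL_READY so that another
	 * dump can be requested.
	 */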
	for (;;) {
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
			kern_dump(KERN_DUMP_HW_SHMEM_DBG);
		}

		if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
		    (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
			hwsd_info->xhsdci_seq_no = 0;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}
#ifdef __arm64__
		/* Avoid stalling in WFE on arm32, which may not have a maximum WFE timeout like arm64. */
		__builtin_arm_wfe();
#endif
	}
}

#endif /* defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__) */
