1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__)
30
31 #include <mach/mach_types.h>
32 #include <IOKit/IOTypes.h>
33 #include <kdp/output_stages/output_stages.h>
34 #include <kdp/kdp_core.h>
35 #include <kdp/processor_core.h>
36 #include <arm/cpuid.h>
37 #include <arm/caches_internal.h>
38 #include <pexpert/arm/consistent_debug.h>
39
40 #if !defined(ROUNDUP)
41 #define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
42 #endif
43
44 #if !defined(ROUNDDOWN)
45 #define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
46 #endif
47
#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
/* Parenthesized: the unadorned `64 * 1024` breaks under operator precedence
 * when the macro is used inside a larger expression (e.g. division). */
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)
#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
51
52 /*
53 * Astris can read up to 4064 bytes at a time over
54 * the probe, so we should try to make our buffer
55 * size a multiple of this to make reads by astris
56 * (the bottleneck) most efficient.
57 */
58 #define OPTIMAL_ASTRIS_READSIZE 4064
59
60 /*
61 * xnu shared memory hardware debugger support
62 *
63 * A hardware debugger can connect, read the consistent debug
64 * header to determine the physical location of the handshake
65 * structure and communicate using commands in the structure as
66 * defined below.
67 *
68 * Currently used for sending compressed coredumps to
69 * astris.
70 */
/*
 * Handshake structure shared with the hardware debugger. It lives at the
 * start of the physically contiguous shared-memory region and is located
 * by the debugger through the consistent-debug registry.
 */
struct xnu_hw_shmem_dbg_command_info {
	volatile uint32_t xhsdci_status;            /* protocol state, one of the XHSDCI_* values below */
	uint32_t xhsdci_seq_no;                     /* sequence number; each side advances it by one per handshake step */
	volatile uint64_t xhsdci_buf_phys_addr;     /* physical address of the data buffer being offered to the debugger */
	volatile uint32_t xhsdci_buf_data_length;   /* number of valid bytes in that buffer */
	/* end of version 0 structure */
	uint64_t xhsdci_coredump_total_size_uncomp;      /* total uncompressed coredump size reported by the output state */
	uint64_t xhsdci_coredump_total_size_sent_uncomp; /* uncompressed bytes accounted for so far (progress indicator) */
	uint32_t xhsdci_page_size;                  /* kernel PAGE_SIZE, published for the debugger */
} __attribute__((packed));
81
82 #define CUR_XNU_HWSDCI_STRUCT_VERS 1
83
84 #define XHSDCI_STATUS_NONE 0 /* default status */
85 #define XHSDCI_STATUS_KERNEL_BUSY 1 /* kernel is busy with other procedure */
86 #define XHSDCI_STATUS_KERNEL_READY 2 /* kernel ready to begin command */
87 #define XHSDCI_COREDUMP_BEGIN 3 /* indicates hardware debugger is ready to begin consuming coredump info */
88 #define XHSDCI_COREDUMP_BUF_READY 4 /* indicates the kernel has populated the buffer */
89 #define XHSDCI_COREDUMP_BUF_EMPTY 5 /* indicates hardware debugger is done consuming the current data */
90 #define XHSDCI_COREDUMP_STATUS_DONE 6 /* indicates last compressed data is in buffer */
91 #define XHSDCI_COREDUMP_ERROR 7 /* indicates an error was encountered */
92 #define XHSDCI_COREDUMP_REMOTE_DONE 8 /* indicates that hardware debugger is done */
93
/*
 * One coredump data buffer carved out of the shared-memory region.
 * Each element lives on exactly one of the free / to-flush queues,
 * or is held as the currently-filling or currently-flushing buffer.
 */
struct kdp_hw_shmem_dbg_buf_elm {
	vm_offset_t khsd_buf;      /* kernel virtual address of the buffer */
	uint32_t khsd_data_length; /* number of valid bytes currently stored in the buffer */
	STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms; /* queue linkage */
};
99
/* Per-stage bookkeeping for the shared-memory output stage. */
struct shmem_stage_data {
	uint32_t seq_no;                    /* last handshake sequence number we exchanged with the debugger */
	uint64_t contact_deadline;          /* absolute time after which the debugger is considered unresponsive */
	uint64_t contact_deadline_interval; /* timeout length in absolute-time units (from KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS) */

	struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf;  /* buffer being filled with coredump data, or NULL */
	struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf; /* buffer currently handed to the debugger, or NULL */
};
108
109 static uint32_t kdp_hw_shmem_dbg_bufsize;
110 static struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;
111 static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
112 STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
113 static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
114 STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);
115
116 /*
117 * Whenever we start a coredump, make sure the buffers
118 * are all on the free queue and the state is as expected.
119 * The buffers may have been left in a different state if
120 * a previous coredump attempt failed.
121 */
static void
shmem_stage_reset(struct kdp_output_stage *stage)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

	/* Buffers already on the free queue just need their lengths cleared. */
	STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
		cur_elm->khsd_data_length = 0;
	}

	/* A buffer left half-filled by a failed attempt goes back on the free queue. */
	if (data->currently_filling_buf != NULL) {
		data->currently_filling_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_filling_buf, khsd_elms);
		data->currently_filling_buf = NULL;
	}

	/* Likewise for a buffer that was mid-flush to the debugger. */
	if (data->currently_flushing_buf != NULL) {
		data->currently_flushing_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		data->currently_flushing_buf = NULL;
	}

	/* Drain anything still queued for flushing back onto the free queue. */
	STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
		cur_elm->khsd_data_length = 0;

		STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	/*
	 * Reset the shared handshake structure and flush it to the point of
	 * coherency so the hardware debugger observes a clean initial state.
	 */
	hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
	data->seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	/* Restart the clock on the debugger-contact timeout. */
	data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

	stage->kos_bypass = false;
	stage->kos_bytes_written = 0;
}
167
168 /*
169 * Tries to move buffers forward in 'progress'. If
170 * the hardware debugger is done consuming the current buffer, we
171 * can put the next one on it and move the current
172 * buffer back to the free queue.
173 */
static kern_return_t
shmem_dbg_process_buffers(struct kdp_output_stage *stage)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	/* Pull in the debugger's latest writes to the handshake structure. */
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
		kern_coredump_log(NULL, "Detected remote error, terminating...\n");
		return kIOReturnError;
	} else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
		/* The debugger acknowledges a buffer by advancing the sequence number by exactly one. */
		if (hwsd_info->xhsdci_seq_no != (data->seq_no + 1)) {
			kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
			    (data->seq_no + 1), hwsd_info->xhsdci_seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			return kIOReturnError;
		}

		data->seq_no = hwsd_info->xhsdci_seq_no;

		/* The buffer the debugger just drained can be reused. */
		if (data->currently_flushing_buf != NULL) {
			data->currently_flushing_buf->khsd_data_length = 0;
			STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		}

		/* Hand the next queued buffer (if any) to the debugger. */
		data->currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
		if (data->currently_flushing_buf != NULL) {
			STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			hwsd_info->xhsdci_buf_phys_addr = kvtophys(data->currently_flushing_buf->khsd_buf);
			hwsd_info->xhsdci_buf_data_length = data->currently_flushing_buf->khsd_data_length;
			hwsd_info->xhsdci_coredump_total_size_uncomp = stage->kos_outstate->kcos_totalbytes;
			hwsd_info->xhsdci_coredump_total_size_sent_uncomp = stage->kos_outstate->kcos_bytes_written;
			/*
			 * Flush the entire shared region (handshake + data buffers are one
			 * contiguous allocation), then publish seq_no/status last so the
			 * debugger never sees BUF_READY alongside stale buffer contents.
			 */
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
			hwsd_info->xhsdci_seq_no = ++(data->seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}

		/* We heard from the debugger; push the contact deadline out. */
		data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

		return KERN_SUCCESS;
	} else if (mach_absolute_time() > data->contact_deadline) {
		/* No acknowledgement within the timeout window: give up and flag the error. */
		kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
		kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		return kIOReturnError;
	}

	return KERN_SUCCESS;
}
228
229 /*
230 * Populates currently_filling_buf with a new buffer
231 * once one becomes available. Returns 0 on success
232 * or the value returned by shmem_dbg_process_buffers()
233 * if it is non-zero (an error).
234 */
235 static kern_return_t
shmem_dbg_get_buffer(struct kdp_output_stage * stage)236 shmem_dbg_get_buffer(struct kdp_output_stage *stage)
237 {
238 kern_return_t ret = KERN_SUCCESS;
239 struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
240
241 assert(data->currently_filling_buf == NULL);
242
243 while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
244 ret = shmem_dbg_process_buffers(stage);
245 if (ret) {
246 return ret;
247 }
248 }
249
250 data->currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
251 STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);
252
253 assert(data->currently_filling_buf->khsd_data_length == 0);
254 return ret;
255 }
256
257
258 /*
259 * Output procedure for hardware shared memory core dumps
260 *
261 * Tries to fill up the buffer completely before flushing
262 */
263 static kern_return_t
shmem_stage_outproc(struct kdp_output_stage * stage,unsigned int request,__unused char * corename,uint64_t length,void * panic_data)264 shmem_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
265 __unused char *corename, uint64_t length, void * panic_data)
266 {
267 kern_return_t ret = KERN_SUCCESS;
268 struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
269
270 assert(STAILQ_NEXT(stage, kos_next) == NULL);
271 assert(length < UINT32_MAX);
272 uint32_t bytes_remaining = (uint32_t) length;
273 uint32_t bytes_to_copy;
274
275 if (request == KDP_EOF) {
276 assert(data->currently_filling_buf == NULL);
277
278 /*
279 * Wait until we've flushed all the buffers
280 * before setting the connection status to done.
281 */
282 while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
283 data->currently_flushing_buf != NULL) {
284 ret = shmem_dbg_process_buffers(stage);
285 if (ret) {
286 return ret;
287 }
288 }
289
290 /*
291 * If the last status we saw indicates that the buffer was
292 * empty and we didn't flush any new data since then, we expect
293 * the sequence number to still match the last we saw.
294 */
295 if (hwsd_info->xhsdci_seq_no < data->seq_no) {
296 kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
297 data->seq_no, hwsd_info->xhsdci_seq_no);
298 return -1;
299 }
300
301 data->seq_no = hwsd_info->xhsdci_seq_no;
302
303 kern_coredump_log(NULL, "Setting coredump status as done!\n");
304 hwsd_info->xhsdci_seq_no = ++(data->seq_no);
305 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
306 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
307
308 return ret;
309 }
310
311 assert(request == KDP_DATA);
312
313 /*
314 * The output procedure is called with length == 0 and panic_data == NULL
315 * to flush any remaining output at the end of the coredump before
316 * we call it a final time to mark the dump as done.
317 */
318 if (length == 0) {
319 assert(panic_data == NULL);
320
321 if (data->currently_filling_buf != NULL) {
322 STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
323 data->currently_filling_buf = NULL;
324 }
325
326 /*
327 * Move the current buffer along if possible.
328 */
329 ret = shmem_dbg_process_buffers(stage);
330 return ret;
331 }
332
333 while (bytes_remaining != 0) {
334 /*
335 * Make sure we have a buffer to work with.
336 */
337 while (data->currently_filling_buf == NULL) {
338 ret = shmem_dbg_get_buffer(stage);
339 if (ret) {
340 return ret;
341 }
342 }
343
344 assert(kdp_hw_shmem_dbg_bufsize >= data->currently_filling_buf->khsd_data_length);
345 bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
346 data->currently_filling_buf->khsd_data_length);
347 bcopy(panic_data, (void *)(data->currently_filling_buf->khsd_buf + data->currently_filling_buf->khsd_data_length),
348 bytes_to_copy);
349
350 data->currently_filling_buf->khsd_data_length += bytes_to_copy;
351
352 if (data->currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
353 STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
354 data->currently_filling_buf = NULL;
355
356 /*
357 * Move it along if possible.
358 */
359 ret = shmem_dbg_process_buffers(stage);
360 if (ret) {
361 return ret;
362 }
363 }
364
365 stage->kos_bytes_written += bytes_to_copy;
366 bytes_remaining -= bytes_to_copy;
367 panic_data = (void *) ((uintptr_t)panic_data + bytes_to_copy);
368 }
369
370 return ret;
371 }
372
373 static void
shmem_stage_free(struct kdp_output_stage * stage)374 shmem_stage_free(struct kdp_output_stage *stage)
375 {
376 kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);
377
378 stage->kos_data = NULL;
379 stage->kos_data_size = 0;
380 stage->kos_initialized = false;
381 }
382
kern_return_t
shmem_stage_initialize(struct kdp_output_stage *stage)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = NULL;

	assert(stage != NULL);
	assert(stage->kos_initialized == false);
	assert(stage->kos_data == NULL);

	/* One-time global setup of the shared-memory region (shared by all stage instances). */
	if (!hwsd_info) {
		vm_offset_t kdp_core_hw_shmem_buf = 0;
		struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
		cache_info_t *cpuid_cache_info = NULL;

		/*
		 * We need to allocate physically contiguous memory since astris isn't capable
		 * of doing address translations while the CPUs are running.
		 */
		kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
		kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf,
		    kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
		    0, 0, KMA_NOFAIL | KMA_KOBJECT | KMA_DATA | KMA_PERMANENT,
		    VM_KERN_MEMORY_DIAG);

		/*
		 * Put the connection info structure at the beginning of this buffer and adjust
		 * the buffer size accordingly.
		 */
		hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
		hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
		hwsd_info->xhsdci_seq_no = 0;
		hwsd_info->xhsdci_buf_phys_addr = 0;
		hwsd_info->xhsdci_buf_data_length = 0;
		hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
		hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
		hwsd_info->xhsdci_page_size = PAGE_SIZE;

		cpuid_cache_info = cache_info();
		assert(cpuid_cache_info != NULL);

		kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
		/* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
		kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (vm_offset_t) cpuid_cache_info->c_linesz);
		/* Split the space remaining after the handshake structure evenly among the data buffers. */
		kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
		kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
		/* The buffer size should be a cache-line length multiple */
		kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

		STAILQ_INIT(&free_hw_shmem_dbg_bufs);
		STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

		/* Carve the data buffers out of the contiguous allocation and queue them as free. */
		for (int i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
			cur_elm = zalloc_permanent_type(typeof(*cur_elm));
			assert(cur_elm != NULL);

			cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
			cur_elm->khsd_data_length = 0;

			kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

			STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
		}

		/* Publish the handshake structure's physical address and version so the debugger can find it. */
		PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
		PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
	}

	/* Per-stage state allocation and initialization. */
	stage->kos_data_size = sizeof(struct shmem_stage_data);

	ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
	    KMA_DATA, VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	data = (struct shmem_stage_data*) stage->kos_data;
	data->seq_no = 0;
	data->contact_deadline = 0;
	nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC, &(data->contact_deadline_interval));
	data->currently_filling_buf = NULL;
	data->currently_flushing_buf = NULL;

	stage->kos_funcs.kosf_reset = shmem_stage_reset;
	stage->kos_funcs.kosf_outproc = shmem_stage_outproc;
	stage->kos_funcs.kosf_free = shmem_stage_free;

	stage->kos_initialized = true;

	return KERN_SUCCESS;
}
474
475 void
shmem_mark_as_busy(void)476 shmem_mark_as_busy(void)
477 {
478 if (hwsd_info != NULL) {
479 hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
480 }
481 }
482
483 void
shmem_unmark_as_busy(void)484 shmem_unmark_as_busy(void)
485 {
486 if (hwsd_info != NULL) {
487 hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
488 }
489 }
490
void
panic_spin_shmcon(void)
{
	/* Only spin for a debugger on debug-enabled systems. */
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	if (hwsd_info == NULL) {
		kern_coredump_log(NULL, "handshake structure not initialized\n");
		return;
	}

	kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
	kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
	    hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

	/* Advertise readiness and push it out to the point of coherency. */
	hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
	hwsd_info->xhsdci_seq_no = 0;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	/* Spin forever, servicing coredump requests from the hardware debugger. */
	for (;;) {
		/* Re-read the status the debugger may have written. */
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
			kern_dump(KERN_DUMP_HW_SHMEM_DBG);
		}

		/* After a completed (or failed) dump, re-arm for another attempt. */
		if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
		    (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
			hwsd_info->xhsdci_seq_no = 0;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}
#ifdef __arm64__
		/* Avoid stalling in WFE on arm32, which may not have a maximum WFE timeout like arm64. */
		__builtin_arm_wfe();
#endif
	}
}
529
530 #endif /* defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__) */
531