/* xnu-11215.81.4/osfmk/kdp/output_stages/out_shmem.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452) */
/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__)

#include <mach/mach_types.h>
#include <IOKit/IOTypes.h>
#include <kdp/output_stages/output_stages.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
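
/*
 * Worked example of the alignment macros above, assuming a power-of-two
 * alignment such as a 64-byte cache line:
 *   ROUNDUP(100, 64)    == 128
 *   ROUNDDOWN(4064, 64) == 4032
 * Both macros assume 'b' is a power of two; they are used below to carve
 * the shared buffers out on cache-line boundaries.
 */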

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE 64 * 1024
#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

/*
 * xnu shared memory hardware debugger support
 *
 * A hardware debugger can connect, read the consistent debug
 * header to determine the physical location of the handshake
 * structure and communicate using commands in the structure as
 * defined below.
 *
 * Currently used for sending compressed coredumps to
 * astris.
 */
struct xnu_hw_shmem_dbg_command_info {
	volatile uint32_t xhsdci_status;
	uint32_t xhsdci_seq_no;
	volatile uint64_t xhsdci_buf_phys_addr;
	volatile uint32_t xhsdci_buf_data_length;
	/* end of version 0 structure */
	uint64_t xhsdci_coredump_total_size_uncomp;
	uint64_t xhsdci_coredump_total_size_sent_uncomp;
	uint32_t xhsdci_page_size;
} __attribute__((packed));

#define CUR_XNU_HWSDCI_STRUCT_VERS 1

#define XHSDCI_STATUS_NONE              0 /* default status */
#define XHSDCI_STATUS_KERNEL_BUSY       1 /* kernel is busy with other procedure */
#define XHSDCI_STATUS_KERNEL_READY      2 /* kernel ready to begin command */
#define XHSDCI_COREDUMP_BEGIN           3 /* indicates hardware debugger is ready to begin consuming coredump info */
#define XHSDCI_COREDUMP_BUF_READY       4 /* indicates the kernel has populated the buffer */
#define XHSDCI_COREDUMP_BUF_EMPTY       5 /* indicates hardware debugger is done consuming the current data */
#define XHSDCI_COREDUMP_STATUS_DONE     6 /* indicates last compressed data is in buffer */
#define XHSDCI_COREDUMP_ERROR           7 /* indicates an error was encountered */
#define XHSDCI_COREDUMP_REMOTE_DONE     8 /* indicates that hardware debugger is done */
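
/*
 * Illustrative host-side sequence for the protocol above (a sketch only, not
 * Astris's actual implementation; probe_read32()/probe_write32()/
 * probe_read_bytes() are hypothetical helpers that access the handshake
 * structure at the physical address published via consistent debug):
 *
 *   probe_write32(&cmd->xhsdci_status, XHSDCI_COREDUMP_BEGIN);
 *   for (;;) {
 *           uint32_t status = probe_read32(&cmd->xhsdci_status);
 *           if (status == XHSDCI_COREDUMP_BUF_READY) {
 *                   // consume the buffer the kernel just published
 *                   probe_read_bytes(cmd->xhsdci_buf_phys_addr, cmd->xhsdci_buf_data_length);
 *                   // acknowledge: shmem_dbg_process_buffers() expects seq_no to advance by one
 *                   probe_write32(&cmd->xhsdci_seq_no, cmd->xhsdci_seq_no + 1);
 *                   probe_write32(&cmd->xhsdci_status, XHSDCI_COREDUMP_BUF_EMPTY);
 *           } else if (status == XHSDCI_COREDUMP_STATUS_DONE) {
 *                   probe_write32(&cmd->xhsdci_status, XHSDCI_COREDUMP_REMOTE_DONE);
 *                   break;
 *           }
 *   }
 */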

struct kdp_hw_shmem_dbg_buf_elm {
	vm_offset_t khsd_buf;
	uint32_t    khsd_data_length;
	STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

struct shmem_stage_data {
	uint32_t seq_no;
	uint64_t contact_deadline;
	uint64_t contact_deadline_interval;

	struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf;
	struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf;
};

static uint32_t kdp_hw_shmem_dbg_bufsize;
static struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
    STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
    STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
shmem_stage_reset(struct kdp_output_stage *stage)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

	STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
		cur_elm->khsd_data_length = 0;
	}

	if (data->currently_filling_buf != NULL) {
		data->currently_filling_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_filling_buf, khsd_elms);
		data->currently_filling_buf = NULL;
	}

	if (data->currently_flushing_buf != NULL) {
		data->currently_flushing_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		data->currently_flushing_buf = NULL;
	}

	STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
		cur_elm->khsd_data_length = 0;

		STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
	data->seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

	stage->kos_bypass = false;
	stage->kos_bytes_written = 0;
}

/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static kern_return_t
shmem_dbg_process_buffers(struct kdp_output_stage *stage)
{
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
		kern_coredump_log(NULL, "Detected remote error, terminating...\n");
		return kIOReturnError;
	} else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
		if (hwsd_info->xhsdci_seq_no != (data->seq_no + 1)) {
			kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
			    (data->seq_no + 1), hwsd_info->xhsdci_seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			return kIOReturnError;
		}

		data->seq_no = hwsd_info->xhsdci_seq_no;

		if (data->currently_flushing_buf != NULL) {
			data->currently_flushing_buf->khsd_data_length = 0;
			STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, data->currently_flushing_buf, khsd_elms);
		}

		data->currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
		if (data->currently_flushing_buf != NULL) {
			STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			hwsd_info->xhsdci_buf_phys_addr = kvtophys(data->currently_flushing_buf->khsd_buf);
			hwsd_info->xhsdci_buf_data_length = data->currently_flushing_buf->khsd_data_length;
			hwsd_info->xhsdci_coredump_total_size_uncomp = stage->kos_outstate->kcos_totalbytes;
			hwsd_info->xhsdci_coredump_total_size_sent_uncomp = stage->kos_outstate->kcos_bytes_written;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
			hwsd_info->xhsdci_seq_no = ++(data->seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}

		data->contact_deadline = mach_absolute_time() + data->contact_deadline_interval;

		return KERN_SUCCESS;
	} else if (mach_absolute_time() > data->contact_deadline) {
		kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
		kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		return kIOReturnError;
	}

	return KERN_SUCCESS;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static kern_return_t
shmem_dbg_get_buffer(struct kdp_output_stage *stage)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	assert(data->currently_filling_buf == NULL);

	while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
		ret = shmem_dbg_process_buffers(stage);
		if (ret) {
			return ret;
		}
	}

	data->currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
	STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

	assert(data->currently_filling_buf->khsd_data_length == 0);
	return ret;
}

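/*
 * Call sequence expected by shmem_stage_outproc(), inferred from the handling
 * below (a sketch only; the real caller is the kern_coredump machinery and
 * the arguments shown are illustrative):
 *
 *   shmem_stage_outproc(stage, KDP_DATA, NULL, len, buf);  // repeated for each chunk
 *   shmem_stage_outproc(stage, KDP_DATA, NULL, 0, NULL);   // flush a partially filled buffer
 *   shmem_stage_outproc(stage, KDP_EOF,  NULL, 0, NULL);   // mark the dump as done
 *
 * This stage must be the last stage in the output chain, as asserted below.
 */
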
/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static kern_return_t
shmem_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
    __unused char *corename, uint64_t length, void * panic_data)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = (struct shmem_stage_data *) stage->kos_data;

	assert(STAILQ_NEXT(stage, kos_next) == NULL);
	assert(length < UINT32_MAX);
	uint32_t bytes_remaining = (uint32_t) length;
	uint32_t bytes_to_copy;

	if (request == KDP_EOF) {
		assert(data->currently_filling_buf == NULL);

		/*
		 * Wait until we've flushed all the buffers
		 * before setting the connection status to done.
		 */
		while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
		    data->currently_flushing_buf != NULL) {
			ret = shmem_dbg_process_buffers(stage);
			if (ret) {
				return ret;
			}
		}

		/*
		 * If the last status we saw indicates that the buffer was
		 * empty and we didn't flush any new data since then, we expect
		 * the sequence number to still match the last we saw.
		 */
		if (hwsd_info->xhsdci_seq_no < data->seq_no) {
			kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
			    data->seq_no, hwsd_info->xhsdci_seq_no);
			return -1;
		}

		data->seq_no = hwsd_info->xhsdci_seq_no;

		kern_coredump_log(NULL, "Setting coredump status as done!\n");
		hwsd_info->xhsdci_seq_no = ++(data->seq_no);
		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

		return ret;
	}

	assert(request == KDP_DATA);

	/*
	 * The output procedure is called with length == 0 and panic_data == NULL
	 * to flush any remaining output at the end of the coredump before
	 * we call it a final time to mark the dump as done.
	 */
	if (length == 0) {
		assert(panic_data == NULL);

		if (data->currently_filling_buf != NULL) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
			data->currently_filling_buf = NULL;
		}

		/*
		 * Move the current buffer along if possible.
		 */
		ret = shmem_dbg_process_buffers(stage);
		return ret;
	}

	while (bytes_remaining != 0) {
		/*
		 * Make sure we have a buffer to work with.
		 */
		while (data->currently_filling_buf == NULL) {
			ret = shmem_dbg_get_buffer(stage);
			if (ret) {
				return ret;
			}
		}

		assert(kdp_hw_shmem_dbg_bufsize >= data->currently_filling_buf->khsd_data_length);
		bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
		    data->currently_filling_buf->khsd_data_length);
		bcopy(panic_data, (void *)(data->currently_filling_buf->khsd_buf + data->currently_filling_buf->khsd_data_length),
		    bytes_to_copy);

		data->currently_filling_buf->khsd_data_length += bytes_to_copy;

		if (data->currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, data->currently_filling_buf, khsd_elms);
			data->currently_filling_buf = NULL;

			/*
			 * Move it along if possible.
			 */
			ret = shmem_dbg_process_buffers(stage);
			if (ret) {
				return ret;
			}
		}

		stage->kos_bytes_written += bytes_to_copy;
		bytes_remaining -= bytes_to_copy;
		panic_data = (void *) ((uintptr_t)panic_data + bytes_to_copy);
	}

	return ret;
}

static void
shmem_stage_free(struct kdp_output_stage *stage)
{
	kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);

	stage->kos_data = NULL;
	stage->kos_data_size = 0;
	stage->kos_initialized = false;
}

kern_return_t
shmem_stage_initialize(struct kdp_output_stage *stage)
{
	kern_return_t ret = KERN_SUCCESS;
	struct shmem_stage_data *data = NULL;

	assert(stage != NULL);
	assert(stage->kos_initialized == false);
	assert(stage->kos_data == NULL);

	if (!hwsd_info) {
		vm_offset_t kdp_core_hw_shmem_buf = 0;
		struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
		cache_info_t   *cpuid_cache_info = NULL;

		/*
		 * We need to allocate physically contiguous memory since astris isn't capable
		 * of doing address translations while the CPUs are running.
		 */
		kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
		kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf,
		    kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
		    0, 0, KMA_NOFAIL | KMA_KOBJECT | KMA_DATA | KMA_PERMANENT,
		    VM_KERN_MEMORY_DIAG);

		/*
		 * Put the connection info structure at the beginning of this buffer and adjust
		 * the buffer size accordingly.
		 */
		hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
		hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
		hwsd_info->xhsdci_seq_no = 0;
		hwsd_info->xhsdci_buf_phys_addr = 0;
		hwsd_info->xhsdci_buf_data_length = 0;
		hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
		hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
		hwsd_info->xhsdci_page_size = PAGE_SIZE;

		cpuid_cache_info = cache_info();
		assert(cpuid_cache_info != NULL);

		kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
		/* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
		kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (vm_offset_t) cpuid_cache_info->c_linesz);
		kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
		kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
		/* The buffer size should be a cache-line length multiple */
		kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));
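
		/*
		 * Worked sizing example (illustrative; the real cache line size comes
		 * from cache_info() and varies by SoC). With a 64-byte cache line:
		 * the 40-byte packed handshake structure is padded out to one
		 * 64-byte line, leaving 65536 - 64 = 65472 bytes, or 32736 bytes per
		 * buffer after dividing by the two buffers. ROUNDDOWN(4064, 64) == 4032,
		 * and 32736 - (32736 % 4032) == 32256, so each buffer ends up as
		 * 8 * 4032 bytes: cache-line aligned and close to a whole number of
		 * optimal Astris-sized reads.
		 */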

		STAILQ_INIT(&free_hw_shmem_dbg_bufs);
		STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

		for (int i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
			cur_elm = zalloc_permanent_type(typeof(*cur_elm));
			assert(cur_elm != NULL);

			cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
			cur_elm->khsd_data_length = 0;

			kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

			STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
		}

		PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
		PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
	}

	stage->kos_data_size = sizeof(struct shmem_stage_data);

	ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
	    KMA_DATA, VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	data = (struct shmem_stage_data*) stage->kos_data;
	data->seq_no = 0;
	data->contact_deadline = 0;
	nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC, &(data->contact_deadline_interval));
	data->currently_filling_buf = NULL;
	data->currently_flushing_buf = NULL;

	stage->kos_funcs.kosf_reset = shmem_stage_reset;
	stage->kos_funcs.kosf_outproc = shmem_stage_outproc;
	stage->kos_funcs.kosf_free = shmem_stage_free;

	stage->kos_initialized = true;

	return KERN_SUCCESS;
}

void
shmem_mark_as_busy(void)
{
	if (hwsd_info != NULL) {
		hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
	}
}

void
shmem_unmark_as_busy(void)
{
	if (hwsd_info != NULL) {
		hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
	}
}

void
panic_spin_shmcon(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	if (hwsd_info == NULL) {
		kern_coredump_log(NULL, "handshake structure not initialized\n");
		return;
	}

	kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
	kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
	    hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

	hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
	hwsd_info->xhsdci_seq_no = 0;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	for (;;) {
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
			kern_dump(KERN_DUMP_HW_SHMEM_DBG);
		}

		if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
		    (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
			hwsd_info->xhsdci_seq_no = 0;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}
#ifdef __arm64__
		/* Avoid stalling in WFE on arm32, which may not have a maximum WFE timeout like arm64. */
		__builtin_arm_wfe();
#endif
	}
}

#endif /* defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(__arm64__) */