xref: /xnu-8792.61.2/bsd/kern/kern_core.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1991 NeXT Computer, Inc.  All rights reserved.
29  *
30  *	File:	bsd/kern/kern_core.c
31  *
32  *	This file contains machine independent code for performing core dumps.
33  *
34  */
35 #if CONFIG_COREDUMP
36 
37 #include <mach/vm_param.h>
38 #include <mach/thread_status.h>
39 #include <sys/content_protection.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/signalvar.h>
43 #include <sys/resourcevar.h>
44 #include <sys/namei.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/proc_internal.h>
47 #include <sys/kauth.h>
48 #include <sys/timeb.h>
49 #include <sys/times.h>
50 #include <sys/acct.h>
51 #include <sys/file_internal.h>
52 #include <sys/uio.h>
53 #include <sys/kernel.h>
54 #include <sys/stat.h>
55 
56 #include <mach-o/loader.h>
57 #include <mach/vm_region.h>
58 #include <mach/vm_statistics.h>
59 
60 #include <IOKit/IOBSD.h>
61 
62 #include <vm/vm_kern.h>
63 #include <vm/vm_protos.h> /* last */
64 #include <vm/vm_map.h>          /* current_map() */
65 #include <mach/mach_vm.h>       /* mach_vm_region_recurse() */
66 #include <mach/task.h>          /* task_suspend() */
67 #include <kern/task.h>          /* get_task_numacts() */
68 
69 #include <security/audit/audit.h>
70 
71 #if CONFIG_MACF
72 #include <security/mac_framework.h>
73 #endif /* CONFIG_MACF */
74 
75 #define COREDUMP_CUSTOM_LOCATION_ENTITLEMENT "com.apple.private.custom-coredump-location"
76 
/*
 * Descriptor for one thread-state flavor as it appears in the core file:
 * each LC_THREAD command is followed by (flavor, count) pairs, each pair
 * immediately followed by `count` 32-bit words of register state.
 */
typedef struct {
	int     flavor;                 /* the number for this flavor */
	mach_msg_type_number_t  count;  /* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__i386__) || defined (__x86_64__)
/* Thread-state flavors dumped per thread on Intel. */
mythread_state_flavor_t thread_flavor_array[] = {
	{x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
	{x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
	{x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
};
int mynum_flavors = 3;
#elif defined (__arm64__)
/* Thread-state flavors dumped per thread on arm64. */
mythread_state_flavor_t thread_flavor_array[] = {
	{ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT},
	/* ARM64_TODO: VFP */
	{ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT}
};
int mynum_flavors = 2;
#else
#error architecture not supported
#endif
99 
100 
/*
 * Per-task iteration state handed to collectth_state() for each thread
 * via task_act_iterate_wth_args(): where in the in-kernel header buffer
 * to write, and which flavors (with precomputed total size) to emit.
 */
typedef struct {
	vm_offset_t header;                     /* base of Mach-O header buffer */
	size_t hoffset;                         /* current write offset into buffer */
	mythread_state_flavor_t *flavors;       /* flavor table; counts rewritten per thread */
	size_t tstate_size;                     /* total state bytes following each LC_THREAD */
	size_t flavor_count;                    /* entries in flavors[] */
} tir_t;
108 
/* Free space remaining on vp's filesystem, in megabytes. */
extern int freespace_mb(vnode_t vp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
115 
/* Policy knobs consulted by coredump() below (presumably sysctl-tunable
 * elsewhere — confirm against kern_sysctl registration). */
#ifdef SECURE_KERNEL
__XNU_PRIVATE_EXTERN int do_coredump = 0;       /* default: don't dump cores */
#else
__XNU_PRIVATE_EXTERN int do_coredump = 1;       /* default: dump cores */
#endif /* SECURE_KERNEL */
__XNU_PRIVATE_EXTERN int sugid_coredump = 0; /* default: but not SGUID binaries */
122 
123 
124 /* cpu_type returns only the most generic indication of the current CPU. */
125 /* in a core we want to know the kind of process. */
126 
127 cpu_type_t
process_cpu_type(proc_t core_proc)128 process_cpu_type(proc_t core_proc)
129 {
130 	cpu_type_t what_we_think;
131 #if defined (__i386__) || defined (__x86_64__)
132 	if (IS_64BIT_PROCESS(core_proc)) {
133 		what_we_think = CPU_TYPE_X86_64;
134 	} else {
135 		what_we_think = CPU_TYPE_I386;
136 	}
137 #elif defined(__arm64__)
138 	if (IS_64BIT_PROCESS(core_proc)) {
139 		what_we_think = CPU_TYPE_ARM64;
140 	} else {
141 		what_we_think = CPU_TYPE_ARM;
142 	}
143 #endif
144 
145 	return what_we_think;
146 }
147 
148 cpu_type_t
process_cpu_subtype(proc_t core_proc)149 process_cpu_subtype(proc_t core_proc)
150 {
151 	cpu_type_t what_we_think;
152 #if defined (__i386__) || defined (__x86_64__)
153 	if (IS_64BIT_PROCESS(core_proc)) {
154 		what_we_think = CPU_SUBTYPE_X86_64_ALL;
155 	} else {
156 		what_we_think = CPU_SUBTYPE_I386_ALL;
157 	}
158 #elif defined(__arm64__)
159 	if (IS_64BIT_PROCESS(core_proc)) {
160 		what_we_think = CPU_SUBTYPE_ARM64_ALL;
161 	} else {
162 		what_we_think = CPU_SUBTYPE_ARM_ALL;
163 	}
164 #endif
165 	return what_we_think;
166 }
167 
168 static void
collectth_state(thread_t th_act,void * tirp)169 collectth_state(thread_t th_act, void *tirp)
170 {
171 	vm_offset_t     header;
172 	size_t  hoffset, i;
173 	mythread_state_flavor_t *flavors;
174 	struct thread_command   *tc;
175 	tir_t *t = (tir_t *)tirp;
176 
177 	/*
178 	 *	Fill in thread command structure.
179 	 */
180 	header = t->header;
181 	hoffset = t->hoffset;
182 	flavors = t->flavors;
183 
184 	tc = (struct thread_command *) (header + hoffset);
185 	tc->cmd = LC_THREAD;
186 	tc->cmdsize = (uint32_t)(sizeof(struct thread_command)
187 	    + t->tstate_size);
188 	hoffset += sizeof(struct thread_command);
189 	/*
190 	 * Follow with a struct thread_state_flavor and
191 	 * the appropriate thread state struct for each
192 	 * thread state flavor.
193 	 */
194 	for (i = 0; i < t->flavor_count; i++) {
195 		*(mythread_state_flavor_t *)(header + hoffset) =
196 		    flavors[i];
197 		hoffset += sizeof(mythread_state_flavor_t);
198 		thread_getstatus(th_act, flavors[i].flavor,
199 		    (thread_state_t)(header + hoffset),
200 		    &flavors[i].count);
201 		hoffset += flavors[i].count * sizeof(int);
202 	}
203 
204 	t->hoffset = hoffset;
205 }
206 
#if DEVELOPMENT || DEBUG
/* Progress/failure logging; requires a local `core_proc` in scope at each use. */
#define COREDUMPLOG(fmt, args...) printf("coredump (%s, pid %d): " fmt "\n", core_proc->p_comm, proc_getpid(core_proc), ## args)
#else
/* Compiled out on RELEASE kernels. */
#define COREDUMPLOG(fmt, args...)
#endif
212 
213 /*
214  * coredump
215  *
216  * Description:	Create a core image on the file "core" for the process
217  *		indicated
218  *
219  * Parameters:	core_proc			Process to dump core [*]
220  *				reserve_mb			If non-zero, leave filesystem with
221  *									at least this much free space.
222  *				coredump_flags	Extra options (ignore rlimit, run fsync)
223  *
224  * Returns:	0				Success
225  *		!0				Failure errno
226  *
227  * IMPORTANT:	This function can only be called on the current process, due
228  *		to assumptions below; see variable declaration section for
229  *		details.
230  */
231 #define MAX_TSTATE_FLAVORS      10
232 int
coredump(proc_t core_proc,uint32_t reserve_mb,int coredump_flags)233 coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags)
234 {
235 /* Begin assumptions that limit us to only the current process */
236 	vfs_context_t ctx = vfs_context_current();
237 	vm_map_t        map = current_map();
238 	task_t          task = current_task();
239 /* End assumptions */
240 	kauth_cred_t cred = vfs_context_ucred(ctx);
241 	int error = 0;
242 	struct vnode_attr *vap = NULL;
243 	size_t          thread_count, segment_count;
244 	size_t          command_size, header_size, tstate_size;
245 	size_t          hoffset;
246 	off_t           foffset;
247 	mach_vm_offset_t vmoffset;
248 	vm_offset_t     header;
249 	mach_vm_size_t  vmsize;
250 	vm_prot_t       prot;
251 	vm_prot_t       maxprot;
252 	int             error1 = 0;
253 	char            stack_name[MAXCOMLEN + 6];
254 	char            *alloced_name = NULL;
255 	char            *name = NULL;
256 	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
257 	vm_size_t       mapsize;
258 	size_t          i;
259 	uint32_t nesting_depth = 0;
260 	kern_return_t   kret;
261 	struct vm_region_submap_info_64 vbr;
262 	mach_msg_type_number_t vbrcount = 0;
263 	tir_t tir1;
264 	struct vnode * vp;
265 	struct mach_header      *mh = NULL;     /* protected by is_64 */
266 	struct mach_header_64   *mh64 = NULL;   /* protected by is_64 */
267 	int             is_64 = 0;
268 	size_t          mach_header_sz = sizeof(struct mach_header);
269 	size_t          segment_command_sz = sizeof(struct segment_command);
270 	const char     *format = NULL;
271 	char           *custom_location_entitlement = NULL;
272 	size_t          custom_location_entitlement_len = 0;
273 	char           *alloced_format = NULL;
274 	size_t          alloced_format_len = 0;
275 	bool            include_iokit_memory = task_is_driver(task);
276 	bool            coredump_attempted = false;
277 
278 	if (current_proc() != core_proc) {
279 		panic("coredump() called against proc that is not current_proc: %p", core_proc);
280 	}
281 
282 	if (do_coredump == 0 ||         /* Not dumping at all */
283 	    ((sugid_coredump == 0) &&   /* Not dumping SUID/SGID binaries */
284 	    ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) ||
285 	    (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) {
286 		error = EFAULT;
287 		goto out2;
288 	}
289 
290 #if CONFIG_MACF
291 	error = mac_proc_check_dump_core(core_proc);
292 	if (error != 0) {
293 		goto out2;
294 	}
295 #endif
296 
297 	if (IS_64BIT_PROCESS(core_proc)) {
298 		is_64 = 1;
299 		mach_header_sz = sizeof(struct mach_header_64);
300 		segment_command_sz = sizeof(struct segment_command_64);
301 	}
302 
303 	mapsize = get_vmmap_size(map);
304 
305 	custom_location_entitlement = IOCurrentTaskGetEntitlement(COREDUMP_CUSTOM_LOCATION_ENTITLEMENT);
306 	if (custom_location_entitlement != NULL) {
307 		custom_location_entitlement_len = strlen(custom_location_entitlement);
308 		const char * dirname;
309 		if (proc_is_driver(core_proc)) {
310 			dirname = defaultdrivercorefiledir;
311 		} else {
312 			dirname = defaultcorefiledir;
313 		}
314 		size_t dirname_len = strlen(dirname);
315 		size_t printed_len;
316 
317 		/* new format is dirname + "/" + string from entitlement */
318 		alloced_format_len = dirname_len + 1 + custom_location_entitlement_len;
319 		alloced_format = kalloc_data(alloced_format_len + 1, Z_ZERO | Z_WAITOK | Z_NOFAIL);
320 		printed_len = snprintf(alloced_format, alloced_format_len + 1, "%s/%s", dirname, custom_location_entitlement);
321 		assert(printed_len == alloced_format_len);
322 
323 		format = alloced_format;
324 		coredump_flags |= COREDUMP_IGNORE_ULIMIT;
325 	} else {
326 		if (proc_is_driver(core_proc)) {
327 			format = drivercorefilename;
328 		} else {
329 			format = corefilename;
330 		}
331 	}
332 
333 	if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) &&
334 	    (mapsize >= proc_limitgetcur(core_proc, RLIMIT_CORE))) {
335 		error = EFAULT;
336 		goto out2;
337 	}
338 
339 	/* log coredump failures from here */
340 	coredump_attempted = true;
341 
342 	(void) task_suspend_internal(task);
343 
344 	alloced_name = zalloc_flags(ZV_NAMEI, Z_NOWAIT | Z_ZERO);
345 
346 	/* create name according to sysctl'able format string */
347 	/* if name creation fails, fall back to historical behaviour... */
348 	if (alloced_name == NULL ||
349 	    proc_core_name(format, core_proc->p_comm, kauth_cred_getuid(cred),
350 	    proc_getpid(core_proc), alloced_name, MAXPATHLEN)) {
351 		snprintf(stack_name, sizeof(stack_name),
352 		    "/cores/core.%d", proc_getpid(core_proc));
353 		name = stack_name;
354 	} else {
355 		name = alloced_name;
356 	}
357 
358 	COREDUMPLOG("writing core to %s", name);
359 	if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx))) {
360 		COREDUMPLOG("failed to open core dump file %s: error %d", name, error);
361 		goto out2;
362 	}
363 
364 	vap = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO);
365 	VATTR_INIT(vap);
366 	VATTR_WANTED(vap, va_nlink);
367 	/* Don't dump to non-regular files or files with links. */
368 	if (vp->v_type != VREG ||
369 	    vnode_getattr(vp, vap, ctx) || vap->va_nlink != 1) {
370 		COREDUMPLOG("failed to write core to non-regular file");
371 		error = EFAULT;
372 		goto out;
373 	}
374 
375 	VATTR_INIT(vap);        /* better to do it here than waste more stack in vnode_setsize */
376 	VATTR_SET(vap, va_data_size, 0);
377 	if (core_proc == initproc) {
378 		VATTR_SET(vap, va_dataprotect_class, PROTECTION_CLASS_D);
379 	}
380 	vnode_setattr(vp, vap, ctx);
381 	core_proc->p_acflag |= ACORE;
382 
383 	COREDUMPLOG("map size: %lu", mapsize);
384 	if ((reserve_mb > 0) &&
385 	    ((freespace_mb(vp) - (mapsize >> 20)) < reserve_mb)) {
386 		COREDUMPLOG("insufficient free space (free=%d MB, needed=%lu MB, reserve=%d MB)", freespace_mb(vp), (mapsize >> 20), reserve_mb);
387 		error = ENOSPC;
388 		goto out;
389 	}
390 
391 	/*
392 	 *	If the task is modified while dumping the file
393 	 *	(e.g., changes in threads or VM, the resulting
394 	 *	file will not necessarily be correct.
395 	 */
396 
397 	thread_count = get_task_numacts(task);
398 	segment_count = get_vmmap_entries(map); /* XXX */
399 	tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t);
400 	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
401 	tstate_size = 0;
402 	for (i = 0; i < tir1.flavor_count; i++) {
403 		tstate_size += sizeof(mythread_state_flavor_t) +
404 		    (flavors[i].count * sizeof(int));
405 	}
406 
407 	{
408 		size_t lhs;
409 		size_t rhs;
410 
411 		/* lhs = segment_count * segment_command_sz */
412 		if (os_mul_overflow(segment_count, segment_command_sz, &lhs)) {
413 			COREDUMPLOG("error: segment size overflow: segment_count=%lu, segment_command_sz=%lu", segment_count, segment_command_sz);
414 			error = ENOMEM;
415 			goto out;
416 		}
417 
418 		/* rhs = (tstate_size + sizeof(struct thread_command)) * thread_count */
419 		if (os_add_and_mul_overflow(tstate_size, sizeof(struct thread_command), thread_count, &rhs)) {
420 			COREDUMPLOG("error: thread state size overflow: tstate_size=%lu, thread_count=%lu", tstate_size, thread_count);
421 			error = ENOMEM;
422 			goto out;
423 		}
424 
425 		/* command_size = lhs + rhs */
426 		if (os_add_overflow(lhs, rhs, &command_size)) {
427 			COREDUMPLOG("error: command size overflow: lhs=%lu, rhs=%lu", lhs, rhs);
428 			error = ENOMEM;
429 			goto out;
430 		}
431 	}
432 
433 	if (os_add_overflow(command_size, mach_header_sz, &header_size)) {
434 		COREDUMPLOG("error: header size overflow: command_size=%lu, mach_header_sz=%lu", command_size, mach_header_sz);
435 		error = ENOMEM;
436 		goto out;
437 	}
438 
439 	if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size,
440 	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
441 		COREDUMPLOG("error: failed to allocate memory for header (size=%lu)", header_size);
442 		error = ENOMEM;
443 		goto out;
444 	}
445 
446 	/*
447 	 *	Set up Mach-O header.
448 	 */
449 	if (is_64) {
450 		mh64 = (struct mach_header_64 *)header;
451 		mh64->magic = MH_MAGIC_64;
452 		mh64->cputype = process_cpu_type(core_proc);
453 		mh64->cpusubtype = process_cpu_subtype(core_proc);
454 		mh64->filetype = MH_CORE;
455 		mh64->ncmds = (uint32_t)(segment_count + thread_count);
456 		mh64->sizeofcmds = (uint32_t)command_size;
457 	} else {
458 		mh = (struct mach_header *)header;
459 		mh->magic = MH_MAGIC;
460 		mh->cputype = process_cpu_type(core_proc);
461 		mh->cpusubtype = process_cpu_subtype(core_proc);
462 		mh->filetype = MH_CORE;
463 		mh->ncmds = (uint32_t)(segment_count + thread_count);
464 		mh->sizeofcmds = (uint32_t)command_size;
465 	}
466 
467 	hoffset = mach_header_sz;       /* offset into header */
468 	foffset = round_page(header_size);      /* offset into file */
469 	vmoffset = MACH_VM_MIN_ADDRESS;         /* offset into VM */
470 	COREDUMPLOG("mach header size: %zu", header_size);
471 
472 	/*
473 	 * We use to check for an error, here, now we try and get
474 	 * as much as we can
475 	 */
476 	COREDUMPLOG("dumping %zu segments", segment_count);
477 	while (segment_count > 0) {
478 		struct segment_command          *sc;
479 		struct segment_command_64       *sc64;
480 
481 		/*
482 		 *	Get region information for next region.
483 		 */
484 
485 		while (1) {
486 			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
487 			if ((kret = mach_vm_region_recurse(map,
488 			    &vmoffset, &vmsize, &nesting_depth,
489 			    (vm_region_recurse_info_t)&vbr,
490 			    &vbrcount)) != KERN_SUCCESS) {
491 				break;
492 			}
493 			/*
494 			 * If we get a valid mapping back, but we're dumping
495 			 * a 32 bit process,  and it's over the allowable
496 			 * address space of a 32 bit process, it's the same
497 			 * as if mach_vm_region_recurse() failed.
498 			 */
499 			if (!(is_64) &&
500 			    (vmoffset + vmsize > VM_MAX_ADDRESS)) {
501 				kret = KERN_INVALID_ADDRESS;
502 				COREDUMPLOG("exceeded allowable region for 32-bit process");
503 				break;
504 			}
505 			if (vbr.is_submap) {
506 				nesting_depth++;
507 				continue;
508 			} else {
509 				break;
510 			}
511 		}
512 		if (kret != KERN_SUCCESS) {
513 			COREDUMPLOG("ending segment dump, kret=%d", kret);
514 			break;
515 		}
516 
517 		prot = vbr.protection;
518 		maxprot = vbr.max_protection;
519 
520 		if ((prot | maxprot) == VM_PROT_NONE) {
521 			/*
522 			 * Elide unreadable (likely reserved) segments
523 			 */
524 			COREDUMPLOG("eliding unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
525 			vmoffset += vmsize;
526 			continue;
527 		}
528 
529 		/*
530 		 * Try as hard as possible to get read access to the data.
531 		 */
532 		if ((prot & VM_PROT_READ) == 0) {
533 			mach_vm_protect(map, vmoffset, vmsize, FALSE,
534 			    prot | VM_PROT_READ);
535 		}
536 
537 		/*
538 		 * But only try and perform the write if we can read it.
539 		 */
540 		int64_t fsize = ((maxprot & VM_PROT_READ) == VM_PROT_READ
541 		    && (include_iokit_memory || vbr.user_tag != VM_MEMORY_IOKIT)
542 		    && coredumpok(map, vmoffset)) ? vmsize : 0;
543 
544 		if (fsize) {
545 			int64_t resid = 0;
546 			const enum uio_seg sflg = IS_64BIT_PROCESS(core_proc) ?
547 			    UIO_USERSPACE64 : UIO_USERSPACE32;
548 
549 			error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, fsize,
550 			    foffset, sflg, IO_NODELOCKED | IO_UNIT,
551 			    cred, &resid, core_proc);
552 
553 			if (error) {
554 				/*
555 				 * Mark segment as empty
556 				 */
557 				fsize = 0;
558 				COREDUMPLOG("failed to write segment %llx->%llx: error %d", vmoffset, vmoffset + vmsize, error);
559 			} else if (resid) {
560 				/*
561 				 * Partial write. Extend the file size so
562 				 * that the segment command contains a valid
563 				 * range of offsets, possibly creating a hole.
564 				 */
565 				VATTR_INIT(vap);
566 				VATTR_SET(vap, va_data_size, foffset + fsize);
567 				vnode_setattr(vp, vap, ctx);
568 				COREDUMPLOG("partially wrote segment %llx->%llx, resid %lld", vmoffset, vmoffset + vmsize, resid);
569 			}
570 		} else {
571 			COREDUMPLOG("skipping unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
572 		}
573 
574 		/*
575 		 *	Fill in segment command structure.
576 		 */
577 
578 		if (is_64) {
579 			sc64 = (struct segment_command_64 *)(header + hoffset);
580 			sc64->cmd = LC_SEGMENT_64;
581 			sc64->cmdsize = sizeof(struct segment_command_64);
582 			/* segment name is zeroed by kmem_alloc */
583 			sc64->segname[0] = 0;
584 			sc64->vmaddr = vmoffset;
585 			sc64->vmsize = vmsize;
586 			sc64->fileoff = foffset;
587 			sc64->filesize = fsize;
588 			sc64->maxprot = maxprot;
589 			sc64->initprot = prot;
590 			sc64->nsects = 0;
591 			sc64->flags = 0;
592 		} else {
593 			sc = (struct segment_command *) (header + hoffset);
594 			sc->cmd = LC_SEGMENT;
595 			sc->cmdsize = sizeof(struct segment_command);
596 			/* segment name is zeroed by kmem_alloc */
597 			sc->segname[0] = 0;
598 			sc->vmaddr = CAST_DOWN_EXPLICIT(uint32_t, vmoffset);
599 			sc->vmsize = CAST_DOWN_EXPLICIT(uint32_t, vmsize);
600 			sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset); /* will never truncate */
601 			sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, fsize); /* will never truncate */
602 			sc->maxprot = maxprot;
603 			sc->initprot = prot;
604 			sc->nsects = 0;
605 			sc->flags = 0;
606 		}
607 
608 		hoffset += segment_command_sz;
609 		foffset += fsize;
610 		vmoffset += vmsize;
611 		segment_count--;
612 	}
613 	COREDUMPLOG("max file offset: %lld", foffset);
614 
615 	/*
616 	 * If there are remaining segments which have not been written
617 	 * out because break in the loop above, then they were not counted
618 	 * because they exceed the real address space of the executable
619 	 * type: remove them from the header's count.  This is OK, since
620 	 * we are allowed to have a sparse area following the segments.
621 	 */
622 	if (is_64) {
623 		mh64->ncmds -= segment_count;
624 		mh64->sizeofcmds -= segment_count * segment_command_sz;
625 	} else {
626 		mh->ncmds -= segment_count;
627 		mh->sizeofcmds -= segment_count * segment_command_sz;
628 	}
629 
630 	tir1.header = header;
631 	tir1.hoffset = hoffset;
632 	tir1.flavors = flavors;
633 	tir1.tstate_size = tstate_size;
634 	COREDUMPLOG("dumping %zu threads", thread_count);
635 	task_act_iterate_wth_args(task, collectth_state, &tir1);
636 
637 	/*
638 	 *	Write out the Mach header at the beginning of the
639 	 *	file.  OK to use a 32 bit write for this.
640 	 */
641 	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, (int)MIN(header_size, INT_MAX), (off_t)0,
642 	    UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc);
643 	if (error != KERN_SUCCESS) {
644 		COREDUMPLOG("failed to write mach header: error %d", error);
645 	}
646 	kmem_free(kernel_map, header, header_size);
647 
648 	if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0) {
649 		error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
650 		if (error != KERN_SUCCESS) {
651 			COREDUMPLOG("failed to FULLFSYNC core: error %d", error);
652 		}
653 	}
654 out:
655 	if (vap) {
656 		kfree_type(struct vnode_attr, vap);
657 	}
658 	error1 = vnode_close(vp, FWRITE, ctx);
659 	if (error1 != KERN_SUCCESS) {
660 		COREDUMPLOG("failed to close core file: error %d", error1);
661 	}
662 out2:
663 #if CONFIG_AUDIT
664 	audit_proc_coredump(core_proc, name, error);
665 #endif
666 	if (alloced_name != NULL) {
667 		zfree(ZV_NAMEI, alloced_name);
668 	}
669 	if (alloced_format != NULL) {
670 		kfree_data(alloced_format, alloced_format_len + 1);
671 	}
672 	if (custom_location_entitlement != NULL) {
673 		kfree_data(custom_location_entitlement, custom_location_entitlement_len + 1);
674 	}
675 	if (error == 0) {
676 		error = error1;
677 	}
678 
679 	if (coredump_attempted) {
680 		if (error != 0) {
681 			COREDUMPLOG("core dump failed: error %d\n", error);
682 		} else {
683 			COREDUMPLOG("core dump succeeded");
684 		}
685 	}
686 
687 	return error;
688 }
689 
690 #else /* CONFIG_COREDUMP */
691 
692 /* When core dumps aren't needed, no need to compile this file at all */
693 
694 #error assertion failed: this section is not compiled
695 
696 #endif /* CONFIG_COREDUMP */
697