1 /*
2 * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
29 *
30 * File: bsd/kern/kern_core.c
31 *
32 * This file contains machine independent code for performing core dumps.
33 *
34 */
35 #if CONFIG_COREDUMP
36
37 #include <mach/vm_param.h>
38 #include <mach/thread_status.h>
39 #include <sys/content_protection.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/signalvar.h>
43 #include <sys/resourcevar.h>
44 #include <sys/namei.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/proc_internal.h>
47 #include <sys/kauth.h>
48 #include <sys/timeb.h>
49 #include <sys/times.h>
50 #include <sys/acct.h>
51 #include <sys/file_internal.h>
52 #include <sys/uio.h>
53 #include <sys/kernel.h>
54 #include <sys/stat.h>
55
56 #include <mach-o/loader.h>
57 #include <mach/vm_region.h>
58 #include <mach/vm_statistics.h>
59
60 #include <IOKit/IOBSD.h>
61
62 #include <vm/vm_kern.h>
63 #include <vm/vm_protos.h> /* last */
64 #include <vm/vm_map.h> /* current_map() */
65 #include <vm/pmap.h> /* pmap_user_va_bits() */
66 #include <mach/mach_vm.h> /* mach_vm_region_recurse() */
67 #include <mach/task.h> /* task_suspend() */
68 #include <kern/task.h> /* get_task_numacts() */
69
70 #include <security/audit/audit.h>
71
72 #if CONFIG_MACF
73 #include <security/mac_framework.h>
74 #endif /* CONFIG_MACF */
75
76 #include <kdp/core_notes.h>
77
78 #define COREDUMP_CUSTOM_LOCATION_ENTITLEMENT "com.apple.private.custom-coredump-location"
79
/*
 * Descriptor for one thread-state flavor written after an LC_THREAD
 * command; matches the on-disk layout Mach-O core readers expect
 * (flavor id followed by the count of 32-bit words of register state).
 */
typedef struct {
	int flavor;                     /* the number for this flavor */
	mach_msg_type_number_t count;   /* count of ints in this flavor */
} mythread_state_flavor_t;
84
85 #if defined (__i386__) || defined (__x86_64__)
86 mythread_state_flavor_t thread_flavor_array[] = {
87 {x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
88 {x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
89 {x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
90 };
91 int mynum_flavors = 3;
92 #elif defined (__arm64__)
93 mythread_state_flavor_t thread_flavor_array[] = {
94 {ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT},
95 /* ARM64_TODO: VFP */
96 {ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT}
97 };
98 int mynum_flavors = 2;
99 #else
100 #error architecture not supported
101 #endif
102
103
/*
 * Iteration state handed to collectth_state() for each thread: tracks
 * where in the in-memory Mach-O header buffer the next LC_THREAD
 * command should be appended.
 */
typedef struct {
	vm_offset_t header;             /* base of the in-memory Mach-O header buffer */
	size_t hoffset;                 /* current append offset into that buffer */
	mythread_state_flavor_t *flavors; /* flavors captured per thread */
	size_t tstate_size;             /* total payload bytes of one thread's state */
	size_t flavor_count;            /* number of entries in flavors[] */
} tir_t;
111
112 extern int freespace_mb(vnode_t vp);
113 extern void task_lock(task_t);
114 extern void task_unlock(task_t);
115
116 /* XXX not in a Mach header anywhere */
117 kern_return_t thread_getstatus(thread_t act, int flavor,
118 thread_state_t tstate, mach_msg_type_number_t *count);
119 void task_act_iterate_wth_args_locked(task_t, void (*)(thread_t, void *), void *);
120
/*
 * Global coredump policy switches, checked at the top of coredump().
 * NOTE(review): presumably exposed as sysctl tunables elsewhere — the
 * registration is not visible in this file; verify before relying on it.
 */
#ifdef SECURE_KERNEL
__XNU_PRIVATE_EXTERN int do_coredump = 0;       /* default: don't dump cores */
#else
__XNU_PRIVATE_EXTERN int do_coredump = 1;       /* default: dump cores */
#endif  /* SECURE_KERNEL */
__XNU_PRIVATE_EXTERN int sugid_coredump = 0;    /* default: but not SGUID binaries */
127
128
129 /* cpu_type returns only the most generic indication of the current CPU. */
130 /* in a core we want to know the kind of process. */
131
132 cpu_type_t
process_cpu_type(proc_t core_proc)133 process_cpu_type(proc_t core_proc)
134 {
135 cpu_type_t what_we_think;
136 #if defined (__i386__) || defined (__x86_64__)
137 if (IS_64BIT_PROCESS(core_proc)) {
138 what_we_think = CPU_TYPE_X86_64;
139 } else {
140 what_we_think = CPU_TYPE_I386;
141 }
142 #elif defined(__arm64__)
143 if (IS_64BIT_PROCESS(core_proc)) {
144 what_we_think = CPU_TYPE_ARM64;
145 } else {
146 what_we_think = CPU_TYPE_ARM;
147 }
148 #endif
149
150 return what_we_think;
151 }
152
153 cpu_type_t
process_cpu_subtype(proc_t core_proc)154 process_cpu_subtype(proc_t core_proc)
155 {
156 cpu_type_t what_we_think;
157 #if defined (__i386__) || defined (__x86_64__)
158 if (IS_64BIT_PROCESS(core_proc)) {
159 what_we_think = CPU_SUBTYPE_X86_64_ALL;
160 } else {
161 what_we_think = CPU_SUBTYPE_I386_ALL;
162 }
163 #elif defined(__arm64__)
164 if (IS_64BIT_PROCESS(core_proc)) {
165 what_we_think = CPU_SUBTYPE_ARM64_ALL;
166 } else {
167 what_we_think = CPU_SUBTYPE_ARM_ALL;
168 }
169 #endif
170 return what_we_think;
171 }
172
173 static void
collectth_state(thread_t th_act,void * tirp)174 collectth_state(thread_t th_act, void *tirp)
175 {
176 vm_offset_t header;
177 size_t hoffset, i;
178 mythread_state_flavor_t *flavors;
179 struct thread_command *tc;
180 tir_t *t = (tir_t *)tirp;
181
182 /*
183 * Fill in thread command structure.
184 */
185 header = t->header;
186 hoffset = t->hoffset;
187 flavors = t->flavors;
188
189 tc = (struct thread_command *) (header + hoffset);
190 tc->cmd = LC_THREAD;
191 tc->cmdsize = (uint32_t)(sizeof(struct thread_command)
192 + t->tstate_size);
193 hoffset += sizeof(struct thread_command);
194 /*
195 * Follow with a struct thread_state_flavor and
196 * the appropriate thread state struct for each
197 * thread state flavor.
198 */
199 for (i = 0; i < t->flavor_count; i++) {
200 *(mythread_state_flavor_t *)(header + hoffset) =
201 flavors[i];
202 hoffset += sizeof(mythread_state_flavor_t);
203 thread_getstatus(th_act, flavors[i].flavor,
204 (thread_state_t)(header + hoffset),
205 &flavors[i].count);
206 hoffset += flavors[i].count * sizeof(int);
207 }
208
209 t->hoffset = hoffset;
210 }
211
212 #if DEVELOPMENT || DEBUG
213 #define COREDUMPLOG(fmt, args...) printf("coredump (%s, pid %d): " fmt "\n", core_proc->p_comm, proc_getpid(core_proc), ## args)
214 #else
215 #define COREDUMPLOG(fmt, args...)
216 #endif
217
218 /*
219 * LC_NOTE support for userspace coredumps.
220 */
221
222 typedef int (write_note_cb_t)(struct vnode *vp, off_t foffset);
223
224 static int
note_addrable_bits(struct vnode * vp,off_t foffset)225 note_addrable_bits(struct vnode *vp, off_t foffset)
226 {
227 task_t t = current_task();
228 vfs_context_t ctx = vfs_context_current();
229 kauth_cred_t cred = vfs_context_ucred(ctx);
230
231 addrable_bits_note_t note = {
232 .version = ADDRABLE_BITS_VER,
233 .addressing_bits = pmap_user_va_bits(get_task_pmap(t)),
234 .unused = 0
235 };
236
237 return vn_rdwr_64(UIO_WRITE, vp, (vm_offset_t)¬e, sizeof(note), foffset, UIO_SYSSPACE,
238 IO_NODELOCKED | IO_UNIT, cred, 0, current_proc());
239 }
240
241 /*
242 * note handling
243 */
244
/*
 * Registry of the LC_NOTE entries emitted into every userspace coredump.
 * Entries with a NULL cn_write_cb are skipped by dump_notes(), but note
 * that notes_count below still counts them when sizing the header.
 */
struct core_note {
	size_t cn_size;                 /* payload bytes reserved in the file */
	const char *cn_owner;           /* Mach-O note data_owner string */
	write_note_cb_t *cn_write_cb;   /* writes the payload at a file offset */
} const core_notes[] = {
	{
		.cn_size = sizeof(addrable_bits_note_t),
		.cn_owner = ADDRABLE_BITS_DATA_OWNER,
		.cn_write_cb = note_addrable_bits,
	}
};

/* Number of LC_NOTE commands accounted for in the Mach-O header. */
const size_t notes_count = sizeof(core_notes) / sizeof(struct core_note);
258
259 /*
260 * LC_NOTE commands are allocated as a part of Mach-O header and are written to
261 * disk at the end of coredump. LC_NOTE's payload has to be written in callbacks here.
262 */
263 static int
dump_notes(proc_t __unused core_proc,vm_offset_t header,size_t hoffset,struct vnode * vp,off_t foffset)264 dump_notes(proc_t __unused core_proc, vm_offset_t header, size_t hoffset, struct vnode *vp, off_t foffset)
265 {
266 for (size_t i = 0; i < notes_count; i++) {
267 int error = 0;
268
269 if (core_notes[i].cn_write_cb == NULL) {
270 continue;
271 }
272
273 /* Generate LC_NOTE command. */
274 struct note_command *nc = (struct note_command *)(header + hoffset);
275
276 nc->cmd = LC_NOTE;
277 nc->cmdsize = sizeof(struct note_command);
278 nc->offset = foffset;
279 nc->size = core_notes[i].cn_size;
280 strlcpy(nc->data_owner, core_notes[i].cn_owner, sizeof(nc->data_owner));
281
282 hoffset += sizeof(struct note_command);
283
284 /* Add note's payload. */
285 error = core_notes[i].cn_write_cb(vp, foffset);
286 if (error != KERN_SUCCESS) {
287 COREDUMPLOG("failed to write LC_NOTE %s: error %d", core_notes[i].cn_owner, error);
288 return error;
289 }
290
291 foffset += core_notes[i].cn_size;
292 }
293
294 return 0;
295 }
296
297 /*
298 * coredump
299 *
300 * Description: Create a core image on the file "core" for the process
301 * indicated
302 *
303 * Parameters: core_proc Process to dump core [*]
304 * reserve_mb If non-zero, leave filesystem with
305 * at least this much free space.
306 * coredump_flags Extra options (ignore rlimit, run fsync)
307 *
308 * Returns: 0 Success
309 * !0 Failure errno
310 *
311 * IMPORTANT: This function can only be called on the current process, due
312 * to assumptions below; see variable declaration section for
313 * details.
314 */
315 #define MAX_TSTATE_FLAVORS 10
int
coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags)
{
	/* Begin assumptions that limit us to only the current process */
	vfs_context_t ctx = vfs_context_current();
	vm_map_t map = current_map();
	task_t task = current_task();
	/* End assumptions */
	kauth_cred_t cred = vfs_context_ucred(ctx);
	int error = 0;
	struct vnode_attr *vap = NULL;
	size_t thread_count, segment_count;
	size_t command_size, header_size, tstate_size;
	size_t hoffset;                 /* append offset into in-memory header */
	off_t foffset;                  /* write offset into the core file */
	mach_vm_offset_t vmoffset;      /* walk offset through the task's VM */
	vm_offset_t header;             /* kmem buffer holding the whole Mach-O header */
	mach_vm_size_t vmsize;
	vm_prot_t prot;
	vm_prot_t maxprot;
	int error1 = 0;
	char stack_name[MAXCOMLEN + 6];
	char *alloced_name = NULL;
	char *name = NULL;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t mapsize;
	size_t i;
	uint32_t nesting_depth = 0;
	kern_return_t kret;
	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t vbrcount = 0;
	tir_t tir1;
	struct vnode * vp;
	struct mach_header *mh = NULL;          /* protected by is_64 */
	struct mach_header_64 *mh64 = NULL;     /* protected by is_64 */
	int is_64 = 0;
	size_t mach_header_sz = sizeof(struct mach_header);
	size_t segment_command_sz = sizeof(struct segment_command);
	size_t notes_size = 0;
	const char *format = NULL;
	char *custom_location_entitlement = NULL;
	size_t custom_location_entitlement_len = 0;
	char *alloced_format = NULL;
	size_t alloced_format_len = 0;
	bool include_iokit_memory = task_is_driver(task);
	bool coredump_attempted = false;
	bool task_locked = false;

	/* Enforce the current-process-only contract stated above. */
	if (current_proc() != core_proc) {
		panic("coredump() called against proc that is not current_proc: %p", core_proc);
	}

	/*
	 * Policy gate: dumping may be disabled globally, and set-id
	 * processes are refused unless sugid_coredump is enabled (their
	 * cores could leak privileged memory).
	 */
	if (do_coredump == 0 ||         /* Not dumping at all */
	    ((sugid_coredump == 0) &&   /* Not dumping SUID/SGID binaries */
	    ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) ||
	    (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) {
		error = EFAULT;
		goto out2;
	}

#if CONFIG_MACF
	/* Give the MAC framework a chance to veto the dump. */
	error = mac_proc_check_dump_core(core_proc);
	if (error != 0) {
		goto out2;
	}
#endif

	/* 64-bit processes get a 64-bit Mach-O header and segment commands. */
	if (IS_64BIT_PROCESS(core_proc)) {
		is_64 = 1;
		mach_header_sz = sizeof(struct mach_header_64);
		segment_command_sz = sizeof(struct segment_command_64);
	}

	mapsize = get_vmmap_size(map);

	/*
	 * An entitled task may redirect its core into a custom file under
	 * the default core directory; the entitlement also implies the
	 * RLIMIT_CORE check below is skipped.
	 */
	custom_location_entitlement = IOCurrentTaskGetEntitlement(COREDUMP_CUSTOM_LOCATION_ENTITLEMENT);
	if (custom_location_entitlement != NULL) {
		custom_location_entitlement_len = strlen(custom_location_entitlement);
		const char * dirname;
		if (proc_is_driver(core_proc)) {
			dirname = defaultdrivercorefiledir;
		} else {
			dirname = defaultcorefiledir;
		}
		size_t dirname_len = strlen(dirname);
		size_t printed_len;

		/* new format is dirname + "/" + string from entitlement */
		alloced_format_len = dirname_len + 1 + custom_location_entitlement_len;
		alloced_format = kalloc_data(alloced_format_len + 1, Z_ZERO | Z_WAITOK | Z_NOFAIL);
		printed_len = snprintf(alloced_format, alloced_format_len + 1, "%s/%s", dirname, custom_location_entitlement);
		assert(printed_len == alloced_format_len);

		format = alloced_format;
		coredump_flags |= COREDUMP_IGNORE_ULIMIT;
	} else {
		if (proc_is_driver(core_proc)) {
			format = drivercorefilename;
		} else {
			format = corefilename;
		}
	}

	/* Respect RLIMIT_CORE unless explicitly told to ignore it. */
	if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) &&
	    (mapsize >= proc_limitgetcur(core_proc, RLIMIT_CORE))) {
		error = EFAULT;
		goto out2;
	}

	/* log coredump failures from here */
	coredump_attempted = true;

	/*
	 * Lock and suspend the task so its threads and VM map stay stable
	 * while we walk them; the lock is dropped in the cleanup path.
	 * NOTE(review): no matching resume here — presumably the exit path
	 * handles it; verify against callers.
	 */
	task_lock(task);
	task_locked = true;
	(void) task_suspend_internal_locked(task);

	alloced_name = zalloc_flags(ZV_NAMEI, Z_NOWAIT | Z_ZERO);

	/* create name according to sysctl'able format string */
	/* if name creation fails, fall back to historical behaviour... */
	if (alloced_name == NULL ||
	    proc_core_name(format, core_proc->p_comm, kauth_cred_getuid(cred),
	    proc_getpid(core_proc), alloced_name, MAXPATHLEN)) {
		snprintf(stack_name, sizeof(stack_name),
		    "/cores/core.%d", proc_getpid(core_proc));
		name = stack_name;
	} else {
		name = alloced_name;
	}

	COREDUMPLOG("writing core to %s", name);
	if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx))) {
		COREDUMPLOG("failed to open core dump file %s: error %d", name, error);
		goto out2;
	}

	vap = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO);
	VATTR_INIT(vap);
	VATTR_WANTED(vap, va_nlink);
	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    vnode_getattr(vp, vap, ctx) || vap->va_nlink != 1) {
		COREDUMPLOG("failed to write core to non-regular file");
		error = EFAULT;
		goto out;
	}

	/* Truncate any existing content before writing the new core. */
	VATTR_INIT(vap);        /* better to do it here than waste more stack in vnode_setsize */
	VATTR_SET(vap, va_data_size, 0);
	if (core_proc == initproc) {
		VATTR_SET(vap, va_dataprotect_class, PROTECTION_CLASS_D);
	}
	vnode_setattr(vp, vap, ctx);
	core_proc->p_acflag |= ACORE;

	/* Bail early if the dump would eat into the requested free-space reserve. */
	COREDUMPLOG("map size: %lu", mapsize);
	if ((reserve_mb > 0) &&
	    ((freespace_mb(vp) - (mapsize >> 20)) < reserve_mb)) {
		COREDUMPLOG("insufficient free space (free=%d MB, needed=%lu MB, reserve=%d MB)", freespace_mb(vp), (mapsize >> 20), reserve_mb);
		error = ENOSPC;
		goto out;
	}

	/* Size of per-thread state: one flavor header + payload per flavor. */
	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map); /* XXX */
	tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t);
	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
	tstate_size = 0;
	for (i = 0; i < tir1.flavor_count; i++) {
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));
	}

	/*
	 * Total load-command bytes = segments + thread commands + notes,
	 * each step checked for overflow before the allocation below.
	 */
	{
		size_t lhs;
		size_t rhs;

		/* lhs = segment_count * segment_command_sz */
		if (os_mul_overflow(segment_count, segment_command_sz, &lhs)) {
			COREDUMPLOG("error: segment size overflow: segment_count=%lu, segment_command_sz=%lu", segment_count, segment_command_sz);
			error = ENOMEM;
			goto out;
		}

		/* rhs = (tstate_size + sizeof(struct thread_command)) * thread_count */
		if (os_add_and_mul_overflow(tstate_size, sizeof(struct thread_command), thread_count, &rhs)) {
			COREDUMPLOG("error: thread state size overflow: tstate_size=%lu, thread_count=%lu", tstate_size, thread_count);
			error = ENOMEM;
			goto out;
		}

		/* command_size = lhs + rhs */
		if (os_add_overflow(lhs, rhs, &command_size)) {
			COREDUMPLOG("error: command size overflow: lhs=%lu, rhs=%lu", lhs, rhs);
			error = ENOMEM;
			goto out;
		}

		/* Add notes payload. */
		if (os_mul_overflow(notes_count, sizeof(struct note_command), &notes_size)) {
			COREDUMPLOG("error: note command size overflow: note=%lu", i);
			error = ENOMEM;
			goto out;
		}

		if (os_add_overflow(command_size, notes_size, &command_size)) {
			COREDUMPLOG("error: notes overflow: notes_size=%lu", notes_size);
			error = ENOMEM;
			goto out;
		}
	}

	if (os_add_overflow(command_size, mach_header_sz, &header_size)) {
		COREDUMPLOG("error: header size overflow: command_size=%lu, mach_header_sz=%lu", command_size, mach_header_sz);
		error = ENOMEM;
		goto out;
	}

	/*
	 * One zeroed kernel buffer holds the entire Mach-O header plus all
	 * load commands; it is written to the start of the file at the end.
	 */
	if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
		COREDUMPLOG("error: failed to allocate memory for header (size=%lu)", header_size);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Set up Mach-O header.
	 */
	if (is_64) {
		mh64 = (struct mach_header_64 *)header;
		mh64->magic = MH_MAGIC_64;
		mh64->cputype = process_cpu_type(core_proc);
		mh64->cpusubtype = process_cpu_subtype(core_proc);
		mh64->filetype = MH_CORE;
		mh64->ncmds = (uint32_t)(segment_count + notes_count + thread_count);
		mh64->sizeofcmds = (uint32_t)command_size;
	} else {
		mh = (struct mach_header *)header;
		mh->magic = MH_MAGIC;
		mh->cputype = process_cpu_type(core_proc);
		mh->cpusubtype = process_cpu_subtype(core_proc);
		mh->filetype = MH_CORE;
		mh->ncmds = (uint32_t)(segment_count + notes_count + thread_count);
		mh->sizeofcmds = (uint32_t)command_size;
	}

	/* Segment data starts on a page boundary past the header area. */
	hoffset = mach_header_sz;       /* offset into header */
	foffset = round_page(header_size);      /* offset into file */
	vmoffset = MACH_VM_MIN_ADDRESS;         /* offset into VM */
	COREDUMPLOG("mach header size: %zu", header_size);

	/*
	 * We used to check for an error here; now we try and get
	 * as much as we can.
	 */
	COREDUMPLOG("dumping %zu segments", segment_count);
	while (segment_count > 0) {
		struct segment_command *sc;
		struct segment_command_64 *sc64;

		/*
		 * Get region information for next region.
		 */

		while (1) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = mach_vm_region_recurse(map,
			    &vmoffset, &vmsize, &nesting_depth,
			    (vm_region_recurse_info_t)&vbr,
			    &vbrcount)) != KERN_SUCCESS) {
				break;
			}
			/*
			 * If we get a valid mapping back, but we're dumping
			 * a 32 bit process, and it's over the allowable
			 * address space of a 32 bit process, it's the same
			 * as if mach_vm_region_recurse() failed.
			 */
			if (!(is_64) &&
			    (vmoffset + vmsize > VM_MAX_ADDRESS)) {
				kret = KERN_INVALID_ADDRESS;
				COREDUMPLOG("exceeded allowable region for 32-bit process");
				break;
			}
			/* Descend into submaps rather than recording them. */
			if (vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}
		if (kret != KERN_SUCCESS) {
			COREDUMPLOG("ending segment dump, kret=%d", kret);
			break;
		}

		prot = vbr.protection;
		maxprot = vbr.max_protection;

		if ((prot | maxprot) == VM_PROT_NONE) {
			/*
			 * Elide unreadable (likely reserved) segments
			 */
			COREDUMPLOG("eliding unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
			vmoffset += vmsize;
			continue;
		}

		/*
		 * Try as hard as possible to get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			mach_vm_protect(map, vmoffset, vmsize, FALSE,
			    prot | VM_PROT_READ);
		}

		/*
		 * But only try and perform the write if we can read it.
		 * IOKit-tagged memory is skipped unless the task is a driver.
		 */
		int64_t fsize = ((maxprot & VM_PROT_READ) == VM_PROT_READ
		    && (include_iokit_memory || vbr.user_tag != VM_MEMORY_IOKIT)
		    && coredumpok(map, vmoffset)) ? vmsize : 0;

		if (fsize) {
			int64_t resid = 0;
			const enum uio_seg sflg = IS_64BIT_PROCESS(core_proc) ?
			    UIO_USERSPACE64 : UIO_USERSPACE32;

			error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, fsize,
			    foffset, sflg, IO_NODELOCKED | IO_UNIT,
			    cred, &resid, core_proc);

			if (error) {
				/*
				 * Mark segment as empty
				 */
				fsize = 0;
				COREDUMPLOG("failed to write segment %llx->%llx: error %d", vmoffset, vmoffset + vmsize, error);
			} else if (resid) {
				/*
				 * Partial write. Extend the file size so
				 * that the segment command contains a valid
				 * range of offsets, possibly creating a hole.
				 */
				VATTR_INIT(vap);
				VATTR_SET(vap, va_data_size, foffset + fsize);
				vnode_setattr(vp, vap, ctx);
				COREDUMPLOG("partially wrote segment %llx->%llx, resid %lld", vmoffset, vmoffset + vmsize, resid);
			}
		} else {
			COREDUMPLOG("skipping unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
		}

		/*
		 * Fill in segment command structure.
		 */

		if (is_64) {
			sc64 = (struct segment_command_64 *)(header + hoffset);
			sc64->cmd = LC_SEGMENT_64;
			sc64->cmdsize = sizeof(struct segment_command_64);
			/* segment name is zeroed by kmem_alloc */
			sc64->segname[0] = 0;
			sc64->vmaddr = vmoffset;
			sc64->vmsize = vmsize;
			sc64->fileoff = foffset;
			sc64->filesize = fsize;
			sc64->maxprot = maxprot;
			sc64->initprot = prot;
			sc64->nsects = 0;
			sc64->flags = 0;
		} else {
			sc = (struct segment_command *) (header + hoffset);
			sc->cmd = LC_SEGMENT;
			sc->cmdsize = sizeof(struct segment_command);
			/* segment name is zeroed by kmem_alloc */
			sc->segname[0] = 0;
			sc->vmaddr = CAST_DOWN_EXPLICIT(uint32_t, vmoffset);
			sc->vmsize = CAST_DOWN_EXPLICIT(uint32_t, vmsize);
			sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset);    /* will never truncate */
			sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, fsize);     /* will never truncate */
			sc->maxprot = maxprot;
			sc->initprot = prot;
			sc->nsects = 0;
			sc->flags = 0;
		}

		hoffset += segment_command_sz;
		foffset += fsize;
		vmoffset += vmsize;
		segment_count--;
	}
	COREDUMPLOG("max file offset: %lld", foffset);

	/*
	 * If there are remaining segments which have not been written
	 * out because break in the loop above, then they were not counted
	 * because they exceed the real address space of the executable
	 * type: remove them from the header's count. This is OK, since
	 * we are allowed to have a sparse area following the segments.
	 */
	if (is_64) {
		mh64->ncmds -= segment_count;
		mh64->sizeofcmds -= segment_count * segment_command_sz;
	} else {
		mh->ncmds -= segment_count;
		mh->sizeofcmds -= segment_count * segment_command_sz;
	}

	/* Add LC_NOTES */
	COREDUMPLOG("dumping %zu notes", notes_count);
	if (dump_notes(core_proc, header, hoffset, vp, foffset) != 0) {
		error = EFAULT;
		goto out;
	}

	/* Thread commands go after the note commands in the header buffer. */
	tir1.header = header;
	tir1.hoffset = hoffset + notes_size;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	COREDUMPLOG("dumping %zu threads", thread_count);
	task_act_iterate_wth_args_locked(task, collectth_state, &tir1);

	/*
	 * Write out the Mach header at the beginning of the
	 * file.  OK to use a 32 bit write for this.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, (int)MIN(header_size, INT_MAX), (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc);
	/* vn_rdwr returns an errno; KERN_SUCCESS (0) coincides with "no error". */
	if (error != KERN_SUCCESS) {
		COREDUMPLOG("failed to write mach header: error %d", error);
	}
	kmem_free(kernel_map, header, header_size);

	if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0) {
		error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
		if (error != KERN_SUCCESS) {
			COREDUMPLOG("failed to FULLFSYNC core: error %d", error);
		}
	}
out:
	/* Cleanup for paths where the vnode was opened. */
	if (vap) {
		kfree_type(struct vnode_attr, vap);
	}
	error1 = vnode_close(vp, FWRITE, ctx);
	if (error1 != KERN_SUCCESS) {
		COREDUMPLOG("failed to close core file: error %d", error1);
	}
out2:
#if CONFIG_AUDIT
	/* name may still be NULL if we bailed before choosing one. */
	audit_proc_coredump(core_proc, name, error);
#endif
	if (alloced_name != NULL) {
		zfree(ZV_NAMEI, alloced_name);
	}
	if (alloced_format != NULL) {
		kfree_data(alloced_format, alloced_format_len + 1);
	}
	if (custom_location_entitlement != NULL) {
		kfree_data(custom_location_entitlement, custom_location_entitlement_len + 1);
	}
	/* A close error is only reported if nothing else failed first. */
	if (error == 0) {
		error = error1;
	}

	if (coredump_attempted) {
		if (error != 0) {
			COREDUMPLOG("core dump failed: error %d\n", error);
		} else {
			COREDUMPLOG("core dump succeeded");
		}
	}

	if (task_locked) {
		task_unlock(task);
	}

	return error;
}
795
796 #else /* CONFIG_COREDUMP */
797
798 /* When core dumps aren't needed, no need to compile this file at all */
799
800 #error assertion failed: this section is not compiled
801
802 #endif /* CONFIG_COREDUMP */
803