1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/assert.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32 #include <mach/vm_param.h>
33 #include <kern/kern_types.h>
34 #include <kern/mach_param.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/kern_cdata.h>
38 #include <kern/kalloc.h>
39 #include <kern/ipc_kobject.h>
40 #include <mach/mach_vm.h>
41
/* Forward declarations for file-local helpers (definitions appear below) */
static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
static size_t kcdata_get_memory_size_for_data(uint32_t size);
static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
static void kcdata_object_no_senders(ipc_port_t port, mach_port_mscount_t mscount);

/* Round x up to the next multiple of y (y must be non-zero) */
#ifndef ROUNDUP
#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y))
#endif

/*
 * zlib will need to store its metadata and this value is indifferent from the
 * window bits and other zlib internals
 */
#define ZLIB_METADATA_SIZE 1440

/* Debug logging is compiled out by default; swap the two lines to enable */
/* #define kcdata_debug_printf printf */
#define kcdata_debug_printf(...) ;
62
/* 4-byte packing so the on-disk/wire kcdata layout has no hidden padding */
#pragma pack(push, 4)

/* Internal structs for convenience */

/* A uint64 value preceded by its fixed-length textual description */
struct _uint64_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

/* A uint32 value preceded by its fixed-length textual description */
struct _uint32_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};

#pragma pack(pop)
77
/* Count of lightweight-corpse kcdata objects currently in flight (rate limiting) */
int _Atomic lw_corpse_obj_cnt = 0;

/* Register the IKOT_KCDATA kobject type: movable send rights, stable kobject
 * pointer, and a no-senders hook that drops the port's object reference. */
IPC_KOBJECT_DEFINE(IKOT_KCDATA,
    .iko_op_movable_send = true,
    .iko_op_stable = true,
    .iko_op_no_senders = kcdata_object_no_senders);

/* Typed zone backing struct kcdata_object allocations */
KALLOC_TYPE_DEFINE(KCDATA_OBJECT, struct kcdata_object, KT_DEFAULT);

/* Reference-count group for kcdata object refs (for leak attribution) */
os_refgrp_decl(static, kcdata_object_refgrp, "kcdata_object", NULL);
88
/* Grab a throttle slot for rate-limited kcdata object type(s) */
kern_return_t
kcdata_object_throttle_get(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		/*
		 * Atomically increment the in-flight counter, unless it is
		 * already at the cap, in which case fail the acquisition.
		 */
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			if (oval >= MAX_INFLIGHT_KCOBJECT_LW_CORPSE) {
				printf("Too many lightweight corpse in flight: %d\n", oval);
				os_atomic_rmw_loop_give_up(return KERN_RESOURCE_SHORTAGE);
			}
			nval = oval + 1;
		});
	}

	return KERN_SUCCESS;
}
110
/* Release a throttle slot for rate-limited kcdata object type(s) */
void
kcdata_object_throttle_release(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		/*
		 * Atomically decrement the in-flight counter; going negative
		 * means a release without a matching get, which is fatal.
		 */
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			nval = oval - 1;
			if (__improbable(nval < 0)) {
				os_atomic_rmw_loop_give_up(panic("Lightweight corpse kcdata object over-released"));
			}
		});
	}
}
129
130 /*
131 * Create an object representation for the given kcdata.
132 *
133 * Captures kcdata descripter ref in object. If the object creation
134 * should be rate-limited, kcdata_object_throttle_get() must be called
135 * manually before invoking kcdata_create_object(), so as to save
136 * work (of creating the enclosed kcdata blob) if a throttled reference
137 * cannot be obtained in the first place.
138 */
139 kern_return_t
kcdata_create_object(kcdata_descriptor_t data,kcdata_obj_flags_t flags,uint32_t size,kcdata_object_t * objp)140 kcdata_create_object(
141 kcdata_descriptor_t data,
142 kcdata_obj_flags_t flags,
143 uint32_t size,
144 kcdata_object_t *objp)
145 {
146 kcdata_object_t obj;
147
148 if (data == NULL) {
149 return KERN_INVALID_ARGUMENT;
150 }
151
152 obj = zalloc_flags(KCDATA_OBJECT,
153 Z_ZERO | Z_WAITOK | Z_NOFAIL | Z_SET_NOTEARLY);
154
155 obj->ko_data = data;
156 obj->ko_flags = flags;
157 obj->ko_alloc_size = size;
158 obj->ko_port = IP_NULL;
159
160 os_ref_init_count(&obj->ko_refs, &kcdata_object_refgrp, 1);
161
162 *objp = obj;
163
164 return KERN_SUCCESS;
165 }
166
167 void
kcdata_object_reference(kcdata_object_t obj)168 kcdata_object_reference(kcdata_object_t obj)
169 {
170 if (obj == KCDATA_OBJECT_NULL) {
171 return;
172 }
173
174 os_ref_retain(&obj->ko_refs);
175 }
176
177 static void
kcdata_object_destroy(kcdata_object_t obj)178 kcdata_object_destroy(kcdata_object_t obj)
179 {
180 void *begin_addr;
181 ipc_port_t port;
182 kcdata_obj_flags_t flags;
183
184 if (obj == KCDATA_OBJECT_NULL) {
185 return;
186 }
187
188 port = obj->ko_port;
189 flags = obj->ko_flags;
190
191 /* Release the port */
192 if (IP_VALID(port)) {
193 ipc_kobject_dealloc_port(port, IPC_KOBJECT_NO_MSCOUNT,
194 IKOT_KCDATA);
195 }
196
197 /* Release the ref for rate-limited kcdata object type(s) */
198 kcdata_object_throttle_release(flags);
199
200 /* Destroy the kcdata backing captured in the object */
201 begin_addr = kcdata_memory_get_begin_addr(obj->ko_data);
202 kfree_data(begin_addr, obj->ko_alloc_size);
203 kcdata_memory_destroy(obj->ko_data);
204
205 /* Free the object */
206 zfree(KCDATA_OBJECT, obj);
207 }
208
209 void
kcdata_object_release(kcdata_object_t obj)210 kcdata_object_release(kcdata_object_t obj)
211 {
212 if (obj == KCDATA_OBJECT_NULL) {
213 return;
214 }
215
216 if (os_ref_release(&obj->ko_refs) > 0) {
217 return;
218 }
219 /* last ref */
220
221 kcdata_object_destroy(obj);
222 }
223
224 /* Produces kcdata object ref */
225 kcdata_object_t
convert_port_to_kcdata_object(ipc_port_t port)226 convert_port_to_kcdata_object(ipc_port_t port)
227 {
228 kcdata_object_t obj = KCDATA_OBJECT_NULL;
229
230 if (IP_VALID(port)) {
231 obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
232 if (obj != KCDATA_OBJECT_NULL) {
233 zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);
234 kcdata_object_reference(obj);
235 }
236 }
237
238 return obj;
239 }
240
/* Consumes kcdata object ref */
ipc_port_t
convert_kcdata_object_to_port(kcdata_object_t obj)
{
	if (obj == KCDATA_OBJECT_NULL) {
		return IP_NULL;
	}

	/* catch pointers that were not allocated from the kcdata zone */
	zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);

	/*
	 * Make a send right on the object's kobject port, lazily allocating
	 * the port on first use. On failure, drop the caller's object ref
	 * here; on success the ref is absorbed.
	 */
	if (!ipc_kobject_make_send_lazy_alloc_port(&obj->ko_port,
	    obj, IKOT_KCDATA)) {
		kcdata_object_release(obj);
	}
	/* object ref consumed */

	/*
	 * NOTE(review): obj is dereferenced after the release above;
	 * presumably the port retains a reference that keeps the object
	 * alive here — confirm against ipc_kobject_make_send_lazy_alloc_port.
	 */
	return obj->ko_port;
}
259
260 static void
kcdata_object_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)261 kcdata_object_no_senders(
262 ipc_port_t port,
263 __unused mach_port_mscount_t mscount)
264 {
265 kcdata_object_t obj;
266
267 obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
268 assert(obj != KCDATA_OBJECT_NULL);
269
270 /* release the ref given by no-senders notification */
271 kcdata_object_release(obj);
272 }
273
274 /*
275 * Estimates how large of a buffer that should be allocated for a buffer that will contain
276 * num_items items of known types with overall length payload_size.
277 *
278 * NOTE: This function will not give an accurate estimate for buffers that will
279 * contain unknown types (those with string descriptions).
280 */
281 uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items,uint32_t payload_size)282 kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
283 {
284 /*
285 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
286 */
287 uint32_t max_padding_bytes = 0;
288 uint32_t max_padding_with_item_description_bytes = 0;
289 uint32_t estimated_required_buffer_size = 0;
290 const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
291
292 if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
293 panic("%s: Overflow in required buffer size estimate", __func__);
294 }
295
296 if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
297 panic("%s: Overflow in required buffer size estimate", __func__);
298 }
299
300 if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
301 panic("%s: Overflow in required buffer size estimate", __func__);
302 }
303
304 return estimated_required_buffer_size;
305 }
306
307 kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)308 kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
309 {
310 kcdata_descriptor_t data = NULL;
311 mach_vm_address_t user_addr = 0;
312 uint16_t clamped_flags = (uint16_t) flags;
313
314 data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
315 data->kcd_addr_begin = buffer_addr_p;
316 data->kcd_addr_end = buffer_addr_p;
317 data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
318 data->kcd_length = size;
319 data->kcd_endalloced = 0;
320
321 /* Initialize the BEGIN header */
322 if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
323 kcdata_memory_destroy(data);
324 return NULL;
325 }
326
327 return data;
328 }
329
330 kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data,mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)331 kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
332 {
333 mach_vm_address_t user_addr = 0;
334 uint16_t clamped_flags = (uint16_t) flags;
335
336 if (data == NULL) {
337 return KERN_INVALID_ARGUMENT;
338 }
339 bzero(data, sizeof(struct kcdata_descriptor));
340 data->kcd_addr_begin = buffer_addr_p;
341 data->kcd_addr_end = buffer_addr_p;
342 data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
343 data->kcd_length = size;
344 data->kcd_endalloced = 0;
345
346 /* Initialize the BEGIN header */
347 return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
348 }
349
350 void *
kcdata_endalloc(kcdata_descriptor_t data,size_t length)351 kcdata_endalloc(kcdata_descriptor_t data, size_t length)
352 {
353 /*
354 * We do not support endalloc with a space allocation callback - the
355 * callback may need to free the remaining free space in the buffer,
356 * trampling endallocs and complicating things.
357 */
358 if (data->kcd_alloc_callback != NULL) {
359 return NULL;
360 }
361 mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
362 /* round up allocation and ensure return value is uint64-aligned */
363 size_t toalloc = ROUNDUP(length, sizeof(uint64_t)) + (curend % sizeof(uint64_t));
364 /* an arbitrary limit: make sure we don't allocate more then 1/4th of the remaining buffer. */
365 if (data->kcd_length / 4 <= toalloc) {
366 return NULL;
367 }
368 data->kcd_length -= toalloc;
369 data->kcd_endalloced += toalloc;
370 return (void *)(curend - toalloc);
371 }
372
373 /* Zeros and releases data allocated from the end of the buffer */
374 static void
kcdata_release_endallocs(kcdata_descriptor_t data)375 kcdata_release_endallocs(kcdata_descriptor_t data)
376 {
377 mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
378 size_t endalloced = data->kcd_endalloced;
379 if (endalloced > 0) {
380 bzero((void *)curend, endalloced);
381 data->kcd_length += endalloced;
382 data->kcd_endalloced = 0;
383 }
384 }
385
386 void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)387 kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
388 {
389 if (data == NULL) {
390 return NULL;
391 }
392
393 return (void *)data->kcd_addr_begin;
394 }
395
396 uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)397 kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
398 {
399 assert(kcd != NULL);
400 return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
401 }
402
403 uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)404 kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
405 {
406 kern_return_t kr;
407
408 assert(kcd != NULL);
409 if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
410 uint64_t totalout, totalin;
411
412 kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
413 if (kr == KERN_SUCCESS) {
414 return totalin;
415 } else {
416 return 0;
417 }
418 } else {
419 /* If compression wasn't used, get the number of bytes used */
420 return kcdata_memory_get_used_bytes(kcd);
421 }
422 }
423
424 /*
425 * Free up the memory associated with kcdata
426 */
427 kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)428 kcdata_memory_destroy(kcdata_descriptor_t data)
429 {
430 if (!data) {
431 return KERN_INVALID_ARGUMENT;
432 }
433
434 /*
435 * data->kcd_addr_begin points to memory in not tracked by
436 * kcdata lib. So not clearing that here.
437 */
438 kfree_type(struct kcdata_descriptor, data);
439 return KERN_SUCCESS;
440 }
441
442 /* Used by zlib to allocate space in its metadata section */
443 static void *
kcdata_compress_zalloc(void * opaque,u_int items,u_int size)444 kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
445 {
446 void *result;
447 struct kcdata_compress_descriptor *cd = opaque;
448 int alloc_size = ~31L & (31 + (items * size));
449
450 result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
451 if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
452 result = Z_NULL;
453 } else {
454 cd->kcd_cd_offset += alloc_size;
455 }
456
457 kcdata_debug_printf("%s: %d * %d = %d => %p\n", __func__, items, size, items * size, result);
458
459 return result;
460 }
461
/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void)opaque;
	(void)ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * The metadata arena is temporary, so individual frees are a no-op.
	 * Testing has shown zlib only calls this at the end, near
	 * deflateEnd() or a Z_FINISH deflate() call, at which point the
	 * whole arena is reclaimed anyway.
	 */
}
477
478 /* Used to initialize the selected compression algorithm's internal state (if any) */
479 static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data,void (* memcpy_f)(void *,const void *,size_t),uint64_t type,mach_vm_address_t totalout_addr,mach_vm_address_t totalin_addr)480 kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
481 {
482 kern_return_t ret = KERN_SUCCESS;
483 size_t size;
484 int wbits = 12, memlevel = 3;
485 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
486
487 cd->kcd_cd_memcpy_f = memcpy_f;
488 cd->kcd_cd_compression_type = type;
489 cd->kcd_cd_totalout_addr = totalout_addr;
490 cd->kcd_cd_totalin_addr = totalin_addr;
491
492 switch (type) {
493 case KCDCT_ZLIB:
494 /* allocate space for the metadata used by zlib */
495 size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
496 kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
497 kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);
498 void *buf = kcdata_endalloc(data, size);
499 if (buf == NULL) {
500 return KERN_INSUFFICIENT_BUFFER_SIZE;
501 }
502
503 cd->kcd_cd_zs.avail_in = 0;
504 cd->kcd_cd_zs.next_in = NULL;
505 cd->kcd_cd_zs.avail_out = 0;
506 cd->kcd_cd_zs.next_out = NULL;
507 cd->kcd_cd_zs.opaque = cd;
508 cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
509 cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
510 cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
511 data->kcd_length -= size;
512 cd->kcd_cd_offset = 0;
513 cd->kcd_cd_maxoffset = size;
514 cd->kcd_cd_flags = 0;
515
516 kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);
517
518 if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
519 kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
520 ret = KERN_INVALID_ARGUMENT;
521 }
522 break;
523 default:
524 panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
525 }
526
527 return ret;
528 }
529
530
531 /*
532 * Turn on the compression logic for kcdata
533 */
534 kern_return_t
kcdata_init_compress(kcdata_descriptor_t data,int hdr_tag,void (* memcpy_f)(void *,const void *,size_t),uint64_t type)535 kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
536 {
537 kern_return_t kr;
538 mach_vm_address_t user_addr, totalout_addr, totalin_addr;
539 struct _uint64_with_description_data save_data;
540 const uint64_t size_req = sizeof(save_data);
541
542 assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);
543
544 /* reset the compression descriptor */
545 bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));
546
547 /* add the header information */
548 kcdata_add_uint64_with_description(data, type, "kcd_c_type");
549
550 /* reserve space to write total out */
551 bzero(&save_data, size_req);
552 strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
553 kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
554 if (kr != KERN_SUCCESS) {
555 return kr;
556 }
557 memcpy((void *)totalout_addr, &save_data, size_req);
558
559 /* space for total in */
560 bzero(&save_data, size_req);
561 strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
562 kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
563 if (kr != KERN_SUCCESS) {
564 return kr;
565 }
566 memcpy((void *)totalin_addr, &save_data, size_req);
567
568 /* add the inner buffer */
569 kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);
570
571 /* save the flag */
572 data->kcd_flags |= KCFLAG_USE_COMPRESSION;
573
574 /* initialize algorithm specific state */
575 kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
576 if (kr != KERN_SUCCESS) {
577 kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
578 return kr;
579 }
580
581 return KERN_SUCCESS;
582 }
583
584 static inline
585 int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)586 kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
587 {
588 switch (flush) {
589 case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
590 case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
591 case KCDCF_FINISH: return Z_FINISH;
592 default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
593 }
594 }
595
596 static inline
597 int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)598 kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
599 {
600 switch (flush) {
601 case KCDCF_NO_FLUSH: /* fall through */
602 case KCDCF_SYNC_FLUSH: return Z_OK;
603 case KCDCF_FINISH: return Z_STREAM_END;
604 default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
605 }
606 }
607
608 /* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
609 static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data,void * inbuffer,size_t insize,void * outbuffer,size_t outsize,size_t * wrote,enum kcdata_compression_flush flush)610 kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
611 size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
612 enum kcdata_compression_flush flush)
613 {
614 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
615 z_stream *zs = &cd->kcd_cd_zs;
616 int expected_ret, ret;
617
618 zs->next_out = outbuffer;
619 zs->avail_out = (unsigned int) outsize;
620 zs->next_in = inbuffer;
621 zs->avail_in = (unsigned int) insize;
622 ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
623 if (zs->avail_in != 0 || zs->avail_out <= 0) {
624 return KERN_INSUFFICIENT_BUFFER_SIZE;
625 }
626
627 expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
628 if (ret != expected_ret) {
629 /*
630 * Should only fail with catastrophic, unrecoverable cases (i.e.,
631 * corrupted z_stream, or incorrect configuration)
632 */
633 panic("zlib kcdata compression ret = %d", ret);
634 }
635
636 kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
637 __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
638 if (wrote) {
639 *wrote = outsize - zs->avail_out;
640 }
641 return KERN_SUCCESS;
642 }
643
644 /*
645 * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
646 * @outbuffer (of size @outsize). Flush based on the @flush parameter.
647 *
648 * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
649 * @outsize isn't sufficient. Also, writes the number of bytes written in the
650 * @outbuffer to @wrote.
651 */
652 static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data,void * inbuffer,size_t insize,void * outbuffer,size_t outsize,size_t * wrote,enum kcdata_compression_flush flush)653 kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
654 void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
655 {
656 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
657
658 assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);
659
660 kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
661 __func__, outbuffer, outsize, inbuffer, insize, flush);
662
663 /* don't compress if we are in a window */
664 if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
665 assert(cd->kcd_cd_memcpy_f);
666 if (outsize >= insize) {
667 cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
668 if (wrote) {
669 *wrote = insize;
670 }
671 return KERN_SUCCESS;
672 } else {
673 return KERN_INSUFFICIENT_BUFFER_SIZE;
674 }
675 }
676
677 switch (data->kcd_comp_d.kcd_cd_compression_type) {
678 case KCDCT_ZLIB:
679 return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
680 default:
681 panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
682 }
683 }
684
685 static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data,size_t size)686 kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
687 {
688 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
689 z_stream *zs = &cd->kcd_cd_zs;
690
691 return (size_t) deflateBound(zs, (unsigned long) size);
692 }
693
694
695 /*
696 * returns the worst-case, maximum length of the compressed data when
697 * compressing a buffer of size @size using the configured algorithm.
698 */
699 static size_t
kcdata_compression_bound(kcdata_descriptor_t data,size_t size)700 kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
701 {
702 switch (data->kcd_comp_d.kcd_cd_compression_type) {
703 case KCDCT_ZLIB:
704 return kcdata_compression_bound_zlib(data, size);
705 case KCDCT_NONE:
706 return size;
707 default:
708 panic("%s: unknown compression method", __func__);
709 }
710 }
711
/*
 * kcdata_compress_chunk_with_flags:
 * Compress buffer found at @input_data (length @input_size) to the kcdata
 * buffer described by @data. This method will construct the kcdata_item_t
 * required by parsers using the type information @type and flags @flags.
 *
 * Returns KERN_SUCCESS when successful. Currently, asserts on failure.
 */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert(data);
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
	assert(input_data);
	struct kcdata_item info;     /* item header, compressed ahead of the payload */
	char padding_data[16] = {0}; /* zero source for trailing alignment padding */
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * first, get memory space. The uncompressed size must fit in the remained
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	/* compressed output is appended in place at the current end marker */
	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * The next possibly three compresses are needed separately because of the
	 * scatter-gather nature of this operation. The kcdata item header (info)
	 * and padding are on the stack, while the actual data is somewhere else.
	 * */

	/*
	 * create the input stream for info & compress. Only the LAST piece of
	 * the chunk may flush (FINISH when finalizing, otherwise SYNC_FLUSH);
	 * earlier pieces use NO_FLUSH so the stream stays open between them.
	 */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer. */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		if (wrote == 0) {
			return KERN_FAILURE;
		}
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* sanity: the output never exceeds the computed worst-case bound */
	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
818
819 /*
820 * kcdata_compress_chunk:
821 * Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
822 * i.e. padding and also saves the amount of padding bytes.
823 *
824 * Returns are the same as in kcdata_compress_chunk_with_flags()
825 */
826 kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data,uint32_t type,const void * input_data,uint32_t input_size)827 kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
828 {
829 /* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
830 uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
831 return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
832 }
833
834 kern_return_t
kcdata_push_data(kcdata_descriptor_t data,uint32_t type,uint32_t size,const void * input_data)835 kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
836 {
837 if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
838 return kcdata_compress_chunk(data, type, input_data, size);
839 } else {
840 kern_return_t ret;
841 mach_vm_address_t uaddr = 0;
842 ret = kcdata_get_memory_addr(data, type, size, &uaddr);
843 if (ret != KERN_SUCCESS) {
844 return ret;
845 }
846
847 kcdata_memcpy(data, uaddr, input_data, size);
848 return KERN_SUCCESS;
849 }
850 }
851
852 kern_return_t
kcdata_push_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,const void * input_data)853 kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
854 {
855 uint64_t flags = type_of_element;
856 flags = (flags << 32) | count;
857 uint32_t total_size = count * size_of_element;
858 uint32_t pad = kcdata_calc_padding(total_size);
859
860 if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
861 return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
862 } else {
863 kern_return_t ret;
864 mach_vm_address_t uaddr = 0;
865 ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
866 if (ret != KERN_SUCCESS) {
867 return ret;
868 }
869
870 kcdata_memcpy(data, uaddr, input_data, total_size);
871 return KERN_SUCCESS;
872 }
873 }
874
875 /* A few words on how window compression works:
876 *
877 * This is how the buffer looks when the window is opened:
878 *
879 * X---------------------------------------------------------------------X
880 * | | |
881 * | Filled with stackshot data | Zero bytes |
882 * | | |
883 * X---------------------------------------------------------------------X
884 * ^
885 * \ - kcd_addr_end
886 *
887 * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
888 *
889 * Any kcdata_* operation will then push data to the buffer like normal. (If
890 * you call any compressing functions they will pass-through, i.e. no
891 * compression will be done) Once the window is closed, the following takes
892 * place:
893 *
894 * X---------------------------------------------------------------------X
895 * | | | | |
896 * | Existing data | New data | Scratch buffer | |
897 * | | | | |
898 * X---------------------------------------------------------------------X
899 * ^ ^ ^
900 * | | |
901 * \ -kcd_cd_mark_begin | |
902 * | |
903 * \ - kcd_addr_end |
904 * |
905 * kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
906 *
907 * (1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
908 * compression algorithm to compress to the scratch buffer.
909 * (2) The scratch buffer's contents are copied into the area denoted "New
910 * data" above. Effectively overwriting the uncompressed data with the
911 * compressed one.
912 * (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
913 */
914
915 /* Record the state, and restart compression from this later */
916 void
kcdata_compression_window_open(kcdata_descriptor_t data)917 kcdata_compression_window_open(kcdata_descriptor_t data)
918 {
919 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
920 assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
921
922 if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
923 cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
924 cd->kcd_cd_mark_begin = data->kcd_addr_end;
925 }
926 }
927
/*
 * Compress the region between the mark (set by kcdata_compression_window_open)
 * and the current end of the buffer, in place. See the diagram above for the
 * buffer layout during steps (1)-(3).
 */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	/* without compression the window mechanism is a no-op */
	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	/* a window must currently be open */
	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	/* total_size: bytes of uncompressed data accumulated inside the window */
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the remainder
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	/* the scratch buffer starts just past the current end of data */
	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	/* step (1): compress the window contents into the scratch buffer */
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	/* the compressor must not have written past its bound */
	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);

	/* step (2): copy the compressed bytes back over the uncompressed originals */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* step (3): rewind the end marker to just past the compressed data */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
995
996 static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)997 kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
998 {
999 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
1000 z_stream *zs = &cd->kcd_cd_zs;
1001
1002 assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
1003
1004 *totalout = (uint64_t) zs->total_out;
1005 *totalin = (uint64_t) zs->total_in;
1006
1007 return KERN_SUCCESS;
1008 }
1009
1010 static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)1011 kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
1012 {
1013 kern_return_t kr;
1014
1015 switch (data->kcd_comp_d.kcd_cd_compression_type) {
1016 case KCDCT_ZLIB:
1017 kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
1018 break;
1019 case KCDCT_NONE:
1020 *totalout = *totalin = kcdata_memory_get_used_bytes(data);
1021 kr = KERN_SUCCESS;
1022 break;
1023 default:
1024 panic("invalid compression flag 0x%llx in kcdata_write_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
1025 }
1026
1027 return kr;
1028 }
1029
1030 kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)1031 kcdata_write_compression_stats(kcdata_descriptor_t data)
1032 {
1033 kern_return_t kr;
1034 uint64_t totalout, totalin;
1035
1036 kr = kcdata_get_compression_stats(data, &totalout, &totalin);
1037 if (kr != KERN_SUCCESS) {
1038 return kr;
1039 }
1040
1041 *(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
1042 *(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;
1043
1044 return kr;
1045 }
1046
1047 static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)1048 kcdata_finish_compression_zlib(kcdata_descriptor_t data)
1049 {
1050 struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
1051 z_stream *zs = &cd->kcd_cd_zs;
1052
1053 /*
1054 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
1055 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
1056 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
1057 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
1058 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
1059 * This doesn't get the compression metadata; that's zeroed by kcdata_release_endallocs().
1060 *
1061 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
1062 * is done on the same buffer for the before and after calculation so there's nothing functionally
1063 * broken. The same buffer cleanup is done here for completeness' sake.
1064 * From rdar://problem/64381661
1065 */
1066
1067 void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
1068 uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
1069 bzero(stackshot_end, zero_fill_size);
1070
1071 if (deflateEnd(zs) == Z_OK) {
1072 return KERN_SUCCESS;
1073 } else {
1074 return KERN_FAILURE;
1075 }
1076 }
1077
1078 static kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)1079 kcdata_finish_compression(kcdata_descriptor_t data)
1080 {
1081 kcdata_write_compression_stats(data);
1082
1083 switch (data->kcd_comp_d.kcd_cd_compression_type) {
1084 case KCDCT_ZLIB:
1085 return kcdata_finish_compression_zlib(data);
1086 case KCDCT_NONE:
1087 return KERN_SUCCESS;
1088 default:
1089 panic("invalid compression type 0x%llxin kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
1090 }
1091 }
1092
1093 kern_return_t
kcdata_finish(kcdata_descriptor_t data)1094 kcdata_finish(kcdata_descriptor_t data)
1095 {
1096 int ret = KERN_SUCCESS;
1097 if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1098 ret = kcdata_finish_compression(data);
1099 }
1100 kcdata_release_endallocs(data);
1101 return ret;
1102 }
1103
1104 void
kcd_finalize_compression(kcdata_descriptor_t data)1105 kcd_finalize_compression(kcdata_descriptor_t data)
1106 {
1107 if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1108 data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
1109 }
1110 }
1111
1112 /*
1113 * Routine: kcdata_get_memory_addr
1114 * Desc: get memory address in the userspace memory for corpse info
1115 * NOTE: The caller is responsible for zeroing the resulting memory or
1116 * using other means to mark memory if it has failed populating the
1117 * data in middle of operation.
1118 * params: data - pointer describing the crash info allocation
1119 * type - type of data to be put. See corpse.h for defined types
1120 * size - size requested. The header describes this size
1121 * returns: mach_vm_address_t address in user memory for copyout().
1122 */
1123 kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data,uint32_t type,uint32_t size,mach_vm_address_t * user_addr)1124 kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
1125 {
1126 /* record number of padding bytes as lower 4 bits of flags */
1127 uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
1128 return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
1129 }
1130
1131 /*
1132 * Routine: kcdata_add_buffer_end
1133 *
1134 * Desc: Write buffer end marker. This does not advance the end pointer in the
1135 * kcdata_descriptor_t, so it may be used conservatively before additional data
1136 * is added, as long as it is at least called after the last time data is added.
1137 *
1138 * params: data - pointer describing the crash info allocation
1139 */
1140
1141 kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)1142 kcdata_write_buffer_end(kcdata_descriptor_t data)
1143 {
1144 struct kcdata_item info;
1145 bzero(&info, sizeof(info));
1146 info.type = KCDATA_TYPE_BUFFER_END;
1147 info.size = 0;
1148 return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
1149 }
1150
/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details.
 *       Reserves space for one item header plus the (16-byte-aligned) payload,
 *       writes the header, zeroes the padding, and returns the payload address.
 */

static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size = size + sizeof(info);

	/* the third test rejects sizes whose padded total wrapped around */
	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* when compressing, raw reservations are only legal inside an open window */
	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		/* buffer is full: give the grow callback (if any) one chance, then retry */
		if (data->kcd_alloc_callback) {
			size_t const hdr_ftr_sz = 2 * sizeof(info);
			kcdata_descriptor_t new_data = data->kcd_alloc_callback(data, total_size + hdr_ftr_sz);
			if (new_data != NULL) {
				/* adopt the reallocated descriptor in place so callers' pointers stay valid */
				*data = *new_data;
				return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
			}
		}
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* write the item header at the current end */
	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		/* zero only the padding tail of the payload region */
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
1223
1224 /* Routine: kcdata_get_memory_size_for_data
1225 * Desc: returns the amount of memory that is required to store the information
1226 * in kcdata
1227 */
1228 static size_t
kcdata_get_memory_size_for_data(uint32_t size)1229 kcdata_get_memory_size_for_data(uint32_t size)
1230 {
1231 return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
1232 }
1233
1234 /*
1235 * Routine: kcdata_get_memory_addr_for_array
1236 * Desc: get memory address in the userspace memory for corpse info
 * NOTE: The caller is responsible for zeroing the resulting memory or
 * using other means to mark memory if it has failed populating the
1239 * data in middle of operation.
1240 * params: data - pointer describing the crash info allocation
1241 * type_of_element - type of data to be put. See kern_cdata.h for defined types
1242 * size_of_element - size of element. The header describes this size
1243 * count - num of elements in array.
1244 * returns: mach_vm_address_t address in user memory for copyout().
1245 */
1246
1247 kern_return_t
kcdata_get_memory_addr_for_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,mach_vm_address_t * user_addr)1248 kcdata_get_memory_addr_for_array(
1249 kcdata_descriptor_t data,
1250 uint32_t type_of_element,
1251 uint32_t size_of_element,
1252 uint32_t count,
1253 mach_vm_address_t *user_addr)
1254 {
1255 /* for arrays we record the number of padding bytes as the low-order 4 bits
1256 * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
1257 uint64_t flags = type_of_element;
1258 flags = (flags << 32) | count;
1259 uint32_t total_size = count * size_of_element;
1260 uint32_t pad = kcdata_calc_padding(total_size);
1261
1262 return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
1263 }
1264
1265 /*
1266 * Routine: kcdata_add_container_marker
1267 * Desc: Add a container marker in the buffer for type and identifier.
1268 * params: data - pointer describing the crash info allocation
1269 * header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
1270 * container_type - type of data to be put. See kern_cdata.h for defined types
1271 * identifier - unique identifier. This is required to match nested containers.
1272 * returns: return value of kcdata_get_memory_addr()
1273 */
1274
1275 kern_return_t
kcdata_add_container_marker(kcdata_descriptor_t data,uint32_t header_type,uint32_t container_type,uint64_t identifier)1276 kcdata_add_container_marker(
1277 kcdata_descriptor_t data,
1278 uint32_t header_type,
1279 uint32_t container_type,
1280 uint64_t identifier)
1281 {
1282 mach_vm_address_t user_addr;
1283 kern_return_t kr;
1284 uint32_t data_size;
1285
1286 assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
1287
1288 data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
1289
1290 if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
1291 kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
1292 if (kr != KERN_SUCCESS) {
1293 return kr;
1294 }
1295
1296 if (data_size) {
1297 kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
1298 }
1299 } else {
1300 kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
1301 }
1302
1303 return kr;
1304 }
1305
1306 /*
1307 * Routine: kcdata_undo_addcontainer_begin
1308 * Desc: call this after adding a container begin but before adding anything else to revert.
1309 */
1310 kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)1311 kcdata_undo_add_container_begin(kcdata_descriptor_t data)
1312 {
1313 /*
1314 * the payload of a container begin is a single uint64_t. It is padded out
1315 * to 16 bytes.
1316 */
1317 const mach_vm_address_t padded_payload_size = 16;
1318 data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
1319
1320 if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1321 /* setup the end header as well */
1322 return kcdata_write_buffer_end(data);
1323 } else {
1324 return KERN_SUCCESS;
1325 }
1326 }
1327
1328 /*
1329 * Routine: kcdata_memcpy
1330 * Desc: a common function to copy data out based on either copyout or memcopy flags
1331 * params: data - pointer describing the kcdata buffer
1332 * dst_addr - destination address
1333 * src_addr - source address
1334 * size - size in bytes to copy.
1335 * returns: KERN_NO_ACCESS if copyout fails.
1336 */
1337
1338 kern_return_t
kcdata_memcpy(kcdata_descriptor_t data,mach_vm_address_t dst_addr,const void * src_addr,uint32_t size)1339 kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
1340 {
1341 if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1342 if (copyout(src_addr, dst_addr, size)) {
1343 return KERN_NO_ACCESS;
1344 }
1345 } else {
1346 memcpy((void *)dst_addr, src_addr, size);
1347 }
1348 return KERN_SUCCESS;
1349 }
1350
1351 /*
1352 * Routine: kcdata_bzero
1353 * Desc: zero out a portion of a kcdata buffer.
1354 */
1355 kern_return_t
kcdata_bzero(kcdata_descriptor_t data,mach_vm_address_t dst_addr,uint32_t size)1356 kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
1357 {
1358 kern_return_t kr = KERN_SUCCESS;
1359 if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1360 uint8_t zeros[16] = {};
1361 while (size) {
1362 uint32_t block_size = MIN(size, 16);
1363 kr = copyout(&zeros, dst_addr, block_size);
1364 if (kr) {
1365 return KERN_NO_ACCESS;
1366 }
1367 size -= block_size;
1368 }
1369 return KERN_SUCCESS;
1370 } else {
1371 bzero((void*)dst_addr, size);
1372 return KERN_SUCCESS;
1373 }
1374 }
1375
1376 /*
1377 * Routine: kcdata_add_type_definition
1378 * Desc: add type definition to kcdata buffer.
1379 * see feature description in documentation above.
1380 * params: data - pointer describing the kcdata buffer
1381 * type_id - unique type identifier for this data
1382 * type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
1383 * elements_array - address to descriptors for each field in struct
1384 * elements_count - count of how many fields are there in struct.
1385 * returns: return code from kcdata_get_memory_addr in case of failure.
1386 */
1387
1388 kern_return_t
kcdata_add_type_definition(kcdata_descriptor_t data,uint32_t type_id,char * type_name,struct kcdata_subtype_descriptor * elements_array_addr,uint32_t elements_count)1389 kcdata_add_type_definition(
1390 kcdata_descriptor_t data,
1391 uint32_t type_id,
1392 char *type_name,
1393 struct kcdata_subtype_descriptor *elements_array_addr,
1394 uint32_t elements_count)
1395 {
1396 kern_return_t kr = KERN_SUCCESS;
1397 struct kcdata_type_definition kc_type_definition;
1398 mach_vm_address_t user_addr;
1399 uint32_t total_size = sizeof(struct kcdata_type_definition);
1400 bzero(&kc_type_definition, sizeof(kc_type_definition));
1401
1402 if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
1403 return KERN_INVALID_ARGUMENT;
1404 }
1405 strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
1406 kc_type_definition.kct_num_elements = elements_count;
1407 kc_type_definition.kct_type_identifier = type_id;
1408
1409 total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
1410 /* record number of padding bytes as lower 4 bits of flags */
1411 if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
1412 kcdata_calc_padding(total_size), &user_addr))) {
1413 return kr;
1414 }
1415 if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
1416 return kr;
1417 }
1418 user_addr += sizeof(struct kcdata_type_definition);
1419 if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
1420 return kr;
1421 }
1422 return kr;
1423 }
1424
1425 kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc,uint64_t data,const char * description)1426 kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
1427 {
1428 if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1429 return KERN_INVALID_ARGUMENT;
1430 }
1431
1432 kern_return_t kr = 0;
1433 mach_vm_address_t user_addr;
1434 struct _uint64_with_description_data save_data;
1435 const uint64_t size_req = sizeof(save_data);
1436 bzero(&save_data, size_req);
1437
1438 strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1439 save_data.data = data;
1440
1441 if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1442 /* allocate space for the output */
1443 return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
1444 }
1445
1446 kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
1447 if (kr != KERN_SUCCESS) {
1448 return kr;
1449 }
1450
1451 if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1452 if (copyout(&save_data, user_addr, size_req)) {
1453 return KERN_NO_ACCESS;
1454 }
1455 } else {
1456 memcpy((void *)user_addr, &save_data, size_req);
1457 }
1458 return KERN_SUCCESS;
1459 }
1460
1461 kern_return_t
kcdata_add_uint32_with_description(kcdata_descriptor_t data_desc,uint32_t data,const char * description)1462 kcdata_add_uint32_with_description(
1463 kcdata_descriptor_t data_desc,
1464 uint32_t data,
1465 const char *description)
1466 {
1467 assert(strlen(description) < KCDATA_DESC_MAXLEN);
1468 if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1469 return KERN_INVALID_ARGUMENT;
1470 }
1471 kern_return_t kr = 0;
1472 mach_vm_address_t user_addr;
1473 struct _uint32_with_description_data save_data;
1474 const uint64_t size_req = sizeof(save_data);
1475
1476 bzero(&save_data, size_req);
1477 strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1478 save_data.data = data;
1479
1480 if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1481 /* allocate space for the output */
1482 return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
1483 }
1484
1485 kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
1486 if (kr != KERN_SUCCESS) {
1487 return kr;
1488 }
1489
1490 if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1491 if (copyout(&save_data, user_addr, size_req)) {
1492 return KERN_NO_ACCESS;
1493 }
1494 } else {
1495 memcpy((void *)user_addr, &save_data, size_req);
1496 }
1497
1498 return KERN_SUCCESS;
1499 }
1500
1501
1502 /* end buffer management api */
1503