xref: /xnu-8020.140.41/osfmk/kern/kern_cdata.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32 #include <mach/vm_param.h>
33 #include <kern/kern_types.h>
34 #include <kern/mach_param.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/kern_cdata.h>
38 #include <kern/kalloc.h>
39 #include <mach/mach_vm.h>
40 
41 static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
42 static size_t kcdata_get_memory_size_for_data(uint32_t size);
43 static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
44 static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
45 static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
46 static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
47 
48 #ifndef ROUNDUP
49 #define ROUNDUP(x, y)            ((((x)+(y)-1)/(y))*(y))
50 #endif
51 
52 /*
53  * zlib will need to store its metadata and this value is indifferent from the
54  * window bits and other zlib internals
55  */
56 #define ZLIB_METADATA_SIZE 1440
57 
58 /* #define kcdata_debug_printf printf */
59 #define kcdata_debug_printf(...) ;
60 
61 #pragma pack(push, 4)
62 
63 /* Internal structs for convenience */
64 struct _uint64_with_description_data {
65 	char desc[KCDATA_DESC_MAXLEN];
66 	uint64_t data;
67 };
68 
69 struct _uint32_with_description_data {
70 	char     desc[KCDATA_DESC_MAXLEN];
71 	uint32_t data;
72 };
73 
74 #pragma pack(pop)
75 
76 /*
77  * Estimates how large of a buffer that should be allocated for a buffer that will contain
78  * num_items items of known types with overall length payload_size.
79  *
80  * NOTE: This function will not give an accurate estimate for buffers that will
81  *       contain unknown types (those with string descriptions).
82  */
83 uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items,uint32_t payload_size)84 kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
85 {
86 	/*
87 	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
88 	 */
89 	uint32_t max_padding_bytes = 0;
90 	uint32_t max_padding_with_item_description_bytes = 0;
91 	uint32_t estimated_required_buffer_size = 0;
92 	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
93 
94 	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
95 		panic("%s: Overflow in required buffer size estimate", __func__);
96 	}
97 
98 	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
99 		panic("%s: Overflow in required buffer size estimate", __func__);
100 	}
101 
102 	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
103 		panic("%s: Overflow in required buffer size estimate", __func__);
104 	}
105 
106 	return estimated_required_buffer_size;
107 }
108 
109 kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)110 kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
111 {
112 	kcdata_descriptor_t data = NULL;
113 	mach_vm_address_t user_addr = 0;
114 	uint16_t clamped_flags = (uint16_t) flags;
115 
116 	data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
117 	data->kcd_addr_begin = buffer_addr_p;
118 	data->kcd_addr_end = buffer_addr_p;
119 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
120 	data->kcd_length = size;
121 	data->kcd_endalloced = 0;
122 
123 	/* Initialize the BEGIN header */
124 	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
125 		kcdata_memory_destroy(data);
126 		return NULL;
127 	}
128 
129 	return data;
130 }
131 
132 kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data,mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)133 kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
134 {
135 	mach_vm_address_t user_addr = 0;
136 	uint16_t clamped_flags = (uint16_t) flags;
137 
138 	if (data == NULL) {
139 		return KERN_INVALID_ARGUMENT;
140 	}
141 	bzero(data, sizeof(struct kcdata_descriptor));
142 	data->kcd_addr_begin = buffer_addr_p;
143 	data->kcd_addr_end = buffer_addr_p;
144 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
145 	data->kcd_length = size;
146 	data->kcd_endalloced = 0;
147 
148 	/* Initialize the BEGIN header */
149 	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
150 }
151 
152 void *
kcdata_endalloc(kcdata_descriptor_t data,size_t length)153 kcdata_endalloc(kcdata_descriptor_t data, size_t length)
154 {
155 	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
156 	/* round up allocation and ensure return value is uint64-aligned */
157 	size_t toalloc = ROUNDUP(length, sizeof(uint64_t)) + (curend % sizeof(uint64_t));
158 	/* an arbitrary limit: make sure we don't allocate more then 1/4th of the remaining buffer. */
159 	if (data->kcd_length / 4 <= toalloc) {
160 		return NULL;
161 	}
162 	data->kcd_length -= toalloc;
163 	data->kcd_endalloced += toalloc;
164 	return (void *)(curend - toalloc);
165 }
166 
167 /* Zeros and releases data allocated from the end of the buffer */
168 static void
kcdata_release_endallocs(kcdata_descriptor_t data)169 kcdata_release_endallocs(kcdata_descriptor_t data)
170 {
171 	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
172 	size_t endalloced = data->kcd_endalloced;
173 	if (endalloced > 0) {
174 		bzero((void *)curend, endalloced);
175 		data->kcd_length += endalloced;
176 		data->kcd_endalloced = 0;
177 	}
178 }
179 
180 void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)181 kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
182 {
183 	if (data == NULL) {
184 		return NULL;
185 	}
186 
187 	return (void *)data->kcd_addr_begin;
188 }
189 
190 uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)191 kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
192 {
193 	assert(kcd != NULL);
194 	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
195 }
196 
197 uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)198 kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
199 {
200 	kern_return_t kr;
201 
202 	assert(kcd != NULL);
203 	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
204 		uint64_t totalout, totalin;
205 
206 		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
207 		if (kr == KERN_SUCCESS) {
208 			return totalin;
209 		} else {
210 			return 0;
211 		}
212 	} else {
213 		/* If compression wasn't used, get the number of bytes used  */
214 		return kcdata_memory_get_used_bytes(kcd);
215 	}
216 }
217 
218 /*
219  * Free up the memory associated with kcdata
220  */
221 kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)222 kcdata_memory_destroy(kcdata_descriptor_t data)
223 {
224 	if (!data) {
225 		return KERN_INVALID_ARGUMENT;
226 	}
227 
228 	/*
229 	 * data->kcd_addr_begin points to memory in not tracked by
230 	 * kcdata lib. So not clearing that here.
231 	 */
232 	kfree_type(struct kcdata_descriptor, data);
233 	return KERN_SUCCESS;
234 }
235 
/* Used by zlib to allocate space in its metadata section */
static void *
kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
{
	void *result;
	/* opaque was set to the compress descriptor in kcdata_init_compress_state() */
	struct kcdata_compress_descriptor *cd = opaque;
	/* round the request up to the next multiple of 32 bytes */
	int alloc_size = ~31L & (31 + (items * size));

	/* bump-allocate from the scratch region reserved at init time */
	result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
		/* scratch region exhausted; zlib treats Z_NULL as allocation failure */
		result = Z_NULL;
	} else {
		cd->kcd_cd_offset += alloc_size;
	}

	kcdata_debug_printf("%s: %d * %d = %d  => %p\n", __func__, items, size, items * size, result);

	return result;
}
255 
/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void) opaque;
	(void) ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * Deliberately a no-op: the zlib scratch area is temporary and reclaimed
	 * wholesale, so individual frees are unnecessary. Testing has shown zlib
	 * only calls this near deflateEnd() or a Z_FINISH deflate() call anyway.
	 */
}
271 
/* Used to initialize the selected compression algorithm's internal state (if any) */
static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
{
	kern_return_t ret = KERN_SUCCESS;
	size_t size;
	/* small window bits / memory level keep zlib's footprint modest */
	int wbits = 12, memlevel = 3;
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	cd->kcd_cd_memcpy_f = memcpy_f;
	cd->kcd_cd_compression_type = type;
	/* locations of the totalout/totalin placeholders reserved by kcdata_init_compress() */
	cd->kcd_cd_totalout_addr = totalout_addr;
	cd->kcd_cd_totalin_addr = totalin_addr;

	switch (type) {
	case KCDCT_ZLIB:
		/* allocate space for the metadata used by zlib */
		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);
		void *buf = kcdata_endalloc(data, size);
		if (buf == NULL) {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}

		/*
		 * NOTE(review): kcdata_endalloc() above already shrank kcd_length by
		 * roughly `size` bytes, and the base/length assignments below carve
		 * out a SECOND `size`-byte region (`buf` itself is never used).
		 * Confirm this double reservation is intentional before changing it.
		 */
		cd->kcd_cd_zs.avail_in = 0;
		cd->kcd_cd_zs.next_in = NULL;
		cd->kcd_cd_zs.avail_out = 0;
		cd->kcd_cd_zs.next_out = NULL;
		/* zlib allocates its internal state via these hooks from the reserved region */
		cd->kcd_cd_zs.opaque = cd;
		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
		cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
		data->kcd_length -= size;
		cd->kcd_cd_offset = 0;
		cd->kcd_cd_maxoffset = size;
		cd->kcd_cd_flags = 0;

		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);

		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
			ret = KERN_INVALID_ARGUMENT;
		}
		break;
	default:
		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
	}

	return ret;
}
323 
324 
325 /*
326  * Turn on the compression logic for kcdata
327  */
328 kern_return_t
kcdata_init_compress(kcdata_descriptor_t data,int hdr_tag,void (* memcpy_f)(void *,const void *,size_t),uint64_t type)329 kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
330 {
331 	kern_return_t kr;
332 	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
333 	struct _uint64_with_description_data save_data;
334 	const uint64_t size_req = sizeof(save_data);
335 
336 	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);
337 
338 	/* reset the compression descriptor */
339 	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));
340 
341 	/* add the header information */
342 	kcdata_add_uint64_with_description(data, type, "kcd_c_type");
343 
344 	/* reserve space to write total out */
345 	bzero(&save_data, size_req);
346 	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
347 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
348 	if (kr != KERN_SUCCESS) {
349 		return kr;
350 	}
351 	memcpy((void *)totalout_addr, &save_data, size_req);
352 
353 	/* space for total in */
354 	bzero(&save_data, size_req);
355 	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
356 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
357 	if (kr != KERN_SUCCESS) {
358 		return kr;
359 	}
360 	memcpy((void *)totalin_addr, &save_data, size_req);
361 
362 	/* add the inner buffer */
363 	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);
364 
365 	/* save the flag */
366 	data->kcd_flags |= KCFLAG_USE_COMPRESSION;
367 
368 	/* initialize algorithm specific state */
369 	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
370 	if (kr != KERN_SUCCESS) {
371 		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
372 		return kr;
373 	}
374 
375 	return KERN_SUCCESS;
376 }
377 
378 static inline
379 int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)380 kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
381 {
382 	switch (flush) {
383 	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
384 	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
385 	case KCDCF_FINISH: return Z_FINISH;
386 	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
387 	}
388 }
389 
390 static inline
391 int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)392 kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
393 {
394 	switch (flush) {
395 	case KCDCF_NO_FLUSH:         /* fall through */
396 	case KCDCF_SYNC_FLUSH: return Z_OK;
397 	case KCDCF_FINISH: return Z_STREAM_END;
398 	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
399 	}
400 }
401 
/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	/* point the stream at the caller's scatter-gather piece and deflate it */
	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	/*
	 * If deflate could not consume all input, or completely filled the output
	 * (avail_out is unsigned, so <= 0 means == 0), the output space was too
	 * small. Checked before ret so a short buffer is reported as an error
	 * rather than tripping the panic below.
	 */
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	if (wrote) {
		/* report the number of compressed bytes actually emitted */
		*wrote = outsize - zs->avail_out;
	}
	return KERN_SUCCESS;
}
437 
438 /*
439  * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
440  * @outbuffer (of size @outsize). Flush based on the @flush parameter.
441  *
442  * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
443  * @outsize isn't sufficient. Also, writes the number of bytes written in the
444  * @outbuffer to @wrote.
445  */
446 static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data,void * inbuffer,size_t insize,void * outbuffer,size_t outsize,size_t * wrote,enum kcdata_compression_flush flush)447 kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
448     void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
449 {
450 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
451 
452 	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);
453 
454 	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
455 	    __func__, outbuffer, outsize, inbuffer, insize, flush);
456 
457 	/* don't compress if we are in a window */
458 	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
459 		assert(cd->kcd_cd_memcpy_f);
460 		if (outsize >= insize) {
461 			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
462 			if (wrote) {
463 				*wrote = insize;
464 			}
465 			return KERN_SUCCESS;
466 		} else {
467 			return KERN_INSUFFICIENT_BUFFER_SIZE;
468 		}
469 	}
470 
471 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
472 	case KCDCT_ZLIB:
473 		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
474 	default:
475 		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
476 	}
477 }
478 
479 static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data,size_t size)480 kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
481 {
482 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
483 	z_stream *zs = &cd->kcd_cd_zs;
484 
485 	return (size_t) deflateBound(zs, (unsigned long) size);
486 }
487 
488 
489 /*
490  * returns the worst-case, maximum length of the compressed data when
491  * compressing a buffer of size @size using the configured algorithm.
492  */
493 static size_t
kcdata_compression_bound(kcdata_descriptor_t data,size_t size)494 kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
495 {
496 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
497 	case KCDCT_ZLIB:
498 		return kcdata_compression_bound_zlib(data, size);
499 	case KCDCT_NONE:
500 		return size;
501 	default:
502 		panic("%s: unknown compression method", __func__);
503 	}
504 }
505 
/*
 * kcdata_compress_chunk_with_flags:
 *		Compress buffer found at @input_data (length @input_size) to the kcdata
 *		buffer described by @data. This method will construct the kcdata_item_t
 *		required by parsers using the type information @type and flags @flags.
 *
 *	Returns KERN_SUCCESS when successful. Currently, asserts on failure.
 */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert(data);
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
	assert(input_data);
	struct kcdata_item info;
	char padding_data[16] = {0};
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * first, get memory space. The uncompressed size must fit in the remained
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	/* trailing pad bytes keep the next item aligned; folded into info.size below */
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data (the kcdata item header for this chunk) */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * The next possibly three compresses are needed separately because of the
	 * scatter-gather nature of this operation. The kcdata item header (info)
	 * and padding are on the stack, while the actual data is somewhere else.
	 * */

	/*
	 * Create the input stream for info & compress. Only the LAST piece of the
	 * chunk may finish or sync-flush the stream; any earlier piece must use
	 * KCDCF_NO_FLUSH so the pieces compress as one unit.
	 */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer. */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		if (wrote == 0) {
			return KERN_FAILURE;
		}
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* the compressed output must never exceed the bound computed above */
	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
612 
613 /*
614  * kcdata_compress_chunk:
615  *		Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
616  *		i.e. padding and also saves the amount of padding bytes.
617  *
618  * Returns are the same as in kcdata_compress_chunk_with_flags()
619  */
620 kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data,uint32_t type,const void * input_data,uint32_t input_size)621 kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
622 {
623 	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
624 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
625 	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
626 }
627 
628 kern_return_t
kcdata_push_data(kcdata_descriptor_t data,uint32_t type,uint32_t size,const void * input_data)629 kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
630 {
631 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
632 		return kcdata_compress_chunk(data, type, input_data, size);
633 	} else {
634 		kern_return_t ret;
635 		mach_vm_address_t uaddr = 0;
636 		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
637 		if (ret != KERN_SUCCESS) {
638 			return ret;
639 		}
640 
641 		kcdata_memcpy(data, uaddr, input_data, size);
642 		return KERN_SUCCESS;
643 	}
644 }
645 
646 kern_return_t
kcdata_push_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,const void * input_data)647 kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
648 {
649 	uint64_t flags      = type_of_element;
650 	flags               = (flags << 32) | count;
651 	uint32_t total_size = count * size_of_element;
652 	uint32_t pad        = kcdata_calc_padding(total_size);
653 
654 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
655 		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
656 	} else {
657 		kern_return_t ret;
658 		mach_vm_address_t uaddr = 0;
659 		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
660 		if (ret != KERN_SUCCESS) {
661 			return ret;
662 		}
663 
664 		kcdata_memcpy(data, uaddr, input_data, total_size);
665 		return KERN_SUCCESS;
666 	}
667 }
668 
669 /* A few words on how window compression works:
670  *
671  * This is how the buffer looks when the window is opened:
672  *
673  * X---------------------------------------------------------------------X
674  * |                                |                                    |
675  * |   Filled with stackshot data   |            Zero bytes              |
676  * |                                |                                    |
677  * X---------------------------------------------------------------------X
678  *                                  ^
679  *									\ - kcd_addr_end
680  *
681  * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
682  *
683  * Any kcdata_* operation will then push data to the buffer like normal. (If
684  * you call any compressing functions they will pass-through, i.e. no
685  * compression will be done) Once the window is closed, the following takes
686  * place:
687  *
688  * X---------------------------------------------------------------------X
689  * |               |                    |                    |           |
690  * | Existing data |     New data       |   Scratch buffer   |           |
691  * |               |                    |                    |           |
692  * X---------------------------------------------------------------------X
693  *				   ^                    ^                    ^
694  *				   |                    |                    |
695  *				   \ -kcd_cd_mark_begin |                    |
696  *							            |                    |
697  *							            \ - kcd_addr_end     |
698  *							                                 |
699  *		 kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
700  *
701  *	(1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
702  *      compression algorithm to compress to the scratch buffer.
703  *  (2) The scratch buffer's contents are copied into the area denoted "New
704  *      data" above. Effectively overwriting the uncompressed data with the
705  *      compressed one.
706  *  (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
707  */
708 
709 /* Record the state, and restart compression from this later */
710 void
kcdata_compression_window_open(kcdata_descriptor_t data)711 kcdata_compression_window_open(kcdata_descriptor_t data)
712 {
713 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
714 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
715 
716 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
717 		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
718 		cd->kcd_cd_mark_begin = data->kcd_addr_end;
719 	}
720 }
721 
/* Compress the region between the mark and the current end */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	/* uncompressed bytes written since the window was opened */
	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the remained
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker; kcdata_do_compress() now compresses for real */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	/* compress the windowed data into the scratch area just past kcd_addr_end */
	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining  -= wrote;

	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);

	/* copy the compressed bytes back over the start of the (uncompressed) window */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker to just past the compressed data */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
789 
790 static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)791 kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
792 {
793 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
794 	z_stream *zs = &cd->kcd_cd_zs;
795 
796 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
797 
798 	*totalout = (uint64_t) zs->total_out;
799 	*totalin = (uint64_t) zs->total_in;
800 
801 	return KERN_SUCCESS;
802 }
803 
804 static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)805 kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
806 {
807 	kern_return_t kr;
808 
809 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
810 	case KCDCT_ZLIB:
811 		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
812 		break;
813 	case KCDCT_NONE:
814 		*totalout = *totalin = kcdata_memory_get_used_bytes(data);
815 		kr = KERN_SUCCESS;
816 		break;
817 	default:
818 		panic("invalid compression flag 0x%llx in kcdata_write_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
819 	}
820 
821 	return kr;
822 }
823 
824 kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)825 kcdata_write_compression_stats(kcdata_descriptor_t data)
826 {
827 	kern_return_t kr;
828 	uint64_t totalout, totalin;
829 
830 	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
831 	if (kr != KERN_SUCCESS) {
832 		return kr;
833 	}
834 
835 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
836 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;
837 
838 	return kr;
839 }
840 
static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 * This doesn't get the compression metadata; that's zeroed by kcdata_release_endallocs().
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */

	/* Zero everything between the end of valid data and the end of the buffer. */
	void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	/* NOTE(review): assumes used bytes <= kcd_length; otherwise this
	 * subtraction wraps — confirm against callers. */
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	/* Tear down the zlib stream state. */
	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
871 
872 static kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)873 kcdata_finish_compression(kcdata_descriptor_t data)
874 {
875 	kcdata_write_compression_stats(data);
876 
877 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
878 	case KCDCT_ZLIB:
879 		return kcdata_finish_compression_zlib(data);
880 	case KCDCT_NONE:
881 		return KERN_SUCCESS;
882 	default:
883 		panic("invalid compression type 0x%llxin kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
884 	}
885 }
886 
887 kern_return_t
kcdata_finish(kcdata_descriptor_t data)888 kcdata_finish(kcdata_descriptor_t data)
889 {
890 	int ret = KERN_SUCCESS;
891 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
892 		ret = kcdata_finish_compression(data);
893 	}
894 	kcdata_release_endallocs(data);
895 	return ret;
896 }
897 
898 void
kcd_finalize_compression(kcdata_descriptor_t data)899 kcd_finalize_compression(kcdata_descriptor_t data)
900 {
901 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
902 		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
903 	}
904 }
905 
906 /*
907  * Routine: kcdata_get_memory_addr
908  * Desc: get memory address in the userspace memory for corpse info
909  *       NOTE: The caller is responsible for zeroing the resulting memory or
910  *             using other means to mark memory if it has failed populating the
911  *             data in middle of operation.
912  * params:  data - pointer describing the crash info allocation
913  *	        type - type of data to be put. See corpse.h for defined types
914  *          size - size requested. The header describes this size
915  * returns: mach_vm_address_t address in user memory for copyout().
916  */
917 kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data,uint32_t type,uint32_t size,mach_vm_address_t * user_addr)918 kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
919 {
920 	/* record number of padding bytes as lower 4 bits of flags */
921 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
922 	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
923 }
924 
925 /*
926  * Routine: kcdata_add_buffer_end
927  *
928  * Desc: Write buffer end marker.  This does not advance the end pointer in the
929  * kcdata_descriptor_t, so it may be used conservatively before additional data
930  * is added, as long as it is at least called after the last time data is added.
931  *
932  * params:  data - pointer describing the crash info allocation
933  */
934 
935 kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)936 kcdata_write_buffer_end(kcdata_descriptor_t data)
937 {
938 	struct kcdata_item info;
939 	bzero(&info, sizeof(info));
940 	info.type = KCDATA_TYPE_BUFFER_END;
941 	info.size = 0;
942 	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
943 }
944 
/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
 */

static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size  = size + sizeof(info);

	/* the < orig_size comparison detects uint32 wraparound from padding/header */
	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* compressed descriptors may only append inside an open compression window */
	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type  = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* write the item header at the current end of the buffer */
	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		/* zero only the pad bytes at the tail of the payload region */
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	/* hand the payload address to the caller and advance past it */
	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
1009 
1010 /* Routine: kcdata_get_memory_size_for_data
1011  * Desc: returns the amount of memory that is required to store the information
1012  *       in kcdata
1013  */
1014 static size_t
kcdata_get_memory_size_for_data(uint32_t size)1015 kcdata_get_memory_size_for_data(uint32_t size)
1016 {
1017 	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
1018 }
1019 
1020 /*
1021  * Routine: kcdata_get_memory_addr_for_array
1022  * Desc: get memory address in the userspace memory for corpse info
1023  *       NOTE: The caller is responsible to zero the resulting memory or
1024  *             user other means to mark memory if it has failed populating the
1025  *             data in middle of operation.
1026  * params:  data - pointer describing the crash info allocation
1027  *          type_of_element - type of data to be put. See kern_cdata.h for defined types
1028  *          size_of_element - size of element. The header describes this size
1029  *          count - num of elements in array.
1030  * returns: mach_vm_address_t address in user memory for copyout().
1031  */
1032 
1033 kern_return_t
kcdata_get_memory_addr_for_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,mach_vm_address_t * user_addr)1034 kcdata_get_memory_addr_for_array(
1035 	kcdata_descriptor_t data,
1036 	uint32_t type_of_element,
1037 	uint32_t size_of_element,
1038 	uint32_t count,
1039 	mach_vm_address_t *user_addr)
1040 {
1041 	/* for arrays we record the number of padding bytes as the low-order 4 bits
1042 	 * of the type field.  KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
1043 	uint64_t flags      = type_of_element;
1044 	flags               = (flags << 32) | count;
1045 	uint32_t total_size = count * size_of_element;
1046 	uint32_t pad        = kcdata_calc_padding(total_size);
1047 
1048 	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
1049 }
1050 
1051 /*
1052  * Routine: kcdata_add_container_marker
1053  * Desc: Add a container marker in the buffer for type and identifier.
1054  * params:  data - pointer describing the crash info allocation
1055  *          header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
1056  *          container_type - type of data to be put. See kern_cdata.h for defined types
1057  *          identifier - unique identifier. This is required to match nested containers.
1058  * returns: return value of kcdata_get_memory_addr()
1059  */
1060 
1061 kern_return_t
kcdata_add_container_marker(kcdata_descriptor_t data,uint32_t header_type,uint32_t container_type,uint64_t identifier)1062 kcdata_add_container_marker(
1063 	kcdata_descriptor_t data,
1064 	uint32_t header_type,
1065 	uint32_t container_type,
1066 	uint64_t identifier)
1067 {
1068 	mach_vm_address_t user_addr;
1069 	kern_return_t kr;
1070 	uint32_t data_size;
1071 
1072 	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
1073 
1074 	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
1075 
1076 	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
1077 		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
1078 		if (kr != KERN_SUCCESS) {
1079 			return kr;
1080 		}
1081 
1082 		if (data_size) {
1083 			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
1084 		}
1085 	} else {
1086 		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
1087 	}
1088 
1089 	return kr;
1090 }
1091 
1092 /*
1093  * Routine: kcdata_undo_addcontainer_begin
1094  * Desc: call this after adding a container begin but before adding anything else to revert.
1095  */
1096 kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)1097 kcdata_undo_add_container_begin(kcdata_descriptor_t data)
1098 {
1099 	/*
1100 	 * the payload of a container begin is a single uint64_t.  It is padded out
1101 	 * to 16 bytes.
1102 	 */
1103 	const mach_vm_address_t padded_payload_size = 16;
1104 	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
1105 
1106 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1107 		/* setup the end header as well */
1108 		return kcdata_write_buffer_end(data);
1109 	} else {
1110 		return KERN_SUCCESS;
1111 	}
1112 }
1113 
1114 /*
1115  * Routine: kcdata_memcpy
1116  * Desc: a common function to copy data out based on either copyout or memcopy flags
1117  * params:  data - pointer describing the kcdata buffer
1118  *          dst_addr - destination address
1119  *          src_addr - source address
1120  *          size - size in bytes to copy.
1121  * returns: KERN_NO_ACCESS if copyout fails.
1122  */
1123 
1124 kern_return_t
kcdata_memcpy(kcdata_descriptor_t data,mach_vm_address_t dst_addr,const void * src_addr,uint32_t size)1125 kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
1126 {
1127 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1128 		if (copyout(src_addr, dst_addr, size)) {
1129 			return KERN_NO_ACCESS;
1130 		}
1131 	} else {
1132 		memcpy((void *)dst_addr, src_addr, size);
1133 	}
1134 	return KERN_SUCCESS;
1135 }
1136 
1137 /*
1138  * Routine: kcdata_bzero
1139  * Desc: zero out a portion of a kcdata buffer.
1140  */
1141 kern_return_t
kcdata_bzero(kcdata_descriptor_t data,mach_vm_address_t dst_addr,uint32_t size)1142 kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
1143 {
1144 	kern_return_t kr = KERN_SUCCESS;
1145 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1146 		uint8_t zeros[16] = {};
1147 		while (size) {
1148 			uint32_t block_size = MIN(size, 16);
1149 			kr = copyout(&zeros, dst_addr, block_size);
1150 			if (kr) {
1151 				return KERN_NO_ACCESS;
1152 			}
1153 			size -= block_size;
1154 		}
1155 		return KERN_SUCCESS;
1156 	} else {
1157 		bzero((void*)dst_addr, size);
1158 		return KERN_SUCCESS;
1159 	}
1160 }
1161 
/*
 * Routine: kcdata_add_type_definition
 * Desc: add type definition to kcdata buffer.
 *       see feature description in documentation above.
 * params:  data - pointer describing the kcdata buffer
 *          type_id - unique type identifier for this data
 *          type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
 *          elements_array - address to descriptors for each field in struct
 *          elements_count - count of how many fields are there in struct.
 * returns: return code from kcdata_get_memory_addr in case of failure.
 */

kern_return_t
kcdata_add_type_definition(
	kcdata_descriptor_t data,
	uint32_t type_id,
	char *type_name,
	struct kcdata_subtype_descriptor *elements_array_addr,
	uint32_t elements_count)
{
	kern_return_t kr = KERN_SUCCESS;
	struct kcdata_type_definition kc_type_definition;
	mach_vm_address_t user_addr;
	uint32_t total_size = sizeof(struct kcdata_type_definition);
	bzero(&kc_type_definition, sizeof(kc_type_definition));

	/* the name (including NUL) must fit the fixed-size kct_name field */
	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
	kc_type_definition.kct_num_elements = elements_count;
	kc_type_definition.kct_type_identifier = type_id;

	/* NOTE(review): this uint32 multiply can wrap for very large
	 * elements_count — assumes callers bound it; confirm. */
	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
	/* record number of padding bytes as lower 4 bits of flags */
	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
	    kcdata_calc_padding(total_size), &user_addr))) {
		return kr;
	}
	/* copy the fixed header, then the per-field subtype descriptors after it */
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
		return kr;
	}
	user_addr += sizeof(struct kcdata_type_definition);
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
		return kr;
	}
	return kr;
}
1210 
1211 kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc,uint64_t data,const char * description)1212 kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
1213 {
1214 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1215 		return KERN_INVALID_ARGUMENT;
1216 	}
1217 
1218 	kern_return_t kr = 0;
1219 	mach_vm_address_t user_addr;
1220 	struct _uint64_with_description_data save_data;
1221 	const uint64_t size_req = sizeof(save_data);
1222 	bzero(&save_data, size_req);
1223 
1224 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1225 	save_data.data = data;
1226 
1227 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1228 		/* allocate space for the output */
1229 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
1230 	}
1231 
1232 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
1233 	if (kr != KERN_SUCCESS) {
1234 		return kr;
1235 	}
1236 
1237 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1238 		if (copyout(&save_data, user_addr, size_req)) {
1239 			return KERN_NO_ACCESS;
1240 		}
1241 	} else {
1242 		memcpy((void *)user_addr, &save_data, size_req);
1243 	}
1244 	return KERN_SUCCESS;
1245 }
1246 
1247 kern_return_t
kcdata_add_uint32_with_description(kcdata_descriptor_t data_desc,uint32_t data,const char * description)1248 kcdata_add_uint32_with_description(
1249 	kcdata_descriptor_t data_desc,
1250 	uint32_t data,
1251 	const char *description)
1252 {
1253 	assert(strlen(description) < KCDATA_DESC_MAXLEN);
1254 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1255 		return KERN_INVALID_ARGUMENT;
1256 	}
1257 	kern_return_t kr = 0;
1258 	mach_vm_address_t user_addr;
1259 	struct _uint32_with_description_data save_data;
1260 	const uint64_t size_req = sizeof(save_data);
1261 
1262 	bzero(&save_data, size_req);
1263 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1264 	save_data.data = data;
1265 
1266 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1267 		/* allocate space for the output */
1268 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
1269 	}
1270 
1271 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
1272 	if (kr != KERN_SUCCESS) {
1273 		return kr;
1274 	}
1275 
1276 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1277 		if (copyout(&save_data, user_addr, size_req)) {
1278 			return KERN_NO_ACCESS;
1279 		}
1280 	} else {
1281 		memcpy((void *)user_addr, &save_data, size_req);
1282 	}
1283 
1284 	return KERN_SUCCESS;
1285 }
1286 
1287 
1288 /* end buffer management api */
1289