xref: /xnu-8019.80.24/osfmk/kern/kern_cdata.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32 #include <mach/vm_param.h>
33 #include <kern/kern_types.h>
34 #include <kern/mach_param.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/kern_cdata.h>
38 #include <kern/kalloc.h>
39 #include <mach/mach_vm.h>
40 
41 static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
42 static size_t kcdata_get_memory_size_for_data(uint32_t size);
43 static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
44 static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
45 static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
46 static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
47 
/*
 * zlib needs space to store its metadata; this value is independent of the
 * window bits and other zlib internals.
 */
52 #define ZLIB_METADATA_SIZE 1440
53 
54 /* #define kcdata_debug_printf printf */
55 #define kcdata_debug_printf(...) ;
56 
57 #pragma pack(push, 4)
58 
59 /* Internal structs for convenience */
60 struct _uint64_with_description_data {
61 	char desc[KCDATA_DESC_MAXLEN];
62 	uint64_t data;
63 };
64 
65 struct _uint32_with_description_data {
66 	char     desc[KCDATA_DESC_MAXLEN];
67 	uint32_t data;
68 };
69 
70 #pragma pack(pop)
71 
72 /*
73  * Estimates how large of a buffer that should be allocated for a buffer that will contain
74  * num_items items of known types with overall length payload_size.
75  *
76  * NOTE: This function will not give an accurate estimate for buffers that will
77  *       contain unknown types (those with string descriptions).
78  */
79 uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items,uint32_t payload_size)80 kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
81 {
82 	/*
83 	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
84 	 */
85 	uint32_t max_padding_bytes = 0;
86 	uint32_t max_padding_with_item_description_bytes = 0;
87 	uint32_t estimated_required_buffer_size = 0;
88 	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
89 
90 	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
91 		panic("%s: Overflow in required buffer size estimate", __func__);
92 	}
93 
94 	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
95 		panic("%s: Overflow in required buffer size estimate", __func__);
96 	}
97 
98 	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
99 		panic("%s: Overflow in required buffer size estimate", __func__);
100 	}
101 
102 	return estimated_required_buffer_size;
103 }
104 
105 kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)106 kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
107 {
108 	kcdata_descriptor_t data = NULL;
109 	mach_vm_address_t user_addr = 0;
110 	uint16_t clamped_flags = (uint16_t) flags;
111 
112 	data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
113 	data->kcd_addr_begin = buffer_addr_p;
114 	data->kcd_addr_end = buffer_addr_p;
115 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
116 	data->kcd_length = size;
117 
118 	/* Initialize the BEGIN header */
119 	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
120 		kcdata_memory_destroy(data);
121 		return NULL;
122 	}
123 
124 	return data;
125 }
126 
127 kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data,mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)128 kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
129 {
130 	mach_vm_address_t user_addr = 0;
131 	uint16_t clamped_flags = (uint16_t) flags;
132 
133 	if (data == NULL) {
134 		return KERN_INVALID_ARGUMENT;
135 	}
136 	bzero(data, sizeof(struct kcdata_descriptor));
137 	data->kcd_addr_begin = buffer_addr_p;
138 	data->kcd_addr_end = buffer_addr_p;
139 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
140 	data->kcd_length = size;
141 
142 	/* Initialize the BEGIN header */
143 	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
144 }
145 
146 void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)147 kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
148 {
149 	if (data == NULL) {
150 		return NULL;
151 	}
152 
153 	return (void *)data->kcd_addr_begin;
154 }
155 
156 uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)157 kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
158 {
159 	assert(kcd != NULL);
160 	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
161 }
162 
163 uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)164 kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
165 {
166 	kern_return_t kr;
167 
168 	assert(kcd != NULL);
169 	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
170 		uint64_t totalout, totalin;
171 
172 		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
173 		if (kr == KERN_SUCCESS) {
174 			return totalin;
175 		} else {
176 			return 0;
177 		}
178 	} else {
179 		/* If compression wasn't used, get the number of bytes used  */
180 		return kcdata_memory_get_used_bytes(kcd);
181 	}
182 }
183 
184 /*
185  * Free up the memory associated with kcdata
186  */
187 kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)188 kcdata_memory_destroy(kcdata_descriptor_t data)
189 {
190 	if (!data) {
191 		return KERN_INVALID_ARGUMENT;
192 	}
193 
194 	/*
195 	 * data->kcd_addr_begin points to memory in not tracked by
196 	 * kcdata lib. So not clearing that here.
197 	 */
198 	kfree_type(struct kcdata_descriptor, data);
199 	return KERN_SUCCESS;
200 }
201 
202 /* Used by zlib to allocate space in its metadata section */
203 static void *
kcdata_compress_zalloc(void * opaque,u_int items,u_int size)204 kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
205 {
206 	void *result;
207 	struct kcdata_compress_descriptor *cd = opaque;
208 	int alloc_size = ~31L & (31 + (items * size));
209 
210 	result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
211 	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
212 		result = Z_NULL;
213 	} else {
214 		cd->kcd_cd_offset += alloc_size;
215 	}
216 
217 	kcdata_debug_printf("%s: %d * %d = %d  => %p\n", __func__, items, size, items * size, result);
218 
219 	return result;
220 }
221 
222 /* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void) opaque;
	(void) ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * The metadata buffers are temporary, so nothing is actually released
	 * here. Besides, testing has shown that zlib only calls this at the
	 * end, near deflateEnd() or a Z_FINISH deflate() call.
	 */
}
237 
238 /* Used to initialize the selected compression algorithm's internal state (if any) */
static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
{
	kern_return_t ret = KERN_SUCCESS;
	size_t size;
	/* zlib tuning: window bits and memory level passed to deflateInit2 below */
	int wbits = 12, memlevel = 3;
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	cd->kcd_cd_memcpy_f = memcpy_f;
	cd->kcd_cd_compression_type = type;
	/* addresses where the in/out totals get written back later */
	cd->kcd_cd_totalout_addr = totalout_addr;
	cd->kcd_cd_totalin_addr = totalin_addr;

	switch (type) {
	case KCDCT_ZLIB:
		/* allocate space for the metadata used by zlib */
		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);

		/* refuse if zlib metadata would take more than 1/4 of the buffer */
		if (4 * size > data->kcd_length) {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}

		/* zlib allocates its state through our zalloc/zfree (opaque = cd) */
		cd->kcd_cd_zs.avail_in = 0;
		cd->kcd_cd_zs.next_in = NULL;
		cd->kcd_cd_zs.avail_out = 0;
		cd->kcd_cd_zs.next_out = NULL;
		cd->kcd_cd_zs.opaque = cd;
		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
		/* carve the metadata area out of the tail of the kcdata buffer
		 * and shrink kcd_length so normal writes never reach into it */
		cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
		data->kcd_length -= size;
		cd->kcd_cd_offset = 0;
		cd->kcd_cd_maxoffset = size;
		cd->kcd_cd_flags = 0;

		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);

		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
			ret = KERN_INVALID_ARGUMENT;
		}
		break;
	default:
		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
	}

	return ret;
}
289 
290 
291 /*
292  * Turn on the compression logic for kcdata
293  */
294 kern_return_t
kcdata_init_compress(kcdata_descriptor_t data,int hdr_tag,void (* memcpy_f)(void *,const void *,size_t),uint64_t type)295 kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
296 {
297 	kern_return_t kr;
298 	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
299 	struct _uint64_with_description_data save_data;
300 	const uint64_t size_req = sizeof(save_data);
301 
302 	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);
303 
304 	/* reset the compression descriptor */
305 	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));
306 
307 	/* add the header information */
308 	kcdata_add_uint64_with_description(data, type, "kcd_c_type");
309 
310 	/* reserve space to write total out */
311 	bzero(&save_data, size_req);
312 	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
313 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
314 	if (kr != KERN_SUCCESS) {
315 		return kr;
316 	}
317 	memcpy((void *)totalout_addr, &save_data, size_req);
318 
319 	/* space for total in */
320 	bzero(&save_data, size_req);
321 	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
322 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
323 	if (kr != KERN_SUCCESS) {
324 		return kr;
325 	}
326 	memcpy((void *)totalin_addr, &save_data, size_req);
327 
328 	/* add the inner buffer */
329 	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);
330 
331 	/* save the flag */
332 	data->kcd_flags |= KCFLAG_USE_COMPRESSION;
333 
334 	/* initialize algorithm specific state */
335 	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
336 	if (kr != KERN_SUCCESS) {
337 		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
338 		return kr;
339 	}
340 
341 	return KERN_SUCCESS;
342 }
343 
344 static inline
345 int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)346 kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
347 {
348 	switch (flush) {
349 	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
350 	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
351 	case KCDCF_FINISH: return Z_FINISH;
352 	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
353 	}
354 }
355 
356 static inline
357 int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)358 kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
359 {
360 	switch (flush) {
361 	case KCDCF_NO_FLUSH:         /* fall through */
362 	case KCDCF_SYNC_FLUSH: return Z_OK;
363 	case KCDCF_FINISH: return Z_STREAM_END;
364 	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
365 	}
366 }
367 
368 /* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	/* point the stream at the caller's buffers and run a single deflate */
	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	/*
	 * Leftover input or a completely consumed output buffer both mean the
	 * output space was too small (avail_out is unsigned, so <= 0 is == 0).
	 */
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	/* report how many compressed bytes landed in outbuffer */
	if (wrote) {
		*wrote = outsize - zs->avail_out;
	}
	return KERN_SUCCESS;
}
403 
404 /*
405  * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
406  * @outbuffer (of size @outsize). Flush based on the @flush parameter.
407  *
408  * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
409  * @outsize isn't sufficient. Also, writes the number of bytes written in the
410  * @outbuffer to @wrote.
411  */
static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
    void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush);

	/*
	 * don't compress if we are in a window (open windows pass data through
	 * verbatim and are compressed later by window_close), or if no
	 * compression algorithm is configured at all
	 */
	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
		assert(cd->kcd_cd_memcpy_f);
		if (outsize >= insize) {
			/* pass-through copy via the configured memcpy routine */
			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
			if (wrote) {
				*wrote = insize;
			}
			return KERN_SUCCESS;
		} else {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	/* dispatch to the configured compression backend */
	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
	default:
		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
	}
}
444 
445 static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data,size_t size)446 kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
447 {
448 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
449 	z_stream *zs = &cd->kcd_cd_zs;
450 
451 	return (size_t) deflateBound(zs, (unsigned long) size);
452 }
453 
454 
455 /*
456  * returns the worst-case, maximum length of the compressed data when
457  * compressing a buffer of size @size using the configured algorithm.
458  */
459 static size_t
kcdata_compression_bound(kcdata_descriptor_t data,size_t size)460 kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
461 {
462 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
463 	case KCDCT_ZLIB:
464 		return kcdata_compression_bound_zlib(data, size);
465 	case KCDCT_NONE:
466 		return size;
467 	default:
468 		panic("%s: unknown compression method", __func__);
469 	}
470 }
471 
472 /*
473  * kcdata_compress_chunk_with_flags:
474  *		Compress buffer found at @input_data (length @input_size) to the kcdata
475  *		buffer described by @data. This method will construct the kcdata_item_t
476  *		required by parsers using the type information @type and flags @flags.
477  *
478  *	Returns KERN_SUCCESS when successful. Currently, asserts on failure.
479  */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert(data);
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
	assert(input_data);
	struct kcdata_item info;
	char padding_data[16] = {0};
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * first, get memory space. The uncompressed size must fit in the
	 * remainder of the kcdata buffer, in case the compression algorithm
	 * doesn't actually compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	/* compressed output accumulates at the current end of the buffer */
	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data (the kcdata item header parsers expect) */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * The next possibly three compresses are needed separately because of the
	 * scatter-gather nature of this operation. The kcdata item header (info)
	 * and padding are on the stack, while the actual data is somewhere else.
	 * Only the LAST piece in the chunk may flush/finish the stream, hence
	 * the flush mode is recomputed before each piece.
	 * */

	/* create the input stream for info & compress */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer. */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		if (wrote == 0) {
			return KERN_FAILURE;
		}
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* total bytes written must stay within the bound we checked above */
	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
578 
579 /*
580  * kcdata_compress_chunk:
581  *		Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
582  *		i.e. padding and also saves the amount of padding bytes.
583  *
584  * Returns are the same as in kcdata_compress_chunk_with_flags()
585  */
586 kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data,uint32_t type,const void * input_data,uint32_t input_size)587 kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
588 {
589 	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
590 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
591 	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
592 }
593 
594 kern_return_t
kcdata_push_data(kcdata_descriptor_t data,uint32_t type,uint32_t size,const void * input_data)595 kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
596 {
597 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
598 		return kcdata_compress_chunk(data, type, input_data, size);
599 	} else {
600 		kern_return_t ret;
601 		mach_vm_address_t uaddr = 0;
602 		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
603 		if (ret != KERN_SUCCESS) {
604 			return ret;
605 		}
606 
607 		kcdata_memcpy(data, uaddr, input_data, size);
608 		return KERN_SUCCESS;
609 	}
610 }
611 
612 kern_return_t
kcdata_push_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,const void * input_data)613 kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
614 {
615 	uint64_t flags      = type_of_element;
616 	flags               = (flags << 32) | count;
617 	uint32_t total_size = count * size_of_element;
618 	uint32_t pad        = kcdata_calc_padding(total_size);
619 
620 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
621 		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
622 	} else {
623 		kern_return_t ret;
624 		mach_vm_address_t uaddr = 0;
625 		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
626 		if (ret != KERN_SUCCESS) {
627 			return ret;
628 		}
629 
630 		kcdata_memcpy(data, uaddr, input_data, total_size);
631 		return KERN_SUCCESS;
632 	}
633 }
634 
635 /* A few words on how window compression works:
636  *
637  * This is how the buffer looks when the window is opened:
638  *
639  * X---------------------------------------------------------------------X
640  * |                                |                                    |
641  * |   Filled with stackshot data   |            Zero bytes              |
642  * |                                |                                    |
643  * X---------------------------------------------------------------------X
644  *                                  ^
645  *									\ - kcd_addr_end
646  *
647  * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
648  *
649  * Any kcdata_* operation will then push data to the buffer like normal. (If
650  * you call any compressing functions they will pass-through, i.e. no
651  * compression will be done) Once the window is closed, the following takes
652  * place:
653  *
654  * X---------------------------------------------------------------------X
655  * |               |                    |                    |           |
656  * | Existing data |     New data       |   Scratch buffer   |           |
657  * |               |                    |                    |           |
658  * X---------------------------------------------------------------------X
659  *				   ^                    ^                    ^
660  *				   |                    |                    |
661  *				   \ -kcd_cd_mark_begin |                    |
662  *							            |                    |
663  *							            \ - kcd_addr_end     |
664  *							                                 |
665  *		 kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
666  *
667  *	(1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
668  *      compression algorithm to compress to the scratch buffer.
669  *  (2) The scratch buffer's contents are copied into the area denoted "New
670  *      data" above. Effectively overwriting the uncompressed data with the
671  *      compressed one.
672  *  (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
673  */
674 
675 /* Record the state, and restart compression from this later */
676 void
kcdata_compression_window_open(kcdata_descriptor_t data)677 kcdata_compression_window_open(kcdata_descriptor_t data)
678 {
679 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
680 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
681 
682 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
683 		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
684 		cd->kcd_cd_mark_begin = data->kcd_addr_end;
685 	}
686 }
687 
688 /* Compress the region between the mark and the current end */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	/* a window must have been opened by kcdata_compression_window_open() */
	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	/* amount of uncompressed data the window accumulated */
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the
	 * remainder of the kcdata buffer, in case the compression algorithm
	 * doesn't actually compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker (re-enables real compression in do_compress) */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	/* compress the window contents into the scratch area past kcd_addr_end */
	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining  -= wrote;

	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);

	/* copy to the original location, overwriting the uncompressed data */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker to just past the compressed copy */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
755 
756 static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)757 kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
758 {
759 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
760 	z_stream *zs = &cd->kcd_cd_zs;
761 
762 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
763 
764 	*totalout = (uint64_t) zs->total_out;
765 	*totalin = (uint64_t) zs->total_in;
766 
767 	return KERN_SUCCESS;
768 }
769 
770 static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)771 kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
772 {
773 	kern_return_t kr;
774 
775 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
776 	case KCDCT_ZLIB:
777 		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
778 		break;
779 	case KCDCT_NONE:
780 		*totalout = *totalin = kcdata_memory_get_used_bytes(data);
781 		kr = KERN_SUCCESS;
782 		break;
783 	default:
784 		panic("invalid compression flag 0x%llx in kcdata_write_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
785 	}
786 
787 	return kr;
788 }
789 
790 kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)791 kcdata_write_compression_stats(kcdata_descriptor_t data)
792 {
793 	kern_return_t kr;
794 	uint64_t totalout, totalin;
795 
796 	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
797 	if (kr != KERN_SUCCESS) {
798 		return kr;
799 	}
800 
801 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
802 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;
803 
804 	return kr;
805 }
806 
/*
 * Tear down the zlib compressor for this descriptor: scrub the unused tail
 * of the buffer, then release zlib's internal state via deflateEnd().
 * Returns KERN_SUCCESS when deflateEnd() reports Z_OK, KERN_FAILURE otherwise.
 */
static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */

	/* zero everything between the end of the valid data and the end of the buffer */
	void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
836 
837 kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)838 kcdata_finish_compression(kcdata_descriptor_t data)
839 {
840 	kcdata_write_compression_stats(data);
841 
842 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
843 	case KCDCT_ZLIB:
844 		data->kcd_length += data->kcd_comp_d.kcd_cd_maxoffset;
845 		return kcdata_finish_compression_zlib(data);
846 	case KCDCT_NONE:
847 		return KERN_SUCCESS;
848 	default:
849 		panic("invalid compression type 0x%llxin kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
850 	}
851 }
852 
853 void
kcd_finalize_compression(kcdata_descriptor_t data)854 kcd_finalize_compression(kcdata_descriptor_t data)
855 {
856 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
857 		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
858 	}
859 }
860 
861 /*
862  * Routine: kcdata_get_memory_addr
863  * Desc: get memory address in the userspace memory for corpse info
864  *       NOTE: The caller is responsible for zeroing the resulting memory or
865  *             using other means to mark memory if it has failed populating the
866  *             data in middle of operation.
867  * params:  data - pointer describing the crash info allocation
868  *	        type - type of data to be put. See corpse.h for defined types
869  *          size - size requested. The header describes this size
870  * returns: mach_vm_address_t address in user memory for copyout().
871  */
872 kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data,uint32_t type,uint32_t size,mach_vm_address_t * user_addr)873 kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
874 {
875 	/* record number of padding bytes as lower 4 bits of flags */
876 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
877 	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
878 }
879 
880 /*
881  * Routine: kcdata_add_buffer_end
882  *
883  * Desc: Write buffer end marker.  This does not advance the end pointer in the
884  * kcdata_descriptor_t, so it may be used conservatively before additional data
885  * is added, as long as it is at least called after the last time data is added.
886  *
887  * params:  data - pointer describing the crash info allocation
888  */
889 
890 kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)891 kcdata_write_buffer_end(kcdata_descriptor_t data)
892 {
893 	struct kcdata_item info;
894 	bzero(&info, sizeof(info));
895 	info.type = KCDATA_TYPE_BUFFER_END;
896 	info.size = 0;
897 	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
898 }
899 
900 /*
901  * Routine: kcdata_get_memory_addr_with_flavor
902  * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
903  */
904 
905 static kern_return_t
kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data,uint32_t type,uint32_t size,uint64_t flags,mach_vm_address_t * user_addr)906 kcdata_get_memory_addr_with_flavor(
907 	kcdata_descriptor_t data,
908 	uint32_t type,
909 	uint32_t size,
910 	uint64_t flags,
911 	mach_vm_address_t *user_addr)
912 {
913 	kern_return_t kr;
914 	struct kcdata_item info;
915 
916 	uint32_t orig_size = size;
917 	/* make sure 16 byte aligned */
918 	uint32_t padding = kcdata_calc_padding(size);
919 	size += padding;
920 	uint32_t total_size  = size + sizeof(info);
921 
922 	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
923 		return KERN_INVALID_ARGUMENT;
924 	}
925 
926 	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
927 	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));
928 
929 	bzero(&info, sizeof(info));
930 	info.type  = type;
931 	info.size = size;
932 	info.flags = flags;
933 
934 	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
935 	if (total_size + sizeof(info) > data->kcd_length ||
936 	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
937 		return KERN_INSUFFICIENT_BUFFER_SIZE;
938 	}
939 
940 	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
941 	if (kr) {
942 		return kr;
943 	}
944 
945 	data->kcd_addr_end += sizeof(info);
946 
947 	if (padding) {
948 		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
949 		if (kr) {
950 			return kr;
951 		}
952 	}
953 
954 	*user_addr = data->kcd_addr_end;
955 	data->kcd_addr_end += size;
956 
957 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
958 		/* setup the end header as well */
959 		return kcdata_write_buffer_end(data);
960 	} else {
961 		return KERN_SUCCESS;
962 	}
963 }
964 
965 /* Routine: kcdata_get_memory_size_for_data
966  * Desc: returns the amount of memory that is required to store the information
967  *       in kcdata
968  */
969 static size_t
kcdata_get_memory_size_for_data(uint32_t size)970 kcdata_get_memory_size_for_data(uint32_t size)
971 {
972 	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
973 }
974 
975 /*
976  * Routine: kcdata_get_memory_addr_for_array
977  * Desc: get memory address in the userspace memory for corpse info
978  *       NOTE: The caller is responsible to zero the resulting memory or
979  *             user other means to mark memory if it has failed populating the
980  *             data in middle of operation.
981  * params:  data - pointer describing the crash info allocation
982  *          type_of_element - type of data to be put. See kern_cdata.h for defined types
983  *          size_of_element - size of element. The header describes this size
984  *          count - num of elements in array.
985  * returns: mach_vm_address_t address in user memory for copyout().
986  */
987 
988 kern_return_t
kcdata_get_memory_addr_for_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,mach_vm_address_t * user_addr)989 kcdata_get_memory_addr_for_array(
990 	kcdata_descriptor_t data,
991 	uint32_t type_of_element,
992 	uint32_t size_of_element,
993 	uint32_t count,
994 	mach_vm_address_t *user_addr)
995 {
996 	/* for arrays we record the number of padding bytes as the low-order 4 bits
997 	 * of the type field.  KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
998 	uint64_t flags      = type_of_element;
999 	flags               = (flags << 32) | count;
1000 	uint32_t total_size = count * size_of_element;
1001 	uint32_t pad        = kcdata_calc_padding(total_size);
1002 
1003 	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
1004 }
1005 
1006 /*
1007  * Routine: kcdata_add_container_marker
1008  * Desc: Add a container marker in the buffer for type and identifier.
1009  * params:  data - pointer describing the crash info allocation
1010  *          header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
1011  *          container_type - type of data to be put. See kern_cdata.h for defined types
1012  *          identifier - unique identifier. This is required to match nested containers.
1013  * returns: return value of kcdata_get_memory_addr()
1014  */
1015 
1016 kern_return_t
kcdata_add_container_marker(kcdata_descriptor_t data,uint32_t header_type,uint32_t container_type,uint64_t identifier)1017 kcdata_add_container_marker(
1018 	kcdata_descriptor_t data,
1019 	uint32_t header_type,
1020 	uint32_t container_type,
1021 	uint64_t identifier)
1022 {
1023 	mach_vm_address_t user_addr;
1024 	kern_return_t kr;
1025 	uint32_t data_size;
1026 
1027 	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
1028 
1029 	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
1030 
1031 	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
1032 		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
1033 		if (kr != KERN_SUCCESS) {
1034 			return kr;
1035 		}
1036 
1037 		if (data_size) {
1038 			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
1039 		}
1040 	} else {
1041 		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
1042 	}
1043 
1044 	return kr;
1045 }
1046 
1047 /*
1048  * Routine: kcdata_undo_addcontainer_begin
1049  * Desc: call this after adding a container begin but before adding anything else to revert.
1050  */
1051 kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)1052 kcdata_undo_add_container_begin(kcdata_descriptor_t data)
1053 {
1054 	/*
1055 	 * the payload of a container begin is a single uint64_t.  It is padded out
1056 	 * to 16 bytes.
1057 	 */
1058 	const mach_vm_address_t padded_payload_size = 16;
1059 	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
1060 
1061 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1062 		/* setup the end header as well */
1063 		return kcdata_write_buffer_end(data);
1064 	} else {
1065 		return KERN_SUCCESS;
1066 	}
1067 }
1068 
1069 /*
1070  * Routine: kcdata_memcpy
1071  * Desc: a common function to copy data out based on either copyout or memcopy flags
1072  * params:  data - pointer describing the kcdata buffer
1073  *          dst_addr - destination address
1074  *          src_addr - source address
1075  *          size - size in bytes to copy.
1076  * returns: KERN_NO_ACCESS if copyout fails.
1077  */
1078 
1079 kern_return_t
kcdata_memcpy(kcdata_descriptor_t data,mach_vm_address_t dst_addr,const void * src_addr,uint32_t size)1080 kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
1081 {
1082 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1083 		if (copyout(src_addr, dst_addr, size)) {
1084 			return KERN_NO_ACCESS;
1085 		}
1086 	} else {
1087 		memcpy((void *)dst_addr, src_addr, size);
1088 	}
1089 	return KERN_SUCCESS;
1090 }
1091 
1092 /*
1093  * Routine: kcdata_bzero
1094  * Desc: zero out a portion of a kcdata buffer.
1095  */
1096 kern_return_t
kcdata_bzero(kcdata_descriptor_t data,mach_vm_address_t dst_addr,uint32_t size)1097 kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
1098 {
1099 	kern_return_t kr = KERN_SUCCESS;
1100 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1101 		uint8_t zeros[16] = {};
1102 		while (size) {
1103 			uint32_t block_size = MIN(size, 16);
1104 			kr = copyout(&zeros, dst_addr, block_size);
1105 			if (kr) {
1106 				return KERN_NO_ACCESS;
1107 			}
1108 			size -= block_size;
1109 		}
1110 		return KERN_SUCCESS;
1111 	} else {
1112 		bzero((void*)dst_addr, size);
1113 		return KERN_SUCCESS;
1114 	}
1115 }
1116 
1117 /*
1118  * Routine: kcdata_add_type_definition
1119  * Desc: add type definition to kcdata buffer.
1120  *       see feature description in documentation above.
1121  * params:  data - pointer describing the kcdata buffer
1122  *          type_id - unique type identifier for this data
1123  *          type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
1124  *          elements_array - address to descriptors for each field in struct
1125  *          elements_count - count of how many fields are there in struct.
1126  * returns: return code from kcdata_get_memory_addr in case of failure.
1127  */
1128 
1129 kern_return_t
kcdata_add_type_definition(kcdata_descriptor_t data,uint32_t type_id,char * type_name,struct kcdata_subtype_descriptor * elements_array_addr,uint32_t elements_count)1130 kcdata_add_type_definition(
1131 	kcdata_descriptor_t data,
1132 	uint32_t type_id,
1133 	char *type_name,
1134 	struct kcdata_subtype_descriptor *elements_array_addr,
1135 	uint32_t elements_count)
1136 {
1137 	kern_return_t kr = KERN_SUCCESS;
1138 	struct kcdata_type_definition kc_type_definition;
1139 	mach_vm_address_t user_addr;
1140 	uint32_t total_size = sizeof(struct kcdata_type_definition);
1141 	bzero(&kc_type_definition, sizeof(kc_type_definition));
1142 
1143 	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
1144 		return KERN_INVALID_ARGUMENT;
1145 	}
1146 	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
1147 	kc_type_definition.kct_num_elements = elements_count;
1148 	kc_type_definition.kct_type_identifier = type_id;
1149 
1150 	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
1151 	/* record number of padding bytes as lower 4 bits of flags */
1152 	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
1153 	    kcdata_calc_padding(total_size), &user_addr))) {
1154 		return kr;
1155 	}
1156 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
1157 		return kr;
1158 	}
1159 	user_addr += sizeof(struct kcdata_type_definition);
1160 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
1161 		return kr;
1162 	}
1163 	return kr;
1164 }
1165 
1166 kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc,uint64_t data,const char * description)1167 kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
1168 {
1169 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1170 		return KERN_INVALID_ARGUMENT;
1171 	}
1172 
1173 	kern_return_t kr = 0;
1174 	mach_vm_address_t user_addr;
1175 	struct _uint64_with_description_data save_data;
1176 	const uint64_t size_req = sizeof(save_data);
1177 	bzero(&save_data, size_req);
1178 
1179 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1180 	save_data.data = data;
1181 
1182 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1183 		/* allocate space for the output */
1184 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
1185 	}
1186 
1187 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
1188 	if (kr != KERN_SUCCESS) {
1189 		return kr;
1190 	}
1191 
1192 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1193 		if (copyout(&save_data, user_addr, size_req)) {
1194 			return KERN_NO_ACCESS;
1195 		}
1196 	} else {
1197 		memcpy((void *)user_addr, &save_data, size_req);
1198 	}
1199 	return KERN_SUCCESS;
1200 }
1201 
1202 kern_return_t
kcdata_add_uint32_with_description(kcdata_descriptor_t data_desc,uint32_t data,const char * description)1203 kcdata_add_uint32_with_description(
1204 	kcdata_descriptor_t data_desc,
1205 	uint32_t data,
1206 	const char *description)
1207 {
1208 	assert(strlen(description) < KCDATA_DESC_MAXLEN);
1209 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1210 		return KERN_INVALID_ARGUMENT;
1211 	}
1212 	kern_return_t kr = 0;
1213 	mach_vm_address_t user_addr;
1214 	struct _uint32_with_description_data save_data;
1215 	const uint64_t size_req = sizeof(save_data);
1216 
1217 	bzero(&save_data, size_req);
1218 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1219 	save_data.data = data;
1220 
1221 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1222 		/* allocate space for the output */
1223 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
1224 	}
1225 
1226 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
1227 	if (kr != KERN_SUCCESS) {
1228 		return kr;
1229 	}
1230 
1231 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1232 		if (copyout(&save_data, user_addr, size_req)) {
1233 			return KERN_NO_ACCESS;
1234 		}
1235 	} else {
1236 		memcpy((void *)user_addr, &save_data, size_req);
1237 	}
1238 
1239 	return KERN_SUCCESS;
1240 }
1241 
1242 
1243 /* end buffer management api */
1244