xref: /xnu-8792.61.2/osfmk/kern/kern_cdata.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32 #include <mach/vm_param.h>
33 #include <kern/kern_types.h>
34 #include <kern/mach_param.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/kern_cdata.h>
38 #include <kern/kalloc.h>
39 #include <kern/ipc_kobject.h>
40 #include <mach/mach_vm.h>
41 
42 static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
43 static size_t kcdata_get_memory_size_for_data(uint32_t size);
44 static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
45 static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
46 static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
47 static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
48 static void kcdata_object_no_senders(ipc_port_t port, mach_port_mscount_t mscount);
49 
50 #ifndef ROUNDUP
51 #define ROUNDUP(x, y)            ((((x)+(y)-1)/(y))*(y))
52 #endif
53 
54 /*
55  * zlib will need to store its metadata and this value is indifferent from the
56  * window bits and other zlib internals
57  */
58 #define ZLIB_METADATA_SIZE 1440
59 
60 /* #define kcdata_debug_printf printf */
61 #define kcdata_debug_printf(...) ;
62 
#pragma pack(push, 4)

/* Internal structs for convenience */

/*
 * Payload layout for KCDATA_TYPE_UINT64_DESC items: a fixed-width
 * description string followed by the 64-bit value. Packed to 4 bytes
 * so the on-disk/wire layout is stable across architectures.
 */
struct _uint64_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

/* Payload layout for KCDATA_TYPE_UINT32_DESC items (32-bit variant). */
struct _uint32_with_description_data {
	char     desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};

#pragma pack(pop)
77 
/*
 * Count of in-flight lightweight-corpse kcdata objects. Bounded by
 * MAX_INFLIGHT_KCOBJECT_LW_CORPSE via kcdata_object_throttle_get()/
 * kcdata_object_throttle_release() below.
 */
int _Atomic lw_corpse_obj_cnt = 0;

/*
 * kcdata objects are exposed to userspace as IKOT_KCDATA kobject ports;
 * a no-senders notification drops the reference held by send rights.
 */
IPC_KOBJECT_DEFINE(IKOT_KCDATA,
    .iko_op_stable     = true,
    .iko_op_no_senders = kcdata_object_no_senders);

/* Dedicated kalloc type/zone for struct kcdata_object allocations. */
KALLOC_TYPE_DEFINE(KCDATA_OBJECT, struct kcdata_object, KT_DEFAULT);

/* Reference-count group used for kcdata object refcount accounting. */
os_refgrp_decl(static, kcdata_object_refgrp, "kcdata_object", NULL);
87 
/* Grab a throttle slot for rate-limited kcdata object type(s) */
kern_return_t
kcdata_object_throttle_get(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		/*
		 * Atomically increment the in-flight count unless it has
		 * reached the cap; give up (and fail) without modifying the
		 * counter when at the limit.
		 */
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			if (oval >= MAX_INFLIGHT_KCOBJECT_LW_CORPSE) {
			        printf("Too many lightweight corpse in flight: %d\n", oval);
			        os_atomic_rmw_loop_give_up(return KERN_RESOURCE_SHORTAGE);
			}
			nval = oval + 1;
		});
	}

	return KERN_SUCCESS;
}
109 
/* Release a throttle slot for rate-limited kcdata object type(s) */
void
kcdata_object_throttle_release(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		/*
		 * Atomically decrement the in-flight count; going negative
		 * means a release without a matching get, which is fatal.
		 */
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			nval = oval - 1;
			if (__improbable(nval < 0)) {
			        os_atomic_rmw_loop_give_up(panic("Lightweight corpse kcdata object over-released"));
			}
		});
	}
}
128 
129 /*
130  * Create an object representation for the given kcdata.
131  *
132  * Captures kcdata descripter ref in object. If the object creation
133  * should be rate-limited, kcdata_object_throttle_get() must be called
134  * manually before invoking kcdata_create_object(), so as to save
135  * work (of creating the enclosed kcdata blob) if a throttled reference
136  * cannot be obtained in the first place.
137  */
138 kern_return_t
kcdata_create_object(kcdata_descriptor_t data,kcdata_obj_flags_t flags,uint32_t size,kcdata_object_t * objp)139 kcdata_create_object(
140 	kcdata_descriptor_t data,
141 	kcdata_obj_flags_t flags,
142 	uint32_t        size,
143 	kcdata_object_t *objp)
144 {
145 	kcdata_object_t obj;
146 
147 	if (data == NULL) {
148 		return KERN_INVALID_ARGUMENT;
149 	}
150 
151 	obj = zalloc_flags(KCDATA_OBJECT, Z_ZERO | Z_WAITOK | Z_NOFAIL);
152 
153 	obj->ko_data = data;
154 	obj->ko_flags = flags;
155 	obj->ko_alloc_size = size;
156 	obj->ko_port = IP_NULL;
157 
158 	os_ref_init_count(&obj->ko_refs, &kcdata_object_refgrp, 1);
159 
160 	*objp = obj;
161 
162 	return KERN_SUCCESS;
163 }
164 
165 void
kcdata_object_reference(kcdata_object_t obj)166 kcdata_object_reference(kcdata_object_t obj)
167 {
168 	if (obj == KCDATA_OBJECT_NULL) {
169 		return;
170 	}
171 
172 	os_ref_retain(&obj->ko_refs);
173 }
174 
/*
 * Tear down a kcdata object once its last reference is gone: destroy the
 * kobject port, return the throttle slot, free the kcdata buffer and
 * descriptor, and finally free the object itself.  Called only from
 * kcdata_object_release() on the final reference drop.
 */
static void
kcdata_object_destroy(kcdata_object_t obj)
{
	void *begin_addr;
	ipc_port_t port;
	kcdata_obj_flags_t flags;

	if (obj == KCDATA_OBJECT_NULL) {
		return;
	}

	port = obj->ko_port;
	flags = obj->ko_flags;

	/* Release the port */
	if (IP_VALID(port)) {
		ipc_kobject_dealloc_port(port, 0, IKOT_KCDATA);
	}

	/* Release the ref for rate-limited kcdata object type(s) */
	kcdata_object_throttle_release(flags);

	/* Destroy the kcdata backing captured in the object */
	/* ko_alloc_size is the size recorded at kcdata_create_object() time */
	begin_addr = kcdata_memory_get_begin_addr(obj->ko_data);
	kfree_data(begin_addr, obj->ko_alloc_size);
	kcdata_memory_destroy(obj->ko_data);

	/* Free the object */
	zfree(KCDATA_OBJECT, obj);
}
205 
206 void
kcdata_object_release(kcdata_object_t obj)207 kcdata_object_release(kcdata_object_t obj)
208 {
209 	if (obj == KCDATA_OBJECT_NULL) {
210 		return;
211 	}
212 
213 	if (os_ref_release(&obj->ko_refs) > 0) {
214 		return;
215 	}
216 	/* last ref */
217 
218 	kcdata_object_destroy(obj);
219 }
220 
221 /* Produces kcdata object ref */
222 kcdata_object_t
convert_port_to_kcdata_object(ipc_port_t port)223 convert_port_to_kcdata_object(ipc_port_t port)
224 {
225 	kcdata_object_t obj = KCDATA_OBJECT_NULL;
226 
227 	if (IP_VALID(port)) {
228 		obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
229 		if (obj != KCDATA_OBJECT_NULL) {
230 			zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);
231 			kcdata_object_reference(obj);
232 		}
233 	}
234 
235 	return obj;
236 }
237 
/* Consumes kcdata object ref */
ipc_port_t
convert_kcdata_object_to_port(kcdata_object_t obj)
{
	if (obj == KCDATA_OBJECT_NULL) {
		return IP_NULL;
	}

	/* verify the pointer really belongs to the KCDATA_OBJECT zone */
	zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);

	/*
	 * Lazily allocate the kobject port and make a send right.  On
	 * success the object reference is donated to the port's no-senders
	 * accounting; on failure we must drop it ourselves.
	 */
	if (!ipc_kobject_make_send_lazy_alloc_port(&obj->ko_port,
	    obj, IKOT_KCDATA, IPC_KOBJECT_ALLOC_NONE)) {
		kcdata_object_release(obj);
	}
	/* object ref consumed */

	return obj->ko_port;
}
256 
257 static void
kcdata_object_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)258 kcdata_object_no_senders(
259 	ipc_port_t port,
260 	__unused mach_port_mscount_t mscount)
261 {
262 	kcdata_object_t obj;
263 
264 	obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
265 	assert(obj != KCDATA_OBJECT_NULL);
266 
267 	/* release the ref given by no-senders notification */
268 	kcdata_object_release(obj);
269 }
270 
271 /*
272  * Estimates how large of a buffer that should be allocated for a buffer that will contain
273  * num_items items of known types with overall length payload_size.
274  *
275  * NOTE: This function will not give an accurate estimate for buffers that will
276  *       contain unknown types (those with string descriptions).
277  */
278 uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items,uint32_t payload_size)279 kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
280 {
281 	/*
282 	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
283 	 */
284 	uint32_t max_padding_bytes = 0;
285 	uint32_t max_padding_with_item_description_bytes = 0;
286 	uint32_t estimated_required_buffer_size = 0;
287 	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
288 
289 	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
290 		panic("%s: Overflow in required buffer size estimate", __func__);
291 	}
292 
293 	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
294 		panic("%s: Overflow in required buffer size estimate", __func__);
295 	}
296 
297 	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
298 		panic("%s: Overflow in required buffer size estimate", __func__);
299 	}
300 
301 	return estimated_required_buffer_size;
302 }
303 
304 kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)305 kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
306 {
307 	kcdata_descriptor_t data = NULL;
308 	mach_vm_address_t user_addr = 0;
309 	uint16_t clamped_flags = (uint16_t) flags;
310 
311 	data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
312 	data->kcd_addr_begin = buffer_addr_p;
313 	data->kcd_addr_end = buffer_addr_p;
314 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
315 	data->kcd_length = size;
316 	data->kcd_endalloced = 0;
317 
318 	/* Initialize the BEGIN header */
319 	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
320 		kcdata_memory_destroy(data);
321 		return NULL;
322 	}
323 
324 	return data;
325 }
326 
327 kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data,mach_vm_address_t buffer_addr_p,unsigned data_type,unsigned size,unsigned flags)328 kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
329 {
330 	mach_vm_address_t user_addr = 0;
331 	uint16_t clamped_flags = (uint16_t) flags;
332 
333 	if (data == NULL) {
334 		return KERN_INVALID_ARGUMENT;
335 	}
336 	bzero(data, sizeof(struct kcdata_descriptor));
337 	data->kcd_addr_begin = buffer_addr_p;
338 	data->kcd_addr_end = buffer_addr_p;
339 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
340 	data->kcd_length = size;
341 	data->kcd_endalloced = 0;
342 
343 	/* Initialize the BEGIN header */
344 	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
345 }
346 
/*
 * Carve `length` bytes off the *end* of the kcdata buffer, shrinking
 * kcd_length so that normal front-of-buffer item allocations cannot
 * collide with the reserved tail.  The returned pointer is uint64-aligned.
 * Returns NULL if the request would consume 1/4 or more of the remaining
 * buffer.  Undone by kcdata_release_endallocs().
 */
void *
kcdata_endalloc(kcdata_descriptor_t data, size_t length)
{
	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
	/* round up allocation and ensure return value is uint64-aligned */
	size_t toalloc = ROUNDUP(length, sizeof(uint64_t)) + (curend % sizeof(uint64_t));
	/* an arbitrary limit: make sure we don't allocate more then 1/4th of the remaining buffer. */
	if (data->kcd_length / 4 <= toalloc) {
		return NULL;
	}
	data->kcd_length -= toalloc;       /* hide the tail from front-path allocations */
	data->kcd_endalloced += toalloc;   /* tracked so kcdata_release_endallocs() can undo */
	return (void *)(curend - toalloc);
}
361 
362 /* Zeros and releases data allocated from the end of the buffer */
363 static void
kcdata_release_endallocs(kcdata_descriptor_t data)364 kcdata_release_endallocs(kcdata_descriptor_t data)
365 {
366 	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
367 	size_t endalloced = data->kcd_endalloced;
368 	if (endalloced > 0) {
369 		bzero((void *)curend, endalloced);
370 		data->kcd_length += endalloced;
371 		data->kcd_endalloced = 0;
372 	}
373 }
374 
375 void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)376 kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
377 {
378 	if (data == NULL) {
379 		return NULL;
380 	}
381 
382 	return (void *)data->kcd_addr_begin;
383 }
384 
385 uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)386 kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
387 {
388 	assert(kcd != NULL);
389 	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
390 }
391 
392 uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)393 kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
394 {
395 	kern_return_t kr;
396 
397 	assert(kcd != NULL);
398 	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
399 		uint64_t totalout, totalin;
400 
401 		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
402 		if (kr == KERN_SUCCESS) {
403 			return totalin;
404 		} else {
405 			return 0;
406 		}
407 	} else {
408 		/* If compression wasn't used, get the number of bytes used  */
409 		return kcdata_memory_get_used_bytes(kcd);
410 	}
411 }
412 
413 /*
414  * Free up the memory associated with kcdata
415  */
416 kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)417 kcdata_memory_destroy(kcdata_descriptor_t data)
418 {
419 	if (!data) {
420 		return KERN_INVALID_ARGUMENT;
421 	}
422 
423 	/*
424 	 * data->kcd_addr_begin points to memory in not tracked by
425 	 * kcdata lib. So not clearing that here.
426 	 */
427 	kfree_type(struct kcdata_descriptor, data);
428 	return KERN_SUCCESS;
429 }
430 
/* Used by zlib to allocate space in its metadata section */
static void *
kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
{
	void *result;
	struct kcdata_compress_descriptor *cd = opaque;
	/* bump allocator: round items*size up to a 32-byte granule */
	int alloc_size = ~31L & (31 + (items * size));

	result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
		/* reserved metadata arena exhausted */
		result = Z_NULL;
	} else {
		cd->kcd_cd_offset += alloc_size;
	}

	kcdata_debug_printf("%s: %d * %d = %d  => %p\n", __func__, items, size, items * size, result);

	return result;
}
450 
/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void) opaque;
	(void) ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * Intentionally a no-op: the metadata arena is bump-allocated out of
	 * a temporary buffer and reclaimed wholesale, so individual frees are
	 * not tracked.  Testing has shown zlib only calls this at the end,
	 * near deflateEnd() or a Z_FINISH deflate() call.
	 */
}
466 
/* Used to initialize the selected compression algorithm's internal state (if any) */
static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
{
	kern_return_t ret = KERN_SUCCESS;
	size_t size;
	/* small window/memlevel — presumably chosen to bound zlib's memory
	 * footprint (zlib_deflate_memory_size() depends on both) — verify */
	int wbits = 12, memlevel = 3;
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	cd->kcd_cd_memcpy_f = memcpy_f;
	cd->kcd_cd_compression_type = type;
	/* addresses of the running counters reserved by kcdata_init_compress() */
	cd->kcd_cd_totalout_addr = totalout_addr;
	cd->kcd_cd_totalin_addr = totalin_addr;

	switch (type) {
	case KCDCT_ZLIB:
		/* allocate space for the metadata used by zlib */
		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);
		/*
		 * NOTE(review): the endalloc return value is only used as a
		 * space check; the arena base is then computed separately below
		 * and kcd_length is reduced a second time — confirm the double
		 * reservation is intentional.
		 */
		void *buf = kcdata_endalloc(data, size);
		if (buf == NULL) {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}

		/* zlib pulls its workspace via our custom zalloc/zfree pair */
		cd->kcd_cd_zs.avail_in = 0;
		cd->kcd_cd_zs.next_in = NULL;
		cd->kcd_cd_zs.avail_out = 0;
		cd->kcd_cd_zs.next_out = NULL;
		cd->kcd_cd_zs.opaque = cd;
		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
		cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
		data->kcd_length -= size;
		cd->kcd_cd_offset = 0;
		cd->kcd_cd_maxoffset = size;
		cd->kcd_cd_flags = 0;

		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);

		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
			ret = KERN_INVALID_ARGUMENT;
		}
		break;
	default:
		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
	}

	return ret;
}
518 
519 
520 /*
521  * Turn on the compression logic for kcdata
522  */
523 kern_return_t
kcdata_init_compress(kcdata_descriptor_t data,int hdr_tag,void (* memcpy_f)(void *,const void *,size_t),uint64_t type)524 kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
525 {
526 	kern_return_t kr;
527 	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
528 	struct _uint64_with_description_data save_data;
529 	const uint64_t size_req = sizeof(save_data);
530 
531 	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);
532 
533 	/* reset the compression descriptor */
534 	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));
535 
536 	/* add the header information */
537 	kcdata_add_uint64_with_description(data, type, "kcd_c_type");
538 
539 	/* reserve space to write total out */
540 	bzero(&save_data, size_req);
541 	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
542 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
543 	if (kr != KERN_SUCCESS) {
544 		return kr;
545 	}
546 	memcpy((void *)totalout_addr, &save_data, size_req);
547 
548 	/* space for total in */
549 	bzero(&save_data, size_req);
550 	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
551 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
552 	if (kr != KERN_SUCCESS) {
553 		return kr;
554 	}
555 	memcpy((void *)totalin_addr, &save_data, size_req);
556 
557 	/* add the inner buffer */
558 	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);
559 
560 	/* save the flag */
561 	data->kcd_flags |= KCFLAG_USE_COMPRESSION;
562 
563 	/* initialize algorithm specific state */
564 	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
565 	if (kr != KERN_SUCCESS) {
566 		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
567 		return kr;
568 	}
569 
570 	return KERN_SUCCESS;
571 }
572 
/* Map a kcdata flush mode onto the corresponding zlib flush constant. */
static inline
int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
	case KCDCF_FINISH: return Z_FINISH;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
	}
}
584 
/*
 * Expected deflate() return code for a given flush mode: Z_OK for
 * incremental flushes, Z_STREAM_END when finishing the stream.
 */
static inline
int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH:         /* fall through */
	case KCDCF_SYNC_FLUSH: return Z_OK;
	case KCDCF_FINISH: return Z_STREAM_END;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
	}
}
596 
/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	/* single deflate() pass of [inbuffer, inbuffer+insize) into outbuffer */
	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	/*
	 * Unconsumed input or a completely filled output window means outsize
	 * was too small.  (avail_out is unsigned, so "<= 0" is just "== 0".)
	 */
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	/* report how many compressed bytes landed in outbuffer */
	if (wrote) {
		*wrote = outsize - zs->avail_out;
	}
	return KERN_SUCCESS;
}
632 
633 /*
634  * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
635  * @outbuffer (of size @outsize). Flush based on the @flush parameter.
636  *
637  * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
638  * @outsize isn't sufficient. Also, writes the number of bytes written in the
639  * @outbuffer to @wrote.
640  */
641 static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data,void * inbuffer,size_t insize,void * outbuffer,size_t outsize,size_t * wrote,enum kcdata_compression_flush flush)642 kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
643     void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
644 {
645 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
646 
647 	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);
648 
649 	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
650 	    __func__, outbuffer, outsize, inbuffer, insize, flush);
651 
652 	/* don't compress if we are in a window */
653 	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
654 		assert(cd->kcd_cd_memcpy_f);
655 		if (outsize >= insize) {
656 			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
657 			if (wrote) {
658 				*wrote = insize;
659 			}
660 			return KERN_SUCCESS;
661 		} else {
662 			return KERN_INSUFFICIENT_BUFFER_SIZE;
663 		}
664 	}
665 
666 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
667 	case KCDCT_ZLIB:
668 		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
669 	default:
670 		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
671 	}
672 }
673 
674 static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data,size_t size)675 kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
676 {
677 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
678 	z_stream *zs = &cd->kcd_cd_zs;
679 
680 	return (size_t) deflateBound(zs, (unsigned long) size);
681 }
682 
683 
684 /*
685  * returns the worst-case, maximum length of the compressed data when
686  * compressing a buffer of size @size using the configured algorithm.
687  */
688 static size_t
kcdata_compression_bound(kcdata_descriptor_t data,size_t size)689 kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
690 {
691 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
692 	case KCDCT_ZLIB:
693 		return kcdata_compression_bound_zlib(data, size);
694 	case KCDCT_NONE:
695 		return size;
696 	default:
697 		panic("%s: unknown compression method", __func__);
698 	}
699 }
700 
701 /*
702  * kcdata_compress_chunk_with_flags:
703  *		Compress buffer found at @input_data (length @input_size) to the kcdata
704  *		buffer described by @data. This method will construct the kcdata_item_t
705  *		required by parsers using the type information @type and flags @flags.
706  *
707  *	Returns KERN_SUCCESS when successful. Currently, asserts on failure.
708  */
709 kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data,uint32_t type,const void * input_data,uint32_t input_size,uint64_t kcdata_flags)710 kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
711 {
712 	assert(data);
713 	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
714 	assert(input_data);
715 	struct kcdata_item info;
716 	char padding_data[16] = {0};
717 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
718 	size_t wrote = 0;
719 	kern_return_t kr;
720 
721 	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
722 	    __func__, type, input_data, input_size, kcdata_flags);
723 
724 	/*
725 	 * first, get memory space. The uncompressed size must fit in the remained
726 	 * of the kcdata buffer, in case the compression algorithm doesn't actually
727 	 * compress the data at all.
728 	 */
729 	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
730 	if (total_uncompressed_size > data->kcd_length ||
731 	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
732 		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
733 		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
734 		return KERN_INSUFFICIENT_BUFFER_SIZE;
735 	}
736 	uint32_t padding = kcdata_calc_padding(input_size);
737 	assert(padding < sizeof(padding_data));
738 
739 	void *space_start = (void *) data->kcd_addr_end;
740 	void *space_ptr = space_start;
741 
742 	/* create the output stream */
743 	size_t total_uncompressed_space_remaining = total_uncompressed_size;
744 
745 	/* create the info data */
746 	bzero(&info, sizeof(info));
747 	info.type = type;
748 	info.size = input_size + padding;
749 	info.flags = kcdata_flags;
750 
751 	/*
752 	 * The next possibly three compresses are needed separately because of the
753 	 * scatter-gather nature of this operation. The kcdata item header (info)
754 	 * and padding are on the stack, while the actual data is somewhere else.
755 	 * */
756 
757 	/* create the input stream for info & compress */
758 	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
759 	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
760 	    KCDCF_SYNC_FLUSH;
761 	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
762 	if (kr != KERN_SUCCESS) {
763 		return kr;
764 	}
765 	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
766 	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
767 	total_uncompressed_space_remaining -= wrote;
768 
769 	/* If there is input provided, compress that here */
770 	if (input_size) {
771 		flush = padding ? KCDCF_NO_FLUSH :
772 		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
773 		    KCDCF_SYNC_FLUSH;
774 		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
775 		if (kr != KERN_SUCCESS) {
776 			return kr;
777 		}
778 		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
779 		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
780 		total_uncompressed_space_remaining -= wrote;
781 	}
782 
783 	/* If the item and its data require padding to maintain alignment,
784 	 * "compress" that into the output buffer. */
785 	if (padding) {
786 		/* write the padding */
787 		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
788 		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
789 		if (kr != KERN_SUCCESS) {
790 			return kr;
791 		}
792 		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
793 		if (wrote == 0) {
794 			return KERN_FAILURE;
795 		}
796 		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
797 		total_uncompressed_space_remaining -= wrote;
798 	}
799 
800 	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);
801 
802 	/* move the end marker forward */
803 	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);
804 
805 	return KERN_SUCCESS;
806 }
807 
808 /*
809  * kcdata_compress_chunk:
810  *		Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
811  *		i.e. padding and also saves the amount of padding bytes.
812  *
813  * Returns are the same as in kcdata_compress_chunk_with_flags()
814  */
815 kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data,uint32_t type,const void * input_data,uint32_t input_size)816 kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
817 {
818 	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
819 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
820 	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
821 }
822 
823 kern_return_t
kcdata_push_data(kcdata_descriptor_t data,uint32_t type,uint32_t size,const void * input_data)824 kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
825 {
826 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
827 		return kcdata_compress_chunk(data, type, input_data, size);
828 	} else {
829 		kern_return_t ret;
830 		mach_vm_address_t uaddr = 0;
831 		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
832 		if (ret != KERN_SUCCESS) {
833 			return ret;
834 		}
835 
836 		kcdata_memcpy(data, uaddr, input_data, size);
837 		return KERN_SUCCESS;
838 	}
839 }
840 
841 kern_return_t
kcdata_push_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,const void * input_data)842 kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
843 {
844 	uint64_t flags      = type_of_element;
845 	flags               = (flags << 32) | count;
846 	uint32_t total_size = count * size_of_element;
847 	uint32_t pad        = kcdata_calc_padding(total_size);
848 
849 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
850 		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
851 	} else {
852 		kern_return_t ret;
853 		mach_vm_address_t uaddr = 0;
854 		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
855 		if (ret != KERN_SUCCESS) {
856 			return ret;
857 		}
858 
859 		kcdata_memcpy(data, uaddr, input_data, total_size);
860 		return KERN_SUCCESS;
861 	}
862 }
863 
864 /* A few words on how window compression works:
865  *
866  * This is how the buffer looks when the window is opened:
867  *
868  * X---------------------------------------------------------------------X
869  * |                                |                                    |
870  * |   Filled with stackshot data   |            Zero bytes              |
871  * |                                |                                    |
872  * X---------------------------------------------------------------------X
873  *                                  ^
874  *									\ - kcd_addr_end
875  *
876  * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
877  *
878  * Any kcdata_* operation will then push data to the buffer like normal. (If
879  * you call any compressing functions they will pass-through, i.e. no
880  * compression will be done) Once the window is closed, the following takes
881  * place:
882  *
883  * X---------------------------------------------------------------------X
884  * |               |                    |                    |           |
885  * | Existing data |     New data       |   Scratch buffer   |           |
886  * |               |                    |                    |           |
887  * X---------------------------------------------------------------------X
888  *				   ^                    ^                    ^
889  *				   |                    |                    |
890  *				   \ -kcd_cd_mark_begin |                    |
891  *							            |                    |
892  *							            \ - kcd_addr_end     |
893  *							                                 |
894  *		 kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
895  *
896  *	(1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
897  *      compression algorithm to compress to the scratch buffer.
898  *  (2) The scratch buffer's contents are copied into the area denoted "New
899  *      data" above. Effectively overwriting the uncompressed data with the
900  *      compressed one.
901  *  (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
902  */
903 
904 /* Record the state, and restart compression from this later */
905 void
kcdata_compression_window_open(kcdata_descriptor_t data)906 kcdata_compression_window_open(kcdata_descriptor_t data)
907 {
908 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
909 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
910 
911 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
912 		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
913 		cd->kcd_cd_mark_begin = data->kcd_addr_end;
914 	}
915 }
916 
/*
 * Close a compression window: compress, in place, the region between the
 * saved mark and the current end of the buffer.  See the "window
 * compression" diagram above for the buffer layout this relies on.
 *
 * Returns: KERN_SUCCESS when compression is disabled or the window is empty;
 *          KERN_INSUFFICIENT_BUFFER_SIZE when the worst-case compressed size
 *          does not fit after the current end of data;
 *          KERN_FAILURE when the compressor produced no output.
 */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	/* bytes appended since the window was opened */
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	/* worst-case compressed size for that many input bytes */
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the remained
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	/* scratch area for compressed output begins right after current end */
	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining  -= wrote;

	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);

	/* copy to the original location */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
984 
985 static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)986 kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
987 {
988 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
989 	z_stream *zs = &cd->kcd_cd_zs;
990 
991 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
992 
993 	*totalout = (uint64_t) zs->total_out;
994 	*totalin = (uint64_t) zs->total_in;
995 
996 	return KERN_SUCCESS;
997 }
998 
999 static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data,uint64_t * totalout,uint64_t * totalin)1000 kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
1001 {
1002 	kern_return_t kr;
1003 
1004 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
1005 	case KCDCT_ZLIB:
1006 		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
1007 		break;
1008 	case KCDCT_NONE:
1009 		*totalout = *totalin = kcdata_memory_get_used_bytes(data);
1010 		kr = KERN_SUCCESS;
1011 		break;
1012 	default:
1013 		panic("invalid compression flag 0x%llx in kcdata_write_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
1014 	}
1015 
1016 	return kr;
1017 }
1018 
1019 kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)1020 kcdata_write_compression_stats(kcdata_descriptor_t data)
1021 {
1022 	kern_return_t kr;
1023 	uint64_t totalout, totalin;
1024 
1025 	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
1026 	if (kr != KERN_SUCCESS) {
1027 		return kr;
1028 	}
1029 
1030 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
1031 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;
1032 
1033 	return kr;
1034 }
1035 
/*
 * Tear down the zlib deflate stream and scrub the unused tail of the kcdata
 * buffer.  Returns KERN_FAILURE if deflateEnd() reports an inconsistent
 * stream state.
 */
static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 * This doesn't get the compression metadata; that's zeroed by kcdata_release_endallocs().
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */

	/* zero from the end of valid data through the end of the buffer */
	void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
1066 
1067 static kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)1068 kcdata_finish_compression(kcdata_descriptor_t data)
1069 {
1070 	kcdata_write_compression_stats(data);
1071 
1072 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
1073 	case KCDCT_ZLIB:
1074 		return kcdata_finish_compression_zlib(data);
1075 	case KCDCT_NONE:
1076 		return KERN_SUCCESS;
1077 	default:
1078 		panic("invalid compression type 0x%llxin kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
1079 	}
1080 }
1081 
1082 kern_return_t
kcdata_finish(kcdata_descriptor_t data)1083 kcdata_finish(kcdata_descriptor_t data)
1084 {
1085 	int ret = KERN_SUCCESS;
1086 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1087 		ret = kcdata_finish_compression(data);
1088 	}
1089 	kcdata_release_endallocs(data);
1090 	return ret;
1091 }
1092 
1093 void
kcd_finalize_compression(kcdata_descriptor_t data)1094 kcd_finalize_compression(kcdata_descriptor_t data)
1095 {
1096 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1097 		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
1098 	}
1099 }
1100 
1101 /*
1102  * Routine: kcdata_get_memory_addr
1103  * Desc: get memory address in the userspace memory for corpse info
1104  *       NOTE: The caller is responsible for zeroing the resulting memory or
1105  *             using other means to mark memory if it has failed populating the
1106  *             data in middle of operation.
1107  * params:  data - pointer describing the crash info allocation
1108  *	        type - type of data to be put. See corpse.h for defined types
1109  *          size - size requested. The header describes this size
1110  * returns: mach_vm_address_t address in user memory for copyout().
1111  */
1112 kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data,uint32_t type,uint32_t size,mach_vm_address_t * user_addr)1113 kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
1114 {
1115 	/* record number of padding bytes as lower 4 bits of flags */
1116 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
1117 	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
1118 }
1119 
1120 /*
1121  * Routine: kcdata_add_buffer_end
1122  *
1123  * Desc: Write buffer end marker.  This does not advance the end pointer in the
1124  * kcdata_descriptor_t, so it may be used conservatively before additional data
1125  * is added, as long as it is at least called after the last time data is added.
1126  *
1127  * params:  data - pointer describing the crash info allocation
1128  */
1129 
1130 kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)1131 kcdata_write_buffer_end(kcdata_descriptor_t data)
1132 {
1133 	struct kcdata_item info;
1134 	bzero(&info, sizeof(info));
1135 	info.type = KCDATA_TYPE_BUFFER_END;
1136 	info.size = 0;
1137 	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
1138 }
1139 
1140 /*
1141  * Routine: kcdata_get_memory_addr_with_flavor
1142  * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
1143  */
1144 
/*
 * Reserve space in the kcdata buffer for one item: write the item header,
 * zero any alignment padding, and hand back the address where the caller
 * should place the payload.  Also (unless KCFLAG_NO_AUTO_ENDBUFFER) re-stamps
 * the BUFFER_END trailer after the reservation.
 */
static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size  = size + sizeof(info);

	/* third clause detects 32-bit wrap-around from the padding/header math */
	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* when compressing, raw reservations are only legal inside an open window */
	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type  = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		/* zero only the pad bytes at the tail of the reserved payload */
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
1204 
1205 /* Routine: kcdata_get_memory_size_for_data
1206  * Desc: returns the amount of memory that is required to store the information
1207  *       in kcdata
1208  */
1209 static size_t
kcdata_get_memory_size_for_data(uint32_t size)1210 kcdata_get_memory_size_for_data(uint32_t size)
1211 {
1212 	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
1213 }
1214 
1215 /*
1216  * Routine: kcdata_get_memory_addr_for_array
1217  * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *             using other means to mark the memory if it has failed populating the
1220  *             data in middle of operation.
1221  * params:  data - pointer describing the crash info allocation
1222  *          type_of_element - type of data to be put. See kern_cdata.h for defined types
1223  *          size_of_element - size of element. The header describes this size
1224  *          count - num of elements in array.
1225  * returns: mach_vm_address_t address in user memory for copyout().
1226  */
1227 
1228 kern_return_t
kcdata_get_memory_addr_for_array(kcdata_descriptor_t data,uint32_t type_of_element,uint32_t size_of_element,uint32_t count,mach_vm_address_t * user_addr)1229 kcdata_get_memory_addr_for_array(
1230 	kcdata_descriptor_t data,
1231 	uint32_t type_of_element,
1232 	uint32_t size_of_element,
1233 	uint32_t count,
1234 	mach_vm_address_t *user_addr)
1235 {
1236 	/* for arrays we record the number of padding bytes as the low-order 4 bits
1237 	 * of the type field.  KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
1238 	uint64_t flags      = type_of_element;
1239 	flags               = (flags << 32) | count;
1240 	uint32_t total_size = count * size_of_element;
1241 	uint32_t pad        = kcdata_calc_padding(total_size);
1242 
1243 	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
1244 }
1245 
1246 /*
1247  * Routine: kcdata_add_container_marker
1248  * Desc: Add a container marker in the buffer for type and identifier.
1249  * params:  data - pointer describing the crash info allocation
1250  *          header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
1251  *          container_type - type of data to be put. See kern_cdata.h for defined types
1252  *          identifier - unique identifier. This is required to match nested containers.
1253  * returns: return value of kcdata_get_memory_addr()
1254  */
1255 
1256 kern_return_t
kcdata_add_container_marker(kcdata_descriptor_t data,uint32_t header_type,uint32_t container_type,uint64_t identifier)1257 kcdata_add_container_marker(
1258 	kcdata_descriptor_t data,
1259 	uint32_t header_type,
1260 	uint32_t container_type,
1261 	uint64_t identifier)
1262 {
1263 	mach_vm_address_t user_addr;
1264 	kern_return_t kr;
1265 	uint32_t data_size;
1266 
1267 	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
1268 
1269 	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
1270 
1271 	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
1272 		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
1273 		if (kr != KERN_SUCCESS) {
1274 			return kr;
1275 		}
1276 
1277 		if (data_size) {
1278 			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
1279 		}
1280 	} else {
1281 		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
1282 	}
1283 
1284 	return kr;
1285 }
1286 
1287 /*
1288  * Routine: kcdata_undo_addcontainer_begin
1289  * Desc: call this after adding a container begin but before adding anything else to revert.
1290  */
1291 kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)1292 kcdata_undo_add_container_begin(kcdata_descriptor_t data)
1293 {
1294 	/*
1295 	 * the payload of a container begin is a single uint64_t.  It is padded out
1296 	 * to 16 bytes.
1297 	 */
1298 	const mach_vm_address_t padded_payload_size = 16;
1299 	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
1300 
1301 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1302 		/* setup the end header as well */
1303 		return kcdata_write_buffer_end(data);
1304 	} else {
1305 		return KERN_SUCCESS;
1306 	}
1307 }
1308 
1309 /*
1310  * Routine: kcdata_memcpy
1311  * Desc: a common function to copy data out based on either copyout or memcopy flags
1312  * params:  data - pointer describing the kcdata buffer
1313  *          dst_addr - destination address
1314  *          src_addr - source address
1315  *          size - size in bytes to copy.
1316  * returns: KERN_NO_ACCESS if copyout fails.
1317  */
1318 
1319 kern_return_t
kcdata_memcpy(kcdata_descriptor_t data,mach_vm_address_t dst_addr,const void * src_addr,uint32_t size)1320 kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
1321 {
1322 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1323 		if (copyout(src_addr, dst_addr, size)) {
1324 			return KERN_NO_ACCESS;
1325 		}
1326 	} else {
1327 		memcpy((void *)dst_addr, src_addr, size);
1328 	}
1329 	return KERN_SUCCESS;
1330 }
1331 
1332 /*
1333  * Routine: kcdata_bzero
1334  * Desc: zero out a portion of a kcdata buffer.
1335  */
1336 kern_return_t
kcdata_bzero(kcdata_descriptor_t data,mach_vm_address_t dst_addr,uint32_t size)1337 kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
1338 {
1339 	kern_return_t kr = KERN_SUCCESS;
1340 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1341 		uint8_t zeros[16] = {};
1342 		while (size) {
1343 			uint32_t block_size = MIN(size, 16);
1344 			kr = copyout(&zeros, dst_addr, block_size);
1345 			if (kr) {
1346 				return KERN_NO_ACCESS;
1347 			}
1348 			size -= block_size;
1349 		}
1350 		return KERN_SUCCESS;
1351 	} else {
1352 		bzero((void*)dst_addr, size);
1353 		return KERN_SUCCESS;
1354 	}
1355 }
1356 
1357 /*
1358  * Routine: kcdata_add_type_definition
1359  * Desc: add type definition to kcdata buffer.
1360  *       see feature description in documentation above.
1361  * params:  data - pointer describing the kcdata buffer
1362  *          type_id - unique type identifier for this data
1363  *          type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
1364  *          elements_array - address to descriptors for each field in struct
1365  *          elements_count - count of how many fields are there in struct.
1366  * returns: return code from kcdata_get_memory_addr in case of failure.
1367  */
1368 
1369 kern_return_t
kcdata_add_type_definition(kcdata_descriptor_t data,uint32_t type_id,char * type_name,struct kcdata_subtype_descriptor * elements_array_addr,uint32_t elements_count)1370 kcdata_add_type_definition(
1371 	kcdata_descriptor_t data,
1372 	uint32_t type_id,
1373 	char *type_name,
1374 	struct kcdata_subtype_descriptor *elements_array_addr,
1375 	uint32_t elements_count)
1376 {
1377 	kern_return_t kr = KERN_SUCCESS;
1378 	struct kcdata_type_definition kc_type_definition;
1379 	mach_vm_address_t user_addr;
1380 	uint32_t total_size = sizeof(struct kcdata_type_definition);
1381 	bzero(&kc_type_definition, sizeof(kc_type_definition));
1382 
1383 	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
1384 		return KERN_INVALID_ARGUMENT;
1385 	}
1386 	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
1387 	kc_type_definition.kct_num_elements = elements_count;
1388 	kc_type_definition.kct_type_identifier = type_id;
1389 
1390 	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
1391 	/* record number of padding bytes as lower 4 bits of flags */
1392 	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
1393 	    kcdata_calc_padding(total_size), &user_addr))) {
1394 		return kr;
1395 	}
1396 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
1397 		return kr;
1398 	}
1399 	user_addr += sizeof(struct kcdata_type_definition);
1400 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
1401 		return kr;
1402 	}
1403 	return kr;
1404 }
1405 
1406 kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc,uint64_t data,const char * description)1407 kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
1408 {
1409 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1410 		return KERN_INVALID_ARGUMENT;
1411 	}
1412 
1413 	kern_return_t kr = 0;
1414 	mach_vm_address_t user_addr;
1415 	struct _uint64_with_description_data save_data;
1416 	const uint64_t size_req = sizeof(save_data);
1417 	bzero(&save_data, size_req);
1418 
1419 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1420 	save_data.data = data;
1421 
1422 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1423 		/* allocate space for the output */
1424 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
1425 	}
1426 
1427 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
1428 	if (kr != KERN_SUCCESS) {
1429 		return kr;
1430 	}
1431 
1432 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1433 		if (copyout(&save_data, user_addr, size_req)) {
1434 			return KERN_NO_ACCESS;
1435 		}
1436 	} else {
1437 		memcpy((void *)user_addr, &save_data, size_req);
1438 	}
1439 	return KERN_SUCCESS;
1440 }
1441 
1442 kern_return_t
kcdata_add_uint32_with_description(kcdata_descriptor_t data_desc,uint32_t data,const char * description)1443 kcdata_add_uint32_with_description(
1444 	kcdata_descriptor_t data_desc,
1445 	uint32_t data,
1446 	const char *description)
1447 {
1448 	assert(strlen(description) < KCDATA_DESC_MAXLEN);
1449 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1450 		return KERN_INVALID_ARGUMENT;
1451 	}
1452 	kern_return_t kr = 0;
1453 	mach_vm_address_t user_addr;
1454 	struct _uint32_with_description_data save_data;
1455 	const uint64_t size_req = sizeof(save_data);
1456 
1457 	bzero(&save_data, size_req);
1458 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1459 	save_data.data = data;
1460 
1461 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1462 		/* allocate space for the output */
1463 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
1464 	}
1465 
1466 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
1467 	if (kr != KERN_SUCCESS) {
1468 		return kr;
1469 	}
1470 
1471 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1472 		if (copyout(&save_data, user_addr, size_req)) {
1473 			return KERN_NO_ACCESS;
1474 		}
1475 	} else {
1476 		memcpy((void *)user_addr, &save_data, size_req);
1477 	}
1478 
1479 	return KERN_SUCCESS;
1480 }
1481 
1482 
1483 /* end buffer management api */
1484