1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 /*
31 *
32 * THE KCDATA MANIFESTO
33 *
34 * Kcdata is a self-describing data serialization format. It is meant to get
35 * nested data structures out of xnu with minimum fuss, but also for that data
36 * to be easy to parse. It is also meant to allow us to add new fields and
37 * evolve the data format without breaking old parsers.
38 *
39 * Kcdata is a permanent data format suitable for long-term storage including
40 * in files. It is very important that we continue to be able to parse old
41 * versions of kcdata-based formats. To this end, there are several
42 * invariants you MUST MAINTAIN if you alter this file.
43 *
44 * * None of the magic numbers should ever be a byteswap of themselves or
45 * of any of the other magic numbers.
46 *
47 * * Never remove any type.
48 *
49 * * All kcdata structs must be packed, and must exclusively use fixed-size
50 * types.
51 *
52 * * Never change the definition of any type, except to add new fields to
53 * the end.
54 *
55 * * If you do add new fields to the end of a type, do not actually change
56 * the definition of the old structure. Instead, define a new structure
57 * with the new fields. See thread_snapshot_v3 as an example. This
58 * provides source compatibility for old readers, and also documents where
59 * the potential size cutoffs are.
60 *
 * * If you change libkdd or kcdata.py, run the unit tests under libkdd.
62 *
63 * * If you add a type or extend an existing one, add a sample test to
64 * libkdd/tests so future changes to libkdd will always parse your struct
65 * correctly.
66 *
67 * For example to add a field to this:
68 *
69 * struct foobar {
70 * uint32_t baz;
71 * uint32_t quux;
72 * } __attribute__ ((packed));
73 *
74 * Define an evolved structure alongside it like this:
75 *
76 * struct foobar_v2 {
77 * uint32_t baz;
78 * uint32_t quux;
79 * ///////// This is where the original structure's layout ended! sizeof(struct foobar) was 8 ////////
80 * uint32_t frozzle;
81 * } __attribute__ ((packed));
82 *
83 * If you are parsing kcdata formats, you MUST
84 *
85 * * Check the length field of each struct, including array elements. If the
86 * struct is longer than you expect, you must ignore the extra data.
87 *
88 * * Ignore any data types you do not understand.
89 *
90 * Additionally, we want to be as forward compatible as we can. Meaning old
91 * tools should still be able to use new data whenever possible. To this end,
92 * you should:
93 *
94 * * Try not to add new versions of types that supplant old ones. Instead
95 * extend the length of existing types or add supplemental types.
96 *
97 * * Try not to remove information from existing kcdata formats, unless
98 * removal was explicitly asked for. For example it is fine to add a
99 * stackshot flag to remove unwanted information, but you should not
100 * remove it from the default stackshot if the new flag is absent.
101 *
102 * * (TBD) If you do break old readers by removing information or
103 * supplanting old structs, then increase the major version number.
104 *
105 *
106 *
107 * The following is a description of the kcdata format.
108 *
109 *
110 * The format for data is setup in a generic format as follows
111 *
112 * Layout of data structure:
113 *
114 * | 8 - bytes |
115 * | type = MAGIC | LENGTH |
116 * | 0 |
117 * | type | size |
118 * | flags |
119 * | data |
120 * |___________data____________|
121 * | type | size |
122 * | flags |
123 * |___________data____________|
124 * | type = END | size=0 |
125 * | 0 |
126 *
127 *
128 * The type field describes what kind of data is passed. For example type = TASK_CRASHINFO_UUID means the following data is a uuid.
129 * These types need to be defined in task_corpse.h for easy consumption by userspace inspection tools.
130 *
131 * Some range of types is reserved for special types like ints, longs etc. A cool new functionality made possible with this
132 * extensible data format is that kernel can decide to put more information as required without requiring user space tools to
133 * re-compile to be compatible. The case of rusage struct versions could be introduced without breaking existing tools.
134 *
135 * Feature description: Generic data with description
136 * -------------------
 * Furthermore, generic data with a description is also possible. For example
138 *
139 * - kcdata_add_uint64_with_description(cdatainfo, 0x700, "NUM MACH PORTS");
140 * - and more functions that allow adding description.
141 * The userspace tools can then look at the description and print the data even if they are not compiled with knowledge of the field apriori.
142 *
143 * Example data:
144 * 0000 57 f1 ad de 00 00 00 00 00 00 00 00 00 00 00 00 W...............
145 * 0010 01 00 00 00 00 00 00 00 30 00 00 00 00 00 00 00 ........0.......
146 * 0020 50 49 44 00 00 00 00 00 00 00 00 00 00 00 00 00 PID.............
147 * 0030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
148 * 0040 9c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
149 * 0050 01 00 00 00 00 00 00 00 30 00 00 00 00 00 00 00 ........0.......
150 * 0060 50 41 52 45 4e 54 20 50 49 44 00 00 00 00 00 00 PARENT PID......
151 * 0070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
152 * 0080 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
153 * 0090 ed 58 91 f1
154 *
155 * Feature description: Container markers for compound data
156 * ------------------
157 * If a given kernel data type is complex and requires adding multiple optional fields inside a container
158 * object for a consumer to understand arbitrary data, we package it using container markers.
159 *
160 * For example, the stackshot code gathers information and describes the state of a given task with respect
161 * to many subsystems. It includes data such as io stats, vm counters, process names/flags and syscall counts.
162 *
163 * kcdata_add_container_marker(kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
164 * // add multiple data, or add_<type>_with_description()s here
165 *
166 * kcdata_add_container_marker(kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
167 *
168 * Feature description: Custom Data formats on demand
169 * --------------------
170 * With the self describing nature of format, the kernel provider can describe a data type (uniquely identified by a number) and use
171 * it in the buffer for sending data. The consumer can parse the type information and have knowledge of describing incoming data.
172 * Following is an example of how we can describe a kernel specific struct sample_disk_io_stats in buffer.
173 *
174 * struct sample_disk_io_stats {
175 * uint64_t disk_reads_count;
176 * uint64_t disk_reads_size;
177 * uint64_t io_priority_count[4];
178 * uint64_t io_priority_size;
179 * } __attribute__ ((packed));
180 *
181 *
182 * struct kcdata_subtype_descriptor disk_io_stats_def[] = {
183 * {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
184 * {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
185 * {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
186 * {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
187 * };
188 *
189 * Now you can add this custom type definition into the buffer as
190 * kcdata_add_type_definition(kcdata_p, KCTYPE_SAMPLE_DISK_IO_STATS, "sample_disk_io_stats",
191 * &disk_io_stats_def[0], sizeof(disk_io_stats_def)/sizeof(struct kcdata_subtype_descriptor));
192 *
193 * Feature description: Compression
194 * --------------------
195 * In order to avoid keeping large amounts of memory reserved for a panic stackshot, kcdata has support
196 * for compressing the buffer in a streaming fashion. New data pushed to the kcdata buffer will be
197 * automatically compressed using an algorithm selected by the API user (currently, we only support
198 * pass-through and zlib, in the future we plan to add WKDM support, see: 57913859).
199 *
200 * To start using compression, call:
201 * kcdata_init_compress(kcdata_p, hdr_tag, memcpy_f, comp_type);
202 * where:
203 * `kcdata_p` is the kcdata buffer that will be used
204 * `hdr_tag` is the usual header tag denoting what type of kcdata buffer this will be
205 * `memcpy_f` a memcpy(3) function to use to copy into the buffer, optional.
 *    `comp_type` is the compression type, see KCDCT_ZLIB for an example.
207 *
208 * Once compression is initialized:
209 * (1) all self-describing APIs will automatically compress
210 * (2) you can now use the following APIs to compress data into the buffer:
211 * (None of the following will compress unless kcdata_init_compress() has been called)
212 *
213 * - kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
214 * Pushes the buffer of kctype @type at[@input_data, @input_data + @size]
215 * into the kcdata buffer @data, compressing if needed.
216 *
217 * - kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element,
218 * uint32_t size_of_element, uint32_t count, const void *input_data)
219 * Pushes the array found at @input_data, with element type @type_of_element, where
220 * each element is of size @size_of_element and there are @count elements into the kcdata buffer
221 * at @data.
222 *
223 * - kcdata_compression_window_open/close(kcdata_descriptor_t data)
224 * In case the data you are trying to push to the kcdata buffer @data is difficult to predict,
225 * you can open a "compression window". Between an open and a close, no compression will be done.
226 * Once you close the window, the underlying compression algorithm will compress the data into the buffer
227 * and automatically rewind the current end marker of the kcdata buffer.
228 * There is an ASCII art in kern_cdata.c to aid the reader in understanding
229 * this.
230 *
231 * - kcdata_finish_compression(kcdata_descriptor_t data)
232 * Must be called at the end to flush any underlying buffers used by the compression algorithms.
233 * This function will also add some statistics about the compression to the buffer which helps with
234 * decompressing later.
235 *
236 */
237
238
239 #ifndef _KCDATA_H_
240 #define _KCDATA_H_
241
242 #include <stdint.h>
243 #include <string.h>
244 #include <uuid/uuid.h>
245
246
247
248 #define KCDATA_DESC_MAXLEN 32 /* including NULL byte at end */
249
250 #define KCDATA_FLAGS_STRUCT_PADDING_MASK 0xf
251 #define KCDATA_FLAGS_STRUCT_HAS_PADDING 0x80
252
253 /*
254 * kcdata aligns elements to 16 byte boundaries.
255 */
256 #define KCDATA_ALIGNMENT_SIZE 0x10
257
/*
 * Header for a single kcdata element. Every item in a kcdata buffer is a
 * (type, size, flags) header followed by `size` bytes of payload; elements
 * are aligned to KCDATA_ALIGNMENT_SIZE (16-byte) boundaries.
 */
struct kcdata_item {
	uint32_t type;
	uint32_t size; /* len(data) */
	/* flags.
	 *
	 * For structures:
	 *    padding      = flags & 0xf
	 *    has_padding  = (flags & 0x80) >> 7
	 *
	 * has_padding is needed to disambiguate cases such as
	 * thread_snapshot_v2 and thread_snapshot_v3.  Their
	 * respective sizes are 0x68 and 0x70, and thread_snapshot_v2
	 * was emitted by old kernels *before* we started recording
	 * padding.  Since legacy thread_snapshot_v2 and modern
	 * thread_snapshot_v3 will both record 0 for the padding
	 * flags, we need some other bit which will be nonzero in the
	 * flags to disambiguate.
	 *
	 * This is why we hardcode a special case for
	 * STACKSHOT_KCTYPE_THREAD_SNAPSHOT into the iterator
	 * functions below.  There is only a finite number of such
	 * hardcodings which will ever be needed.  They can occur
	 * when:
	 *
	 *  * We have a legacy structure that predates padding flags
	 *
	 *  * which we want to extend without changing the kcdata type
	 *
	 *  * by only so many bytes as would fit in the space that
	 *    was previously unused padding.
	 *
	 * For containers:
	 *    container_id = flags
	 *
	 * For arrays:
	 *    element_count = flags & UINT32_MAX
	 *    element_type  = (flags >> 32) & UINT32_MAX
	 */
	uint64_t flags;
	char data[]; /* must be at the end */
};

typedef struct kcdata_item * kcdata_item_t;
301
/*
 * Primitive element types usable in kcdata_subtype_descriptor.kcs_elem_type
 * when describing the layout of compound data. Values start at 1.
 */
enum KCDATA_SUBTYPE_TYPES { KC_ST_CHAR = 1, KC_ST_INT8, KC_ST_UINT8, KC_ST_INT16, KC_ST_UINT16, KC_ST_INT32, KC_ST_UINT32, KC_ST_INT64, KC_ST_UINT64 };
typedef enum KCDATA_SUBTYPE_TYPES kctype_subtype_t;
304
/*
 * A subtype description structure that defines
 * how a compound data is laid out in memory. This
 * provides on the fly definition of types and consumption
 * by the parser.
 */
struct kcdata_subtype_descriptor {
	uint8_t kcs_flags;
#define KCS_SUBTYPE_FLAGS_NONE 0x0
#define KCS_SUBTYPE_FLAGS_ARRAY 0x1
	/* Force struct type even if only one element.
	 *
	 * Normally a kcdata_type_definition is treated as a structure if it has
	 * more than one subtype descriptor. Otherwise it is treated as a simple
	 * type. For example libkdd will represent a simple integer 42 as simply
	 * 42, but it will represent a structure containing an integer 42 as
	 * {"field_name": 42}.
	 *
	 * If a kcdata_type_definition has only single subtype, then it will be
	 * treated as a structure iff KCS_SUBTYPE_FLAGS_STRUCT is set. If it has
	 * multiple subtypes, it will always be treated as a structure.
	 *
	 * KCS_SUBTYPE_FLAGS_MERGE has the opposite effect. If this flag is used then
	 * even if there are multiple elements, they will all be treated as individual
	 * properties of the parent dictionary.
	 */
#define KCS_SUBTYPE_FLAGS_STRUCT 0x2 /* force struct type even if only one element */
#define KCS_SUBTYPE_FLAGS_MERGE 0x4 /* treat as multiple elements of parents instead of struct */
	uint8_t kcs_elem_type; /* restricted to kctype_subtype_t */
	uint16_t kcs_elem_offset; /* offset in struct where data is found */
	uint32_t kcs_elem_size; /* size of element (or) packed state for array type */
	char kcs_name[KCDATA_DESC_MAXLEN]; /* max 31 bytes for name of field */
};

typedef struct kcdata_subtype_descriptor * kcdata_subtype_descriptor_t;
340
341 /*
342 * In case of array of basic c types in kctype_subtype_t,
343 * size is packed in lower 16 bits and
344 * count is packed in upper 16 bits of kcs_elem_size field.
345 */
346 #define KCS_SUBTYPE_PACK_SIZE(e_count, e_size) (((e_count)&0xffffu) << 16 | ((e_size)&0xffffu))
347
348 static inline uint32_t
kcs_get_elem_size(kcdata_subtype_descriptor_t d)349 kcs_get_elem_size(kcdata_subtype_descriptor_t d)
350 {
351 if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) {
352 /* size is composed as ((count &0xffff)<<16 | (elem_size & 0xffff)) */
353 return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000) >> 16));
354 }
355 return d->kcs_elem_size;
356 }
357
358 static inline uint32_t
kcs_get_elem_count(kcdata_subtype_descriptor_t d)359 kcs_get_elem_count(kcdata_subtype_descriptor_t d)
360 {
361 if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) {
362 return (d->kcs_elem_size >> 16) & 0xffff;
363 }
364 return 1;
365 }
366
367 static inline int
kcs_set_elem_size(kcdata_subtype_descriptor_t d,uint32_t size,uint32_t count)368 kcs_set_elem_size(kcdata_subtype_descriptor_t d, uint32_t size, uint32_t count)
369 {
370 if (count > 1) {
371 /* means we are setting up an array */
372 if (size > 0xffff || count > 0xffff) {
373 return -1; //invalid argument
374 }
375 d->kcs_elem_size = ((count & 0xffff) << 16 | (size & 0xffff));
376 } else {
377 d->kcs_elem_size = size;
378 }
379 return 0;
380 }
381
/*
 * On-the-fly type definition, emitted with KCDATA_TYPE_TYPEDEFINTION.
 * Describes a custom type as an array of subtype descriptors so that
 * parsers can decode later items of kct_type_identifier without being
 * compiled with prior knowledge of the layout.
 */
struct kcdata_type_definition {
	uint32_t kct_type_identifier;                    /* type id this definition describes */
	uint32_t kct_num_elements;                       /* number of entries in kct_elements[] */
	char kct_name[KCDATA_DESC_MAXLEN];               /* human-readable type name */
	struct kcdata_subtype_descriptor kct_elements[]; /* per-field descriptors (flexible array) */
};
388
389
390 /* chunk type definitions. 0 - 0x7ff are reserved and defined here
391 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
392 * in STACKSHOT_KCTYPE_* types.
393 */
394
395 /*
396 * Types with description value.
397 * these will have KCDATA_DESC_MAXLEN-1 length string description
398 * and rest of kcdata_iter_size() - KCDATA_DESC_MAXLEN bytes as data
399 */
400 #define KCDATA_TYPE_INVALID 0x0u
401 #define KCDATA_TYPE_STRING_DESC 0x1u
402 #define KCDATA_TYPE_UINT32_DESC 0x2u
403 #define KCDATA_TYPE_UINT64_DESC 0x3u
404 #define KCDATA_TYPE_INT32_DESC 0x4u
405 #define KCDATA_TYPE_INT64_DESC 0x5u
406 #define KCDATA_TYPE_BINDATA_DESC 0x6u
407
408 /*
409 * Compound type definitions
410 */
411 #define KCDATA_TYPE_ARRAY 0x11u /* Array of data OBSOLETE DONT USE THIS*/
412 #define KCDATA_TYPE_TYPEDEFINTION 0x12u /* Meta type that describes a type on the fly. */
413 #define KCDATA_TYPE_CONTAINER_BEGIN \
414 0x13u /* Container type which has corresponding CONTAINER_END header. \
415 * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment. \
416 * Both headers have (uint64_t) ID for matching up nested data. \
417 */
418 #define KCDATA_TYPE_CONTAINER_END 0x14u
419
420 #define KCDATA_TYPE_ARRAY_PAD0 0x20u /* Array of data with 0 byte of padding*/
421 #define KCDATA_TYPE_ARRAY_PAD1 0x21u /* Array of data with 1 byte of padding*/
422 #define KCDATA_TYPE_ARRAY_PAD2 0x22u /* Array of data with 2 byte of padding*/
423 #define KCDATA_TYPE_ARRAY_PAD3 0x23u /* Array of data with 3 byte of padding*/
424 #define KCDATA_TYPE_ARRAY_PAD4 0x24u /* Array of data with 4 byte of padding*/
425 #define KCDATA_TYPE_ARRAY_PAD5 0x25u /* Array of data with 5 byte of padding*/
426 #define KCDATA_TYPE_ARRAY_PAD6 0x26u /* Array of data with 6 byte of padding*/
427 #define KCDATA_TYPE_ARRAY_PAD7 0x27u /* Array of data with 7 byte of padding*/
428 #define KCDATA_TYPE_ARRAY_PAD8 0x28u /* Array of data with 8 byte of padding*/
429 #define KCDATA_TYPE_ARRAY_PAD9 0x29u /* Array of data with 9 byte of padding*/
430 #define KCDATA_TYPE_ARRAY_PADa 0x2au /* Array of data with a byte of padding*/
431 #define KCDATA_TYPE_ARRAY_PADb 0x2bu /* Array of data with b byte of padding*/
432 #define KCDATA_TYPE_ARRAY_PADc 0x2cu /* Array of data with c byte of padding*/
433 #define KCDATA_TYPE_ARRAY_PADd 0x2du /* Array of data with d byte of padding*/
434 #define KCDATA_TYPE_ARRAY_PADe 0x2eu /* Array of data with e byte of padding*/
435 #define KCDATA_TYPE_ARRAY_PADf 0x2fu /* Array of data with f byte of padding*/
436
437 /*
438 * Generic data types that are most commonly used
439 */
440 #define KCDATA_TYPE_LIBRARY_LOADINFO 0x30u /* struct dyld_uuid_info_32 */
441 #define KCDATA_TYPE_LIBRARY_LOADINFO64 0x31u /* struct dyld_uuid_info_64 */
442 #define KCDATA_TYPE_TIMEBASE 0x32u /* struct mach_timebase_info */
443 #define KCDATA_TYPE_MACH_ABSOLUTE_TIME 0x33u /* uint64_t */
444 #define KCDATA_TYPE_TIMEVAL 0x34u /* struct timeval64 */
445 #define KCDATA_TYPE_USECS_SINCE_EPOCH 0x35u /* time in usecs uint64_t */
446 #define KCDATA_TYPE_PID 0x36u /* int32_t */
447 #define KCDATA_TYPE_PROCNAME 0x37u /* char * */
448 #define KCDATA_TYPE_NESTED_KCDATA 0x38u /* nested kcdata buffer */
449 #define KCDATA_TYPE_LIBRARY_AOTINFO 0x39u /* struct user64_dyld_aot_info */
450
451 #define KCDATA_TYPE_BUFFER_END 0xF19158EDu
452
453 /* MAGIC numbers defined for each class of chunked data
454 *
455 * To future-proof against big-endian arches, make sure none of these magic
456 * numbers are byteswaps of each other
457 */
458
459 #define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u /* owner: corpses/task_corpse.h */
460 /* type-range: 0x800 - 0x8ff */
461 #define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u /* owner: sys/stackshot.h */
462 /* type-range: 0x900 - 0x93f */
463 #define KCDATA_BUFFER_BEGIN_COMPRESSED 0x434f4d50u /* owner: sys/stackshot.h */
464 /* type-range: 0x900 - 0x93f */
465 #define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */
466 /* type-range: 0x940 - 0x9ff */
467 #define KCDATA_BUFFER_BEGIN_BTINFO 0x46414E47u /* owner: kern/kern_exit.c */
468 /* type-range: 0xa01 - 0xaff */
469 #define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u /* owner: sys/reason.h */
470 /* type-range: 0x1000-0x103f */
471 #define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu /* owner: osfmk/tests/kernel_tests.c */
472 /* type-range: 0x1040-0x105f */
473
474 /* next type range number available 0x1060 */
475 /**************** definitions for XNUPOST *********************/
476 #define XNUPOST_KCTYPE_TESTCONFIG 0x1040
477
478 /**************** definitions for stackshot *********************/
479
480 /* This value must always match IO_NUM_PRIORITIES defined in thread_info.h */
481 #define STACKSHOT_IO_NUM_PRIORITIES 4
482 /* This value must always match MAXTHREADNAMESIZE used in bsd */
483 #define STACKSHOT_MAX_THREAD_NAME_SIZE 64
484
485 /*
486 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
487 * in STACKSHOT_KCTYPE_* types.
488 */
489 #define STACKSHOT_KCTYPE_IOSTATS 0x901u /* io_stats_snapshot */
490 #define STACKSHOT_KCTYPE_GLOBAL_MEM_STATS 0x902u /* struct mem_and_io_snapshot */
491 #define STACKSHOT_KCCONTAINER_TASK 0x903u
492 #define STACKSHOT_KCCONTAINER_THREAD 0x904u
493 #define STACKSHOT_KCTYPE_TASK_SNAPSHOT 0x905u /* task_snapshot_v2, task_snapshot_v3 */
494 #define STACKSHOT_KCTYPE_THREAD_SNAPSHOT 0x906u /* thread_snapshot_v2, thread_snapshot_v3 */
495 #define STACKSHOT_KCTYPE_DONATING_PIDS 0x907u /* int[] */
496 #define STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO 0x908u /* dyld_shared_cache_loadinfo */
497 #define STACKSHOT_KCTYPE_THREAD_NAME 0x909u /* char[] */
498 #define STACKSHOT_KCTYPE_KERN_STACKFRAME 0x90Au /* struct stack_snapshot_frame32 */
499 #define STACKSHOT_KCTYPE_KERN_STACKFRAME64 0x90Bu /* struct stack_snapshot_frame64 */
500 #define STACKSHOT_KCTYPE_USER_STACKFRAME 0x90Cu /* struct stack_snapshot_frame32 */
501 #define STACKSHOT_KCTYPE_USER_STACKFRAME64 0x90Du /* struct stack_snapshot_frame64 */
502 #define STACKSHOT_KCTYPE_BOOTARGS 0x90Eu /* boot args string */
503 #define STACKSHOT_KCTYPE_OSVERSION 0x90Fu /* os version string, same as running uname -a */
504 #define STACKSHOT_KCTYPE_KERN_PAGE_SIZE 0x910u /* kernel page size in uint32_t */
505 #define STACKSHOT_KCTYPE_JETSAM_LEVEL 0x911u /* jetsam level in uint32_t */
506 #define STACKSHOT_KCTYPE_DELTA_SINCE_TIMESTAMP 0x912u /* timestamp used for the delta stackshot */
507 #define STACKSHOT_KCTYPE_KERN_STACKLR 0x913u /* uint32_t */
508 #define STACKSHOT_KCTYPE_KERN_STACKLR64 0x914u /* uint64_t */
509 #define STACKSHOT_KCTYPE_USER_STACKLR 0x915u /* uint32_t */
510 #define STACKSHOT_KCTYPE_USER_STACKLR64 0x916u /* uint64_t */
511 #define STACKSHOT_KCTYPE_NONRUNNABLE_TIDS 0x917u /* uint64_t */
512 #define STACKSHOT_KCTYPE_NONRUNNABLE_TASKS 0x918u /* uint64_t */
513 #define STACKSHOT_KCTYPE_CPU_TIMES 0x919u /* struct stackshot_cpu_times or stackshot_cpu_times_v2 */
514 #define STACKSHOT_KCTYPE_STACKSHOT_DURATION 0x91au /* struct stackshot_duration */
515 #define STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS 0x91bu /* struct stackshot_fault_stats */
516 #define STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO 0x91cu /* kernelcache UUID -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
517 #define STACKSHOT_KCTYPE_THREAD_WAITINFO 0x91du /* struct stackshot_thread_waitinfo */
518 #define STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT 0x91eu /* struct thread_group_snapshot{,_v2,_v3} */
519 #define STACKSHOT_KCTYPE_THREAD_GROUP 0x91fu /* uint64_t */
520 #define STACKSHOT_KCTYPE_JETSAM_COALITION_SNAPSHOT 0x920u /* struct jetsam_coalition_snapshot */
521 #define STACKSHOT_KCTYPE_JETSAM_COALITION 0x921u /* uint64_t */
522 #define STACKSHOT_KCTYPE_THREAD_POLICY_VERSION 0x922u /* THREAD_POLICY_INTERNAL_STRUCT_VERSION in uint32 */
523 #define STACKSHOT_KCTYPE_INSTRS_CYCLES 0x923u /* struct instrs_cycles_snapshot_v2 */
524 #define STACKSHOT_KCTYPE_USER_STACKTOP 0x924u /* struct stack_snapshot_stacktop */
525 #define STACKSHOT_KCTYPE_ASID 0x925u /* uint32_t */
526 #define STACKSHOT_KCTYPE_PAGE_TABLES 0x926u /* uint64_t */
527 #define STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT 0x927u /* same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
528 #define STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL 0x928u /* dispatch queue label */
529 #define STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO 0x929u /* struct stackshot_thread_turnstileinfo */
530 #define STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE 0x92au /* struct stackshot_cpu_architecture */
531 #define STACKSHOT_KCTYPE_LATENCY_INFO 0x92bu /* struct stackshot_latency_collection_v2 */
532 #define STACKSHOT_KCTYPE_LATENCY_INFO_TASK 0x92cu /* struct stackshot_latency_task */
533 #define STACKSHOT_KCTYPE_LATENCY_INFO_THREAD 0x92du /* struct stackshot_latency_thread */
534 #define STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC 0x92eu /* TEXT_EXEC load info -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
535 #define STACKSHOT_KCTYPE_AOTCACHE_LOADINFO 0x92fu /* struct dyld_aot_cache_uuid_info */
536 #define STACKSHOT_KCTYPE_TRANSITIONING_TASK_SNAPSHOT 0x930u /* transitioning_task_snapshot */
537 #define STACKSHOT_KCCONTAINER_TRANSITIONING_TASK 0x931u
538 #define STACKSHOT_KCTYPE_USER_ASYNC_START_INDEX 0x932u /* uint32_t index in user_stack of beginning of async stack */
539 #define STACKSHOT_KCTYPE_USER_ASYNC_STACKLR64 0x933u /* uint64_t async stack pointers */
540 #define STACKSHOT_KCCONTAINER_PORTLABEL 0x934u /* container for port label info */
541 #define STACKSHOT_KCTYPE_PORTLABEL 0x935u /* struct stackshot_portlabel */
542 #define STACKSHOT_KCTYPE_PORTLABEL_NAME 0x936u /* string port name */
543 #define STACKSHOT_KCTYPE_DYLD_COMPACTINFO 0x937u /* binary blob of dyld info (variable size) */
544 #define STACKSHOT_KCTYPE_SUSPENSION_INFO 0x938u /* struct stackshot_suspension_info */
545 #define STACKSHOT_KCTYPE_SUSPENSION_SOURCE 0x939u /* struct stackshot_suspension_source */
546
547 #define STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT 0x940u /* task_delta_snapshot_v2 */
548 #define STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT 0x941u /* thread_delta_snapshot_v* */
549 #define STACKSHOT_KCCONTAINER_SHAREDCACHE 0x942u /* container for shared cache info */
550 #define STACKSHOT_KCTYPE_SHAREDCACHE_INFO 0x943u /* dyld_shared_cache_loadinfo_v2 */
551 #define STACKSHOT_KCTYPE_SHAREDCACHE_AOTINFO 0x944u /* struct dyld_aot_cache_uuid_info */
552 #define STACKSHOT_KCTYPE_SHAREDCACHE_ID 0x945u /* uint32_t in task: if we aren't attached to Primary, which one */
553 #define STACKSHOT_KCTYPE_CODESIGNING_INFO 0x946u /* struct stackshot_task_codesigning_info */
554 #define STACKSHOT_KCTYPE_OS_BUILD_VERSION 0x947u /* os build version string (ex: 20A123) */
555 #define STACKSHOT_KCTYPE_KERN_EXCLAVES_THREADINFO 0x948u /* struct thread_exclaves_info */
556 #define STACKSHOT_KCCONTAINER_EXCLAVES 0x949u /* exclave threads info */
557 #define STACKSHOT_KCCONTAINER_EXCLAVE_SCRESULT 0x94au /* exclave thread container for one scid */
558 #define STACKSHOT_KCTYPE_EXCLAVE_SCRESULT_INFO 0x94bu /* struct exclave_scresult_info */
559 #define STACKSHOT_KCCONTAINER_EXCLAVE_IPCSTACKENTRY 0x94cu /* container for one chunk of exclave IPC chain */
560 #define STACKSHOT_KCTYPE_EXCLAVE_IPCSTACKENTRY_INFO 0x94du /* struct exclave_ipcstackentry_info */
561 #define STACKSHOT_KCTYPE_EXCLAVE_IPCSTACKENTRY_ECSTACK 0x94eu /* exclave_ecstackentry_addr_t */
562 #define STACKSHOT_KCCONTAINER_EXCLAVE_ADDRESSSPACE 0x94fu /* exclave address space container */
563 #define STACKSHOT_KCTYPE_EXCLAVE_ADDRESSSPACE_INFO 0x950u /* struct exclave_addressspace_info */
564 #define STACKSHOT_KCTYPE_EXCLAVE_ADDRESSSPACE_NAME 0x951u /* exclave component name */
565 #define STACKSHOT_KCCONTAINER_EXCLAVE_TEXTLAYOUT 0x952u /* exclave text layout container */
566 #define STACKSHOT_KCTYPE_EXCLAVE_TEXTLAYOUT_INFO 0x953u /* struct exclave_textlayout_info */
567 #define STACKSHOT_KCTYPE_EXCLAVE_TEXTLAYOUT_SEGMENTS 0x954u /* struct exclave_textlayout_segment_v2 */
568 #define STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO 0x955u /* struct thread_crash_exclaves_info */
569 #define STACKSHOT_KCTYPE_LATENCY_INFO_CPU 0x956u /* struct stackshot_latency_cpu */
570 #define STACKSHOT_KCTYPE_TASK_EXEC_META 0x957u /* struct task_exec_meta */
571 #define STACKSHOT_KCTYPE_TASK_MEMORYSTATUS 0x958u /* struct task_memorystatus_snapshot */
572 #define STACKSHOT_KCTYPE_MTEINFO_CELL 0x959u /* struct mteinfo_cell */
573 #define STACKSHOT_KCTYPE_LATENCY_INFO_BUFFER 0x95au /* struct stackshot_latency_buffer */
574
/*
 * One captured 32-bit stack frame (lr/sp pair); payload of
 * STACKSHOT_KCTYPE_KERN_STACKFRAME / STACKSHOT_KCTYPE_USER_STACKFRAME.
 */
struct stack_snapshot_frame32 {
	uint32_t lr;
	uint32_t sp;
};
579
/*
 * One captured 64-bit stack frame (lr/sp pair); payload of
 * STACKSHOT_KCTYPE_KERN_STACKFRAME64 / STACKSHOT_KCTYPE_USER_STACKFRAME64.
 */
struct stack_snapshot_frame64 {
	uint64_t lr;
	uint64_t sp;
};
584
/* Load address and UUID of one 32-bit image (KCDATA_TYPE_LIBRARY_LOADINFO). */
struct dyld_uuid_info_32 {
	uint32_t imageLoadAddress; /* base address image is mapped at */
	uuid_t imageUUID;
};
589
/* Load address and UUID of one 64-bit image (KCDATA_TYPE_LIBRARY_LOADINFO64). */
struct dyld_uuid_info_64 {
	uint64_t imageLoadAddress; /* XXX image slide */
	uuid_t imageUUID;
};
594
/*
 * N.B.: Newer kernels output dyld_shared_cache_loadinfo structures
 * instead of this, since the field names match their contents better.
 * Extends dyld_uuid_info_64 by one trailing field; per the kcdata
 * evolution rules the original layout is left untouched above.
 */
struct dyld_uuid_info_64_v2 {
	uint64_t imageLoadAddress; /* XXX image slide */
	uuid_t imageUUID;
	/* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */
	uint64_t imageSlidBaseAddress; /* slid base address or slid first mapping of image */
};
605
/* Bit flags describing a shared cache; reported via the sharedCacheFlags field. */
enum dyld_shared_cache_flags {
	kSharedCacheSystemPrimary = 0x1, /* primary shared cache on the system; attached tasks will have kTaskSharedRegionSystem set */
	kSharedCacheDriverkit = 0x2,     /* driverkit shared cache */
	kSharedCacheAOT = 0x4,           /* Rosetta shared cache */
};
611
/*
 * This is the renamed version of dyld_uuid_info_64 with more accurate
 * field names, for STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO. Any users
 * must be aware of the dyld_uuid_info_64* version history and ensure
 * the fields they are accessing are within the actual bounds.
 *
 * OLD_FIELD               NEW_FIELD
 * imageLoadAddress        sharedCacheSlide
 * imageUUID               sharedCacheUUID
 * imageSlidBaseAddress    sharedCacheUnreliableSlidBaseAddress
 * -                       sharedCacheSlidFirstMapping
 * -                       sharedCacheID
 * -                       sharedCacheFlags
 */
struct dyld_shared_cache_loadinfo_v2 {
	uint64_t sharedCacheSlide; /* image slide value */
	uuid_t sharedCacheUUID;
	/* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */
	uint64_t sharedCacheUnreliableSlidBaseAddress; /* for backwards-compatibility; use sharedCacheSlidFirstMapping if available */
	/* end of version 2 of dyld_uuid_info_64. sizeof v2 was 32 */
	uint64_t sharedCacheSlidFirstMapping; /* slid base address of first mapping */
	/* end of version 1 of dyld_shared_cache_loadinfo. sizeof was 40 */
	uint32_t sharedCacheID; /* ID of shared cache */
	uint32_t sharedCacheFlags; /* enum dyld_shared_cache_flags bits */
};
637
/*
 * Shared cache load info (payload of STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO).
 * Same leading layout as dyld_shared_cache_loadinfo_v2 but without the
 * trailing sharedCacheID/sharedCacheFlags fields.
 */
struct dyld_shared_cache_loadinfo {
	uint64_t sharedCacheSlide; /* image slide value */
	uuid_t sharedCacheUUID;
	/* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */
	uint64_t sharedCacheUnreliableSlidBaseAddress; /* for backwards-compatibility; use sharedCacheSlidFirstMapping if available */
	/* end of version 2 of dyld_uuid_info_64. sizeof v2 was 32 */
	uint64_t sharedCacheSlidFirstMapping; /* slid base address of first mapping */
};
646
/* Load info for the paired x86 and Rosetta AOT shared caches. */
struct dyld_aot_cache_uuid_info {
	uint64_t x86SlidBaseAddress; /* slid first mapping address of x86 shared cache */
	uuid_t x86UUID; /* UUID of x86 shared cache */
	uint64_t aotSlidBaseAddress; /* slid first mapping address of aot cache */
	uuid_t aotUUID; /* UUID of aot shared cache */
};
653
/* Per-image load info for a 32-bit user task. */
struct user32_dyld_uuid_info {
	uint32_t        imageLoadAddress;       /* base address image is mapped into */
	uuid_t                  imageUUID;                      /* UUID of image */
};
658
/* Per-image load info for a 64-bit user task. */
struct user64_dyld_uuid_info {
	uint64_t        imageLoadAddress;       /* base address image is mapped into */
	uuid_t                  imageUUID;                      /* UUID of image */
};
663
#define DYLD_AOT_IMAGE_KEY_SIZE 32

/* Rosetta AOT image mapping info for a 64-bit user task. */
struct user64_dyld_aot_info {
	uint64_t x86LoadAddress;  /* load address of the original x86 image */
	uint64_t aotLoadAddress;  /* load address of the translated (AOT) image */
	uint64_t aotImageSize;
	uint8_t aotImageKey[DYLD_AOT_IMAGE_KEY_SIZE];
};
672
/* Bit flags for the ts_ss_flags / tds_ss_flags fields of task snapshots (64-bit value). */
enum task_snapshot_flags {
	/* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */
	kTaskRsrcFlagged                      = 0x4, // In the EXC_RESOURCE danger zone?
	kTerminatedSnapshot                   = 0x8,
	kPidSuspended                         = 0x10, // true for suspended task
	kFrozen                               = 0x20, // true for hibernated task (along with pidsuspended)
	kTaskDarwinBG                         = 0x40,
	kTaskExtDarwinBG                      = 0x80,
	kTaskVisVisible                       = 0x100,
	kTaskVisNonvisible                    = 0x200,
	kTaskIsForeground                     = 0x400,
	kTaskIsBoosted                        = 0x800,
	kTaskIsSuppressed                     = 0x1000,
	kTaskIsTimerThrottled                 = 0x2000, /* deprecated */
	kTaskIsImpDonor                       = 0x4000,
	kTaskIsLiveImpDonor                   = 0x8000,
	kTaskIsDirty                          = 0x10000,
	kTaskWqExceededConstrainedThreadLimit = 0x20000,
	kTaskWqExceededTotalThreadLimit       = 0x40000,
	kTaskWqFlagsAvailable                 = 0x80000,
	kTaskUUIDInfoFaultedIn                = 0x100000, /* successfully faulted in some UUID info */
	kTaskUUIDInfoMissing                  = 0x200000, /* some UUID info was paged out */
	kTaskUUIDInfoTriedFault               = 0x400000, /* tried to fault in UUID info */
	kTaskSharedRegionInfoUnavailable      = 0x800000, /* shared region info unavailable */
	kTaskTALEngaged                       = 0x1000000,
	/* 0x2000000 unused */
	kTaskIsDirtyTracked                   = 0x4000000,
	kTaskAllowIdleExit                    = 0x8000000,
	kTaskIsTranslated                     = 0x10000000,
	kTaskSharedRegionNone                 = 0x20000000, /* task doesn't have a shared region */
	kTaskSharedRegionSystem               = 0x40000000, /* task attached to region with kSharedCacheSystemPrimary set */
	kTaskSharedRegionOther                = 0x80000000, /* task is attached to a different shared region */
	kTaskDyldCompactInfoNone              = 0x100000000,
	kTaskDyldCompactInfoTooBig            = 0x200000000,
	kTaskDyldCompactInfoFaultedIn         = 0x400000000,
	kTaskDyldCompactInfoMissing           = 0x800000000,
	kTaskDyldCompactInfoTriedFault        = 0x1000000000,
	kTaskWqExceededCooperativeThreadLimit = 0x2000000000,
	kTaskWqExceededActiveConstrainedThreadLimit = 0x4000000000,
	kTaskRunawayMitigation                = 0x8000000000,
	kTaskIsActive                         = 0x10000000000,
	kTaskIsManaged                        = 0x20000000000,
	kTaskHasAssertion                     = 0x40000000000,
}; // Note: Add any new flags to kcdata.py (ts_ss_flags)
717
/* Values for transitioning_task_snapshot.tts_transition_type. */
enum task_transition_type {
	kTaskIsTerminated = 0x1, // Past LPEXIT
};
721
/* See kcdata_private.h for more flag definitions */
/* Bit flags for task_exec_meta.tem_flags; fixed at spawn/exec time. */
enum task_exec_flags : uint64_t {
	kTaskExecTranslated = 0x01, /* Task is running under translation (eg, Rosetta) */
	kTaskExecHardenedHeap = 0x02, /* Task has the hardened heap security feature */
	kTaskExecReserved00 = 0x04,
	kTaskExecReserved01 = 0x08,
	kTaskExecReserved02 = 0x10,
	kTaskExecReserved03 = 0x20
};
731
/* metadata about a task that is fixed at spawn/exec time */
struct task_exec_meta {
	uint64_t tem_flags; /* combination of enum task_exec_flags values */
} __attribute__((packed));
736
737
/* MTE info cell state, must match mte_cell_state_t */
/* Values are implicit (0..7); do not reorder — this is a serialized format. */
__enum_closed_decl(mte_info_cell_state_t, uint8_t, {
	MTE_INFO_STATE_DISABLED,
	MTE_INFO_STATE_PINNED,
	MTE_INFO_STATE_DEACTIVATING,
	MTE_INFO_STATE_CLAIMED,
	MTE_INFO_STATE_INACTIVE,
	MTE_INFO_STATE_RECLAIMING,
	MTE_INFO_STATE_ACTIVATING,
	MTE_INFO_STATE_ACTIVE,
});
749
/* MTE info cell data */
struct mte_info_cell {
	uint8_t mic_state; /* an mte_info_cell_state_t value */
	uint8_t mic_tagged_count; /* Number of tagged pages in this tag storage page */
	uint8_t mic_free_count; /* Number of free pages in this tag storage page */
	uint8_t mic_wired_count; /* Number of wired pages in this tag storage page, regardless of tagging */
	uint8_t mic_wired_tagged_count; /* Number of tagged pages wired that aren't used by kernel memory allocators */
	uint8_t mic_kernel_wired_tagged_count; /* Number of tagged pages wired for use by the kernel memory allocators, kmem and zalloc */
} __attribute__((packed));
759
/* Bit flags for the ths_ss_flags / tds_ss_flags fields of thread snapshots. */
enum thread_snapshot_flags {
	/* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */
	kHasDispatchSerial    = 0x4,
	kStacksPCOnly         = 0x8,    /* Stack traces have no frame pointers. */
	kThreadDarwinBG       = 0x10,   /* Thread is darwinbg */
	kThreadIOPassive      = 0x20,   /* Thread uses passive IO */
	kThreadSuspended      = 0x40,   /* Thread is suspended */
	kThreadTruncatedBT    = 0x80,   /* Unmapped pages caused truncated backtrace */
	kGlobalForcedIdle     = 0x100,  /* Thread performs global forced idle */
	kThreadFaultedBT      = 0x200,  /* Some thread stack pages were faulted in as part of BT */
	kThreadTriedFaultBT   = 0x400,  /* We tried to fault in thread stack pages as part of BT */
	kThreadOnCore         = 0x800,  /* Thread was on-core when we entered debugger context */
	kThreadIdleWorker     = 0x1000, /* Thread is an idle libpthread worker thread */
	kThreadMain           = 0x2000, /* Thread is the main thread */
	kThreadTruncKernBT    = 0x4000, /* Unmapped pages caused truncated kernel BT */
	kThreadTruncUserBT    = 0x8000, /* Unmapped pages caused truncated user BT */
	kThreadTruncUserAsyncBT = 0x10000, /* Unmapped pages caused truncated user async BT */
}; // Note: Add any new flags to kcdata.py (ths_ss_flags)
778
/* System-wide VM and I/O statistics captured with a stackshot. */
struct mem_and_io_snapshot {
	uint32_t        snapshot_magic;
	uint32_t        free_pages;
	uint32_t        active_pages;
	uint32_t        inactive_pages;
	uint32_t        purgeable_pages;
	uint32_t        wired_pages;
	uint32_t        speculative_pages;
	uint32_t        throttled_pages;
	uint32_t        filebacked_pages;
	uint32_t        compressions;
	uint32_t        decompressions;
	uint32_t        compressor_size;
	int32_t         busy_buffer_count;
	uint32_t        pages_wanted;
	uint32_t        pages_reclaimed;
	uint8_t         pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed?
} __attribute__((packed));
797
/* SS_TH_* macros are for ths_state */
#define SS_TH_WAIT              0x01            /* queued for waiting */
#define SS_TH_SUSP              0x02            /* stopped or requested to stop */
#define SS_TH_RUN               0x04            /* running or on runq */
#define SS_TH_UNINT             0x08            /* waiting uninterruptibly */
#define SS_TH_TERMINATE         0x10            /* halted at termination */
#define SS_TH_TERMINATE2        0x20            /* added to termination queue */
/* NOTE(review): 0x40 is intentionally skipped here — do not reuse without checking thread state bits */
#define SS_TH_IDLE              0x80            /* idling processor */
806
/* Per-thread stackshot record, version 2. */
struct thread_snapshot_v2 {
	uint64_t  ths_thread_id;
	uint64_t  ths_wait_event;
	uint64_t  ths_continuation;
	uint64_t  ths_total_syscalls;
	uint64_t  ths_voucher_identifier;
	uint64_t  ths_dqserialnum;
	uint64_t  ths_user_time;
	uint64_t  ths_sys_time;
	uint64_t  ths_ss_flags;         /* combination of enum thread_snapshot_flags values */
	uint64_t  ths_last_run_time;
	uint64_t  ths_last_made_runnable_time;
	uint32_t  ths_state;            /* combination of SS_TH_* values */
	uint32_t  ths_sched_flags;
	int16_t   ths_base_priority;
	int16_t   ths_sched_priority;
	uint8_t   ths_eqos;
	uint8_t   ths_rqos;
	uint8_t   ths_rqos_override;
	uint8_t   ths_io_tier;
} __attribute__((packed));
828
/* Per-thread stackshot record, version 3: v2 plus ths_thread_t appended. */
struct thread_snapshot_v3 {
	uint64_t  ths_thread_id;
	uint64_t  ths_wait_event;
	uint64_t  ths_continuation;
	uint64_t  ths_total_syscalls;
	uint64_t  ths_voucher_identifier;
	uint64_t  ths_dqserialnum;
	uint64_t  ths_user_time;
	uint64_t  ths_sys_time;
	uint64_t  ths_ss_flags;         /* combination of enum thread_snapshot_flags values */
	uint64_t  ths_last_run_time;
	uint64_t  ths_last_made_runnable_time;
	uint32_t  ths_state;            /* combination of SS_TH_* values */
	uint32_t  ths_sched_flags;
	int16_t   ths_base_priority;
	int16_t   ths_sched_priority;
	uint8_t   ths_eqos;
	uint8_t   ths_rqos;
	uint8_t   ths_rqos_override;
	uint8_t   ths_io_tier;
	uint64_t  ths_thread_t;
} __attribute__((packed));
851
852
/* Per-thread stackshot record, version 4: v3 plus requested/effective policy appended. */
struct thread_snapshot_v4 {
	uint64_t  ths_thread_id;
	uint64_t  ths_wait_event;
	uint64_t  ths_continuation;
	uint64_t  ths_total_syscalls;
	uint64_t  ths_voucher_identifier;
	uint64_t  ths_dqserialnum;
	uint64_t  ths_user_time;
	uint64_t  ths_sys_time;
	uint64_t  ths_ss_flags;         /* combination of enum thread_snapshot_flags values */
	uint64_t  ths_last_run_time;
	uint64_t  ths_last_made_runnable_time;
	uint32_t  ths_state;            /* combination of SS_TH_* values */
	uint32_t  ths_sched_flags;
	int16_t   ths_base_priority;
	int16_t   ths_sched_priority;
	uint8_t   ths_eqos;
	uint8_t   ths_rqos;
	uint8_t   ths_rqos_override;
	uint8_t   ths_io_tier;
	uint64_t  ths_thread_t;
	uint64_t  ths_requested_policy;
	uint64_t  ths_effective_policy;
} __attribute__((packed));
877
878
/* Thread-group record, version 1 (id + 16-byte name). */
struct thread_group_snapshot {
	uint64_t tgs_id;
	char tgs_name[16]; /* NOTE(review): may not be NUL-terminated if the name fills all 16 bytes — confirm with writer */
} __attribute__((packed));
883
884 /*
885 * In general these flags mirror their THREAD_GROUP_FLAGS_ counterparts.
886 * THREAD_GROUP_FLAGS_UI_APP was repurposed and THREAD_GROUP_FLAGS_APPLICATION
887 * introduced to take its place. To remain compatible, kThreadGroupUIApp is
888 * kept around and kThreadGroupUIApplication introduced.
889 */
/* Bit flags for thread_group_snapshot_v2/v3 tgs_flags (see note above on UIApp/UIApplication). */
enum thread_group_flags {
	kThreadGroupEfficient = 0x1,
	kThreadGroupApplication = 0x2,
	kThreadGroupUIApp = 0x2,        /* legacy alias of kThreadGroupApplication; kept for compatibility */
	kThreadGroupCritical = 0x4,
	kThreadGroupBestEffort = 0x8,
	kThreadGroupUIApplication = 0x100,
	kThreadGroupManaged = 0x200,
	kThreadGroupStrictTimers = 0x400,
}; // Note: Add any new flags to kcdata.py (tgs_flags)
900
/* Thread-group record, version 2: v1 plus tgs_flags appended. */
struct thread_group_snapshot_v2 {
	uint64_t tgs_id;
	char tgs_name[16];
	uint64_t tgs_flags; /* combination of enum thread_group_flags values */
} __attribute__((packed));
906
/* Thread-group record, version 3: v2 plus 16 more name bytes (tgs_name continued in tgs_name_cont). */
struct thread_group_snapshot_v3 {
	uint64_t tgs_id;
	char tgs_name[16];
	uint64_t tgs_flags; /* combination of enum thread_group_flags values */
	char tgs_name_cont[16];
} __attribute__((packed));
913
/* Bit flags for jetsam_coalition_snapshot.jcs_flags. */
enum coalition_flags {
	kCoalitionTermRequested = 0x1,
	kCoalitionTerminated    = 0x2,
	kCoalitionReaped        = 0x4,
	kCoalitionPrivileged    = 0x8,
}; // Note: Add any new flags to kcdata.py (jcs_flags)
920
/* Per-coalition stackshot record. */
struct jetsam_coalition_snapshot {
	uint64_t jcs_id;
	uint64_t jcs_flags; /* combination of enum coalition_flags values */
	uint64_t jcs_thread_group;
	uint64_t jcs_leader_task_uniqueid;
} __attribute__((packed));
927
/* Instruction/cycle counts, version 1. */
struct instrs_cycles_snapshot {
	uint64_t ics_instructions;
	uint64_t ics_cycles;
} __attribute__((packed));
932
/* Instruction/cycle counts, version 2: v1 plus P-core-only counters appended. */
struct instrs_cycles_snapshot_v2 {
	uint64_t ics_instructions;
	uint64_t ics_cycles;
	uint64_t ics_p_instructions;
	uint64_t ics_p_cycles;
} __attribute__((packed));
939
/* Compact per-thread record for delta stackshots, version 2. */
struct thread_delta_snapshot_v2 {
	uint64_t  tds_thread_id;
	uint64_t  tds_voucher_identifier;
	uint64_t  tds_ss_flags;         /* combination of enum thread_snapshot_flags values */
	uint64_t  tds_last_made_runnable_time;
	uint32_t  tds_state;            /* combination of SS_TH_* values */
	uint32_t  tds_sched_flags;
	int16_t   tds_base_priority;
	int16_t   tds_sched_priority;
	uint8_t   tds_eqos;
	uint8_t   tds_rqos;
	uint8_t   tds_rqos_override;
	uint8_t   tds_io_tier;
} __attribute__ ((packed));
954
/* Compact per-thread record for delta stackshots, version 3: v2 plus policies appended. */
struct thread_delta_snapshot_v3 {
	uint64_t  tds_thread_id;
	uint64_t  tds_voucher_identifier;
	uint64_t  tds_ss_flags;         /* combination of enum thread_snapshot_flags values */
	uint64_t  tds_last_made_runnable_time;
	uint32_t  tds_state;            /* combination of SS_TH_* values */
	uint32_t  tds_sched_flags;
	int16_t   tds_base_priority;
	int16_t   tds_sched_priority;
	uint8_t   tds_eqos;
	uint8_t   tds_rqos;
	uint8_t   tds_rqos_override;
	uint8_t   tds_io_tier;
	uint64_t  tds_requested_policy;
	uint64_t  tds_effective_policy;
} __attribute__ ((packed));
971
/* Per-task (or system) I/O statistics. */
struct io_stats_snapshot {
	/*
	 * I/O Statistics
	 * XXX: These fields must be together.
	 */
	uint64_t         ss_disk_reads_count;
	uint64_t         ss_disk_reads_size;
	uint64_t         ss_disk_writes_count;
	uint64_t         ss_disk_writes_size;
	uint64_t         ss_io_priority_count[STACKSHOT_IO_NUM_PRIORITIES];
	uint64_t         ss_io_priority_size[STACKSHOT_IO_NUM_PRIORITIES];
	uint64_t         ss_paging_count;
	uint64_t         ss_paging_size;
	uint64_t         ss_non_paging_count;
	uint64_t         ss_non_paging_size;
	uint64_t         ss_data_count;
	uint64_t         ss_data_size;
	uint64_t         ss_metadata_count;
	uint64_t         ss_metadata_size;
	/* XXX: I/O Statistics end */
} __attribute__ ((packed));
993
/* Per-task stackshot record, version 2. */
struct task_snapshot_v2 {
	uint64_t  ts_unique_pid;
	uint64_t  ts_ss_flags;          /* combination of enum task_snapshot_flags values */
	uint64_t  ts_user_time_in_terminated_threads;
	uint64_t  ts_system_time_in_terminated_threads;
	uint64_t  ts_p_start_sec;
	uint64_t  ts_task_size;
	uint64_t  ts_max_resident_size;
	uint32_t  ts_suspend_count;
	uint32_t  ts_faults;
	uint32_t  ts_pageins;
	uint32_t  ts_cow_faults;
	uint32_t  ts_was_throttled;
	uint32_t  ts_did_throttle;
	uint32_t  ts_latency_qos;
	int32_t   ts_pid;
	char      ts_p_comm[32];
} __attribute__ ((packed));
1012
/* Per-task stackshot record, version 3: v2 plus ts_uid/ts_gid appended. */
struct task_snapshot_v3 {
	uint64_t  ts_unique_pid;
	uint64_t  ts_ss_flags;          /* combination of enum task_snapshot_flags values */
	uint64_t  ts_user_time_in_terminated_threads;
	uint64_t  ts_system_time_in_terminated_threads;
	uint64_t  ts_p_start_sec;
	uint64_t  ts_task_size;
	uint64_t  ts_max_resident_size;
	uint32_t  ts_suspend_count;
	uint32_t  ts_faults;
	uint32_t  ts_pageins;
	uint32_t  ts_cow_faults;
	uint32_t  ts_was_throttled;
	uint32_t  ts_did_throttle;
	uint32_t  ts_latency_qos;
	int32_t   ts_pid;
	char      ts_p_comm[32];
	uint32_t  ts_uid;
	uint32_t  ts_gid;
} __attribute__ ((packed));
1033
/* Minimal record for a task in transition (e.g. terminating). */
struct transitioning_task_snapshot {
	uint64_t  tts_unique_pid;
	uint64_t  tts_ss_flags;         /* combination of enum task_snapshot_flags values */
	uint64_t  tts_transition_type;  /* an enum task_transition_type value */
	int32_t   tts_pid;
	char      tts_p_comm[32];
} __attribute__ ((packed));
1041
/* Compact per-task record for delta stackshots, version 2. */
struct task_delta_snapshot_v2 {
	uint64_t  tds_unique_pid;
	uint64_t  tds_ss_flags;         /* combination of enum task_snapshot_flags values */
	uint64_t  tds_user_time_in_terminated_threads;
	uint64_t  tds_system_time_in_terminated_threads;
	uint64_t  tds_task_size;
	uint64_t  tds_max_resident_size;
	uint32_t  tds_suspend_count;
	uint32_t  tds_faults;
	uint32_t  tds_pageins;
	uint32_t  tds_cow_faults;
	uint32_t  tds_was_throttled;
	uint32_t  tds_did_throttle;
	uint32_t  tds_latency_qos;
} __attribute__ ((packed));
1057
/* Memorystatus (jetsam) priority/limit info for a task. */
struct task_memorystatus_snapshot {
	int32_t  tms_current_memlimit;
	int32_t  tms_effectivepriority;
	int32_t  tms_requestedpriority;
	int32_t  tms_assertionpriority;
} __attribute__ ((packed));
1064
#define KCDATA_INVALID_CS_TRUST_LEVEL 0xffffffff /* sentinel: trust level unavailable */
/* Code-signing flags and trust level for a task. */
struct stackshot_task_codesigning_info {
	uint64_t csflags;
	uint32_t cs_trust_level; /* KCDATA_INVALID_CS_TRUST_LEVEL if unknown */
} __attribute__ ((packed));
1070
/* Per-thread CPU times, version 1 (microseconds). */
struct stackshot_cpu_times {
	uint64_t user_usec;
	uint64_t system_usec;
} __attribute__((packed));
1075
/* Per-thread CPU times, version 2: v1 plus runnable time appended (microseconds). */
struct stackshot_cpu_times_v2 {
	uint64_t user_usec;
	uint64_t system_usec;
	uint64_t runnable_usec;
} __attribute__((packed));
1081
/* How long the stackshot took, version 1. */
struct stackshot_duration {
	uint64_t stackshot_duration;
	uint64_t stackshot_duration_outer;
} __attribute__((packed));
1086
/* How long the stackshot took, version 2: v1 plus prior-attempt duration appended. */
struct stackshot_duration_v2 {
	uint64_t stackshot_duration;
	uint64_t stackshot_duration_outer;
	uint64_t stackshot_duration_prior;
} __attribute__((packed));
1092
/* Statistics about pages faulted in during stackshot collection. */
struct stackshot_fault_stats {
	uint32_t sfs_pages_faulted_in;      /* number of pages faulted in using KDP fault path */
	uint64_t sfs_time_spent_faulting;   /* MATUs spent faulting */
	uint64_t sfs_system_max_fault_time; /* MATUs fault time limit per stackshot */
	uint8_t  sfs_stopped_faulting;      /* we stopped decompressing because we hit the limit */
} __attribute__((packed));
1099
/* Wait information for a blocked thread, version 1. */
typedef struct stackshot_thread_waitinfo {
	uint64_t owner;     /* The thread that owns the object */
	uint64_t waiter;    /* The thread that's waiting on the object */
	uint64_t context;   /* A context uniquely identifying the object */
	uint8_t wait_type;  /* The type of object that the thread is waiting on */
} __attribute__((packed)) thread_waitinfo_t;
1106
/* Wait information, version 2: v1 plus port label id and wait flags appended. */
typedef struct stackshot_thread_waitinfo_v2 {
	uint64_t owner;        /* The thread that owns the object */
	uint64_t waiter;       /* The thread that's waiting on the object */
	uint64_t context;      /* A context uniquely identifying the object */
	uint8_t wait_type;     /* The type of object that the thread is waiting on */
	int16_t portlabel_id;  /* matches to a stackshot_portlabel, or NONE or MISSING */
	uint32_t wait_flags;   /* info about the wait */
#define STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY 0x1 /* We're waiting on a special reply port */
} __attribute__((packed)) thread_waitinfo_v2_t;
1116
1117
/* Turnstile (priority-inheritance) info for a waiting thread, version 1. */
typedef struct stackshot_thread_turnstileinfo {
	uint64_t waiter;            /* The thread that's waiting on the object */
	uint64_t turnstile_context; /* Associated data (either thread id, or workq addr) */
	uint8_t turnstile_priority;
	uint8_t number_of_hops;
	uint64_t turnstile_flags;   /* see STACKSHOT_TURNSTILE_STATUS_* below */
} __attribute__((packed)) thread_turnstileinfo_t;
1125
/* Turnstile info, version 2: v1 plus portlabel_id appended. */
typedef struct stackshot_thread_turnstileinfo_v2 {
	uint64_t waiter;            /* The thread that's waiting on the object */
	uint64_t turnstile_context; /* Associated data (either thread id, or workq addr) */
	uint8_t turnstile_priority;
	uint8_t number_of_hops;
#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN         0x01 /* The final inheritor is unknown (bug?) */
#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ    0x02 /* A waitq was found to be locked */
#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE       0x04 /* The final inheritor is a workqueue */
#define STACKSHOT_TURNSTILE_STATUS_THREAD          0x08 /* The final inheritor is a thread */
#define STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK 0x10 /* blocked on task, didn't find thread */
#define STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK     0x20 /* the ip_lock was held */
#define STACKSHOT_TURNSTILE_STATUS_SENDPORT        0x40 /* port_labelid was from a send port */
#define STACKSHOT_TURNSTILE_STATUS_RECEIVEPORT     0x80 /* port_labelid was from a receive port */
	uint64_t turnstile_flags; // Note: Add any new flags to kcdata.py (turnstile_flags)
	int16_t portlabel_id;     /* matches to a stackshot_portlabel, or NONE or MISSING */
} __attribute__((packed)) thread_turnstileinfo_v2_t;
1142
#define STACKSHOT_TURNSTILE_STATUS_PORTFLAGS (STACKSHOT_TURNSTILE_STATUS_SENDPORT | STACKSHOT_TURNSTILE_STATUS_RECEIVEPORT)

/* Sentinel values for portlabel_id fields. */
#define STACKSHOT_PORTLABELID_NONE    (0)  /* No port label found */
#define STACKSHOT_PORTLABELID_MISSING (-1) /* portlabel found, but stackshot ran out of space to track it */

/* Sentinel values for waitinfo owner fields (near-UINT64_MAX to avoid colliding with real thread ids). */
#define STACKSHOT_WAITOWNER_KERNEL      (UINT64_MAX - 1)
#define STACKSHOT_WAITOWNER_PORT_LOCKED (UINT64_MAX - 2)
#define STACKSHOT_WAITOWNER_PSET_LOCKED (UINT64_MAX - 3)
#define STACKSHOT_WAITOWNER_INTRANSIT   (UINT64_MAX - 4)
#define STACKSHOT_WAITOWNER_MTXSPIN     (UINT64_MAX - 5)
#define STACKSHOT_WAITOWNER_THREQUESTED (UINT64_MAX - 6) /* workloop waiting for a new worker thread */
#define STACKSHOT_WAITOWNER_SUSPENDED   (UINT64_MAX - 7) /* workloop is suspended */

/* Bit flags for portlabel_info.portlabel_flags. */
#define STACKSHOT_PORTLABEL_READFAILED  0x1 /* could not read port information */
#define STACKSHOT_PORTLABEL_THROTTLED   0x2 /* service port is marked as throttled */
1158
/* A port label referenced by waitinfo/turnstileinfo portlabel_id fields. */
struct portlabel_info {
	int16_t portlabel_id;     /* kcdata-specific ID for this port label */
	uint16_t portlabel_flags; /* STACKSHOT_PORTLABEL_* */
	uint8_t portlabel_domain; /* launchd domain */
} __attribute__((packed));
1164
/* CPU type/subtype pair (mach cpu_type_t / cpu_subtype_t values). */
struct stackshot_cpu_architecture {
	int32_t cputype;
	int32_t cpusubtype;
} __attribute__((packed));
1169
/* Stack pointer plus the 8 bytes of stack contents at that address. */
struct stack_snapshot_stacktop {
	uint64_t sp;
	uint8_t stack_contents[8];
};
1174
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
struct stackshot_latency_collection {
	uint64_t latency_version;
	uint64_t setup_latency;
	uint64_t total_task_iteration_latency;
	uint64_t total_terminated_task_iteration_latency;
} __attribute__((packed));
1182
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
/* _mt-suffixed fields are presumably mach_absolute_time units — TODO confirm with writer. */
struct stackshot_latency_collection_v2 {
	uint64_t latency_version;
	uint64_t setup_latency_mt;
	uint64_t total_task_iteration_latency_mt;
	uint64_t total_terminated_task_iteration_latency_mt;
	uint64_t task_queue_building_latency_mt;
	uint64_t terminated_task_queue_building_latency_mt;
	uint64_t cpu_wait_latency_mt;
	int32_t main_cpu_number;
	int32_t calling_cpu_number;
	uint64_t buffer_size;
	uint64_t buffer_used;
	uint64_t buffer_overhead;
	uint64_t buffer_count;
} __attribute__((packed));
1199
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
/* Per-CPU latency/throughput stats for a (parallel) stackshot. */
struct stackshot_latency_cpu {
	int32_t  cpu_number;
	int32_t  cluster_type;
	uint64_t init_latency_mt;
	uint64_t workqueue_latency_mt;
	uint64_t total_latency_mt;
	uint64_t total_cycles;
	uint64_t total_instrs;
	uint64_t tasks_processed;
	uint64_t threads_processed;
	uint64_t faulting_time_mt;
	uint64_t total_buf;
	uint64_t intercluster_buf_used;
} __attribute__((packed));
1215
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
/* Per-cluster buffer usage stats. */
struct stackshot_latency_buffer {
	int32_t  cluster_type;
	uint64_t size;
	uint64_t used;
	uint64_t overhead;
} __attribute__ ((packed));
1223
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
/* Per-task latency breakdown during stackshot collection. */
struct stackshot_latency_task {
	uint64_t task_uniqueid;
	uint64_t setup_latency;
	uint64_t task_thread_count_loop_latency;
	uint64_t task_thread_data_loop_latency;
	uint64_t cur_tsnap_latency;
	uint64_t pmap_latency;
	uint64_t bsd_proc_ids_latency;
	uint64_t misc_latency;
	uint64_t misc2_latency;
	uint64_t end_latency;
} __attribute__((packed));
1237
/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
/* Per-thread latency breakdown during stackshot collection. */
struct stackshot_latency_thread {
	uint64_t thread_id;
	uint64_t cur_thsnap1_latency;
	uint64_t dispatch_serial_latency;
	uint64_t dispatch_label_latency;
	uint64_t cur_thsnap2_latency;
	uint64_t thread_name_latency;
	uint64_t sur_times_latency;
	uint64_t user_stack_latency;
	uint64_t kernel_stack_latency;
	uint64_t misc_latency;
} __attribute__((packed));
1251
/* Aggregate suspension history for a task. */
struct stackshot_suspension_info {
	uint64_t tss_last_start; /* mach_absolute_time of beginning of last suspension*/
	uint64_t tss_last_end;   /* mach_absolute_time of end of last suspension */
	uint64_t tss_count;      /* number of times this task has been suspended */
	uint64_t tss_duration;   /* sum(mach_absolute_time) of time spend suspended */
} __attribute__((packed));
1258
/* Identity of the task/thread that performed a suspension. */
struct stackshot_suspension_source {
	uint64_t tss_time;        /* mach_absolute_time of suspend */
	uint64_t tss_tid;         /* tid of suspending thread */
	int tss_pid;              /* pid of suspending task */
	char tss_procname[65];    /* name of suspending task */
} __attribute__((packed));
1265
1266 /**************** definitions for exclaves *********************/
1267
/* Bit flags for thread_exclaves_info.tei_flags and thread_crash_exclaves_info.tcei_flags. */
enum thread_exclaves_flags : uint32_t {
	kExclaveRPCActive       = 0x1, /* Thread is handling RPC call in secure world */
	kExclaveUpcallActive    = 0x2, /* Thread has upcalled back into xnu while handling RPC */
	kExclaveSchedulerRequest = 0x4, /* Thread is handling scheduler request */
};
1273
/* Exclave execution state for a live thread. */
struct thread_exclaves_info {
	uint64_t tei_scid;          /* Scheduling context for exclave IPC stack */
	uint32_t tei_thread_offset; /* # frames from top of stack exclave frames should be inserted */
	uint32_t tei_flags;         /* A combination of enum thread_exclaves_flags values */
} __attribute__((packed));
1279
/* Exclave execution state recorded at crash time. */
struct thread_crash_exclaves_info {
	uint64_t tcei_scid;      /* Scheduling context for exclave IPC stack */
	uint64_t tcei_thread_id; /* Corresponding xnu thread id */
	uint32_t tcei_flags;     /* A combination of enum thread_exclaves_flags values */
} __attribute__((packed));
1285
/* Bit flags for exclave_scresult_info.esc_flags. */
enum exclave_scresult_flags : uint64_t {
	kExclaveScresultHaveIPCStack = 0x1,
};
1289
/* Scheduling-context result for an exclave. */
struct exclave_scresult_info {
	uint64_t esc_id;
	uint64_t esc_flags; /* A combination of enum exclave_scresult_flags values */
} __attribute__((packed));
1294
/* Bit flags for exclave_ipcstackentry_info.eise_flags. */
enum exclave_ipcstackentry_flags : uint64_t {
	kExclaveIpcStackEntryHaveInvocationID = 0x1,
	kExclaveIpcStackEntryHaveStack = 0x2,
};
1299
/* One entry in an exclave IPC stack. */
struct exclave_ipcstackentry_info {
	uint64_t eise_asid;         /* ASID */
	uint64_t eise_tnid;         /* Thread numeric ID, may be UINT64_MAX if omitted */
	uint64_t eise_invocationid; /* Invocation ID, may be UINT64_MAX if omitted */
	uint64_t eise_flags;        /* A combination of enum exclave_ipcstackentry_flags values */
} __attribute__((packed));
1306
/* Address of one exclave call-stack entry. */
typedef uint64_t exclave_ecstackentry_addr_t;

/* Bit flags for exclave_addressspace_info.eas_flags. */
enum exclave_addressspace_flags : uint64_t {
	kExclaveAddressSpaceHaveSlide = 0x1, /* slide info provided */
};
1312
/* Description of one exclave address space. */
struct exclave_addressspace_info {
	uint64_t eas_id;       /* ASID */
	uint64_t eas_flags;    /* A combination of enum exclave_addressspace_flags values */
	uint64_t eas_layoutid; /* textLayout for this address space */
	uint64_t eas_slide;    /* slide to apply to textlayout, or UINT64_MAX if omitted */
	uint64_t eas_asroot;   /* ASRoot/TTBR0 value used as an identifier for the address space by cL4 */
} __attribute__((packed));
1320
/* Bit flags for exclave_textlayout_info{,_v1}.etl_flags. */
enum exclave_textlayout_flags : uint64_t {
	kExclaveTextLayoutLoadAddressesSynthetic = 0x1, /* Load Addresses are synthetic */
	kExclaveTextLayoutLoadAddressesUnslid = 0x2,    /* Load Addresses are accurate and unslid */
	kExclaveTextLayoutHasSharedCache = 0x4,         /* sharedcache_index is valid, refers to entry # in STACKSHOT_KCTYPE_EXCLAVE_TEXTLAYOUT_SEGMENTS array */
};
1326
/* Exclave text layout header, version 1. */
struct exclave_textlayout_info_v1 {
	uint64_t layout_id;
	uint64_t etl_flags; /* A combination of enum exclave_textlayout_flags values */
} __attribute__((packed));
1331
/* Exclave text layout header: v1 plus sharedcache_index appended. */
struct exclave_textlayout_info {
	uint64_t layout_id;
	uint64_t etl_flags;          /* A combination of enum exclave_textlayout_flags values */
	uint32_t sharedcache_index;  /* index in SEGMENTs, or UINT32_MAX */
} __attribute__((packed));
1337
/* One segment of an exclave text layout, version 1. */
struct exclave_textlayout_segment {
	uuid_t layoutSegment_uuid;
	uint64_t layoutSegment_loadAddress; /* Synthetic Load Address */
} __attribute__((packed));
1342
/* Text layout segment, version 2: v1 plus raw (unslid) load address appended. */
struct exclave_textlayout_segment_v2 {
	uuid_t layoutSegment_uuid;
	uint64_t layoutSegment_loadAddress;    /* Synthetic Load Address */
	uint64_t layoutSegment_rawLoadAddress; /* Raw Load Address when unslided */
} __attribute__((packed));
1348
1349 /**************** definitions for crashinfo *********************/
1350
1351 /*
1352 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
1353 * in TASK_CRASHINFO_* types.
1354 */
1355
1356 /* FIXME some of these types aren't clean (fixed width, packed, and defined *here*) */
1357
/* Unique process identity for crash reports (TASK_CRASHINFO_BSDINFOWITHUNIQID). */
struct crashinfo_proc_uniqidentifierinfo {
	uint8_t                 p_uuid[16];             /* UUID of the main executable */
	uint64_t                p_uniqueid;             /* 64 bit unique identifier for process */
	uint64_t                p_puniqueid;            /* unique identifier for process's parent */
	uint64_t                p_reserve2;             /* reserved for future use */
	uint64_t                p_reserve3;             /* reserved for future use */
	uint64_t                p_reserve4;             /* reserved for future use */
} __attribute__((packed));
1366
#define MAX_TRIAGE_STRING_LEN (128)

/* Up to five human-readable kernel triage strings (TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1). */
struct kernel_triage_info_v1 {
	char triage_string1[MAX_TRIAGE_STRING_LEN];
	char triage_string2[MAX_TRIAGE_STRING_LEN];
	char triage_string3[MAX_TRIAGE_STRING_LEN];
	char triage_string4[MAX_TRIAGE_STRING_LEN];
	char triage_string5[MAX_TRIAGE_STRING_LEN];
} __attribute__((packed));
1376
/* [start_address, end_address) of a JIT region (TASK_CRASHINFO_JIT_ADDRESS_RANGE). */
struct crashinfo_jit_address_range {
	uint64_t start_address;
	uint64_t end_address;
} __attribute__((packed));
1381
/* A 512-byte (64 x uint64_t) memory blob starting at start_address (TASK_CRASHINFO_MB). */
struct crashinfo_mb {
	uint64_t start_address;
	uint64_t data[64];
} __attribute__((packed));
1386
/* Serialized task security configuration (TASK_CRASHINFO_TASK_SECURITY_CONFIG). */
struct crashinfo_task_security_config {
	uint32_t task_security_config; /* struct task_security_config */
} __attribute__((packed));
1390
1391
#define MAX_CRASHINFO_SIGNING_ID_LEN 64
#define MAX_CRASHINFO_TEAM_ID_LEN 32

/*
 * TASK_CRASHINFO_* kcdata type codes. Values are permanent once shipped;
 * only append new codes — never renumber or reuse existing ones.
 */
#define TASK_CRASHINFO_BEGIN                KCDATA_BUFFER_BEGIN_CRASHINFO
#define TASK_CRASHINFO_STRING_DESC          KCDATA_TYPE_STRING_DESC
#define TASK_CRASHINFO_UINT32_DESC          KCDATA_TYPE_UINT32_DESC
#define TASK_CRASHINFO_UINT64_DESC          KCDATA_TYPE_UINT64_DESC

#define TASK_CRASHINFO_EXTMODINFO           0x801
#define TASK_CRASHINFO_BSDINFOWITHUNIQID    0x802 /* struct crashinfo_proc_uniqidentifierinfo */
#define TASK_CRASHINFO_TASKDYLD_INFO        0x803
#define TASK_CRASHINFO_UUID                 0x804
#define TASK_CRASHINFO_PID                  0x805
#define TASK_CRASHINFO_PPID                 0x806
#define TASK_CRASHINFO_RUSAGE               0x807 /* struct rusage DEPRECATED do not use.
	                                           * This struct has longs in it */
#define TASK_CRASHINFO_RUSAGE_INFO          0x808 /* struct rusage_info_v3 from resource.h */
#define TASK_CRASHINFO_PROC_NAME            0x809 /* char * */
/* NOTE(review): 0x80A is not defined here — presumably retired or defined elsewhere; do not reuse */
#define TASK_CRASHINFO_PROC_STARTTIME       0x80B /* struct timeval64 */
#define TASK_CRASHINFO_USERSTACK            0x80C /* uint64_t */
#define TASK_CRASHINFO_ARGSLEN              0x80D
#define TASK_CRASHINFO_EXCEPTION_CODES      0x80E /* mach_exception_data_t */
#define TASK_CRASHINFO_PROC_PATH            0x80F /* string of len MAXPATHLEN */
#define TASK_CRASHINFO_PROC_CSFLAGS         0x810 /* uint32_t */
#define TASK_CRASHINFO_PROC_STATUS          0x811 /* char */
#define TASK_CRASHINFO_UID                  0x812 /* uid_t */
#define TASK_CRASHINFO_GID                  0x813 /* gid_t */
#define TASK_CRASHINFO_PROC_ARGC            0x814 /* int */
#define TASK_CRASHINFO_PROC_FLAGS           0x815 /* unsigned int */
#define TASK_CRASHINFO_CPUTYPE              0x816 /* cpu_type_t */
#define TASK_CRASHINFO_WORKQUEUEINFO        0x817 /* struct proc_workqueueinfo */
#define TASK_CRASHINFO_RESPONSIBLE_PID      0x818 /* pid_t */
#define TASK_CRASHINFO_DIRTY_FLAGS          0x819 /* int */
#define TASK_CRASHINFO_CRASHED_THREADID     0x81A /* uint64_t */
#define TASK_CRASHINFO_COALITION_ID         0x81B /* uint64_t */
#define TASK_CRASHINFO_UDATA_PTRS           0x81C /* uint64_t */
#define TASK_CRASHINFO_MEMORY_LIMIT         0x81D /* uint64_t */

#define TASK_CRASHINFO_LEDGER_INTERNAL      0x81E /* uint64_t */
#define TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED     0x81F /* uint64_t */
#define TASK_CRASHINFO_LEDGER_IOKIT_MAPPED  0x820 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING    0x821 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED  0x822  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE   0x823 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED  0x824  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PAGE_TABLE    0x825 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT          0x826  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX  0x827  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE     0x828  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED  0x829  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_WIRED_MEM     0x82A /* uint64_t */
#define TASK_CRASHINFO_PROC_PERSONA_ID      0x82B /* uid_t */
#define TASK_CRASHINFO_MEMORY_LIMIT_INCREASE          0x82C  /* uint32_t */
#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT        0x82D  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED  0x82E  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT         0x82F  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED   0x830  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT      0x831  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED  0x832  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT        0x833  /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED  0x834  /* uint64_t */
#define TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY  0x835  /* int32_t */
#define TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1          0x836  /* struct kernel_triage_info_v1 */

#define TASK_CRASHINFO_TASK_IS_CORPSE_FORK  0x837 /* boolean_t */
#define TASK_CRASHINFO_EXCEPTION_TYPE       0x838 /* int */

#define TASK_CRASHINFO_CRASH_COUNT          0x839 /* int */
#define TASK_CRASHINFO_THROTTLE_TIMEOUT     0x83A /* int */

#define TASK_CRASHINFO_CS_SIGNING_ID        0x83B /* string of len MAX_CRASHINFO_SIGNING_ID_LEN */
#define TASK_CRASHINFO_CS_TEAM_ID           0x83C /* string of len MAX_CRASHINFO_TEAM_ID_LEN */
#define TASK_CRASHINFO_CS_VALIDATION_CATEGORY  0x83D /* uint32_t */
#define TASK_CRASHINFO_CS_TRUST_LEVEL       0x83E /* uint32_t */
#define TASK_CRASHINFO_PROC_CPUTYPE         0x83F /* cpu_type_t */
#define TASK_CRASHINFO_JIT_ADDRESS_RANGE    0x840 /* struct crashinfo_jit_address_range */
#define TASK_CRASHINFO_MB                   0x841 /* struct crashinfo_mb */
#define TASK_CRASHINFO_CS_AUXILIARY_INFO    0x842 /* uint64_t */
#define TASK_CRASHINFO_RLIM_CORE            0x843 /* rlim_t */
#define TASK_CRASHINFO_CORE_ALLOWED         0x844 /* uint8_t */
#define TASK_CRASHINFO_TASK_SECURITY_CONFIG 0x845 /* struct task_security_config */
1473
1474
1475 #define TASK_CRASHINFO_END KCDATA_TYPE_BUFFER_END
1476
1477 /**************** definitions for backtrace info *********************/
1478
/* tstate is variable length with count elements */
struct btinfo_thread_state_data_t {
	uint32_t flavor; /* thread-state flavor identifier -- presumably a machine thread_state flavor; confirm against the producer */
	uint32_t count;  /* number of ints that follow in tstate[] */
	int tstate[];    /* flexible array: count raw thread-state words */
};
1485
/* Shared-cache load info, 64-bit layout (payload of TASK_BTINFO_SC_LOADINFO64). */
struct btinfo_sc_load_info64 {
	uint64_t sharedCacheSlide;
	uuid_t sharedCacheUUID;
	uint64_t sharedCacheBaseAddress;
};
1491
/* Shared-cache load info, 32-bit layout (payload of TASK_BTINFO_SC_LOADINFO). */
struct btinfo_sc_load_info {
	uint32_t sharedCacheSlide;
	uuid_t sharedCacheUUID;
	uint32_t sharedCacheBaseAddress;
};
1497
/* Backtrace-info kcdata keys. Like all kcdata key IDs, these values are
 * permanent ABI and must never be renumbered or reused. */
#define TASK_BTINFO_BEGIN KCDATA_BUFFER_BEGIN_BTINFO

/* Shared keys with CRASHINFO */
#define TASK_BTINFO_PID 0xA01
#define TASK_BTINFO_PPID 0xA02
#define TASK_BTINFO_PROC_NAME 0xA03
#define TASK_BTINFO_PROC_PATH 0xA04
#define TASK_BTINFO_UID 0xA05
#define TASK_BTINFO_GID 0xA06
#define TASK_BTINFO_PROC_FLAGS 0xA07
#define TASK_BTINFO_CPUTYPE 0xA08
#define TASK_BTINFO_EXCEPTION_CODES 0xA09
#define TASK_BTINFO_EXCEPTION_TYPE 0xA0A
#define TASK_BTINFO_RUSAGE_INFO 0xA0B
#define TASK_BTINFO_COALITION_ID 0xA0C
#define TASK_BTINFO_CRASH_COUNT 0xA0D
#define TASK_BTINFO_THROTTLE_TIMEOUT 0xA0E

/* Only in BTINFO */
#define TASK_BTINFO_THREAD_ID 0xA20 /* uint64_t */
#define TASK_BTINFO_THREAD_NAME 0xA21 /* string of len MAXTHREADNAMESIZE */
#define TASK_BTINFO_THREAD_STATE 0xA22 /* struct btinfo_thread_state_data_t */
#define TASK_BTINFO_THREAD_EXCEPTION_STATE 0xA23 /* struct btinfo_thread_state_data_t */
#define TASK_BTINFO_BACKTRACE 0xA24 /* array of uintptr_t */
#define TASK_BTINFO_BACKTRACE64 0xA25 /* array of uintptr_t */
#define TASK_BTINFO_ASYNC_BACKTRACE64 0xA26 /* array of uintptr_t */
#define TASK_BTINFO_ASYNC_START_INDEX 0xA27 /* uint32_t */
#define TASK_BTINFO_PLATFORM 0xA28 /* uint32_t */
#define TASK_BTINFO_SC_LOADINFO 0xA29 /* struct btinfo_sc_load_info */
#define TASK_BTINFO_SC_LOADINFO64 0xA2A /* struct btinfo_sc_load_info64 */

#define TASK_BTINFO_DYLD_LOADINFO KCDATA_TYPE_LIBRARY_LOADINFO
#define TASK_BTINFO_DYLD_LOADINFO64 KCDATA_TYPE_LIBRARY_LOADINFO64

/* Last one */
#define TASK_BTINFO_FLAGS 0xAFF /* uint32_t */
/* Bit values carried by the TASK_BTINFO_FLAGS uint32_t: */
#define TASK_BTINFO_FLAG_BT_TRUNCATED 0x1
#define TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED 0x2
#define TASK_BTINFO_FLAG_TASK_TERMINATED 0x4 /* task is terminated */
#define TASK_BTINFO_FLAG_KCDATA_INCOMPLETE 0x8 /* lw corpse collection is incomplete */

/* Terminator for a backtrace-info kcdata buffer. */
#define TASK_BTINFO_END KCDATA_TYPE_BUFFER_END
1540
/**************** definitions for os reasons *********************/

/* kcdata keys used when serializing an os_reason (exit reason). */
#define EXIT_REASON_SNAPSHOT 0x1001
#define EXIT_REASON_USER_DESC 0x1002 /* string description of reason */
#define EXIT_REASON_USER_PAYLOAD 0x1003 /* user payload data */
#define EXIT_REASON_CODESIGNING_INFO 0x1004
#define EXIT_REASON_WORKLOOP_ID 0x1005
#define EXIT_REASON_DISPATCH_QUEUE_NO 0x1006
1549
/* Payload of EXIT_REASON_SNAPSHOT. Packed: layout is wire/ABI format. */
struct exit_reason_snapshot {
	uint32_t ers_namespace;
	uint64_t ers_code;
	/* end of version 1 of exit_reason_snapshot. sizeof v1 was 12 */
	uint64_t ers_flags;
} __attribute__((packed));
1556
/* Max bytes (including NUL) for each path field below. */
#define EXIT_REASON_CODESIG_PATH_MAX 1024

/* Payload of EXIT_REASON_CODESIGNING_INFO. Packed: layout is wire/ABI format. */
struct codesigning_exit_reason_info {
	uint64_t ceri_virt_addr;               /* faulting virtual address */
	uint64_t ceri_file_offset;             /* offset of the page within the backing file */
	char ceri_pathname[EXIT_REASON_CODESIG_PATH_MAX];
	char ceri_filename[EXIT_REASON_CODESIG_PATH_MAX];
	uint64_t ceri_codesig_modtime_secs;    /* code-signature modification time */
	uint64_t ceri_codesig_modtime_nsecs;
	uint64_t ceri_page_modtime_secs;       /* page (file) modification time */
	uint64_t ceri_page_modtime_nsecs;
	/* Boolean state captured as single bytes: */
	uint8_t ceri_path_truncated;
	uint8_t ceri_object_codesigned;
	uint8_t ceri_page_codesig_validated;
	uint8_t ceri_page_codesig_tainted;
	uint8_t ceri_page_codesig_nx;
	uint8_t ceri_page_wpmapped;
	uint8_t ceri_page_slid;
	uint8_t ceri_page_dirty;
	uint32_t ceri_page_shadow_depth;
} __attribute__((packed));
1578
#define EXIT_REASON_USER_DESC_MAX_LEN 1024 /* max length of EXIT_REASON_USER_DESC string */
#define EXIT_REASON_PAYLOAD_MAX_LEN 2048 /* max length of EXIT_REASON_USER_PAYLOAD data */
1581 /**************** safe iterators *********************/
1582 #if !__has_ptrcheck
1583
/*
 * Bounds-checked iterator over a kcdata buffer.
 * item points at the current item's header; end points one past the last
 * valid byte of the buffer (see kcdata_iter() / kcdata_iter_valid()).
 */
typedef struct kcdata_iter {
	kcdata_item_t item;
	void *end;
} kcdata_iter_t;
1588
1589
1590 static inline
1591 kcdata_iter_t
kcdata_iter(void * buffer,unsigned long size)1592 kcdata_iter(void *buffer, unsigned long size)
1593 {
1594 kcdata_iter_t iter;
1595 iter.item = (kcdata_item_t) buffer;
1596 iter.end = (void*) (((uintptr_t)buffer) + size);
1597 return iter;
1598 }
1599
1600 static inline
1601 kcdata_iter_t kcdata_iter_unsafe(void *buffer) __attribute__((deprecated));
1602
1603 static inline
1604 kcdata_iter_t
kcdata_iter_unsafe(void * buffer)1605 kcdata_iter_unsafe(void *buffer)
1606 {
1607 kcdata_iter_t iter;
1608 iter.item = (kcdata_item_t) buffer;
1609 iter.end = (void*) (uintptr_t) ~0;
1610 return iter;
1611 }
1612
/* Sentinel returned when a search fails; never passes kcdata_iter_valid(). */
static const kcdata_iter_t kcdata_invalid_iter = { .item = NULL, .end = NULL };
1614
1615 static inline
1616 int
kcdata_iter_valid(kcdata_iter_t iter)1617 kcdata_iter_valid(kcdata_iter_t iter)
1618 {
1619 return
1620 ((uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end) &&
1621 ((uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end);
1622 }
1623
1624
1625 static inline
1626 kcdata_iter_t
kcdata_iter_next(kcdata_iter_t iter)1627 kcdata_iter_next(kcdata_iter_t iter)
1628 {
1629 iter.item = (kcdata_item_t) (((uintptr_t)iter.item) + sizeof(struct kcdata_item) + (iter.item->size));
1630 return iter;
1631 }
1632
1633 static inline uint32_t
kcdata_iter_type(kcdata_iter_t iter)1634 kcdata_iter_type(kcdata_iter_t iter)
1635 {
1636 if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) {
1637 return KCDATA_TYPE_ARRAY;
1638 } else {
1639 return iter.item->type;
1640 }
1641 }
1642
/*
 * Number of pad bytes needed to round `size` up to a multiple of 16
 * (0 when already aligned).
 */
static inline uint32_t
kcdata_calc_padding(uint32_t size)
{
	return (16u - (size & 0xfu)) & 0xfu;
}
1649
/* Extract the per-item pad-byte count encoded in the low bits of flags. */
static inline uint32_t
kcdata_flags_get_padding(uint64_t flags)
{
	return flags & KCDATA_FLAGS_STRUCT_PADDING_MASK;
}
1655
/* see comment above about has_padding */
/*
 * A "legacy" item predates the padding flags: its size equals the
 * 16-byte-rounded legacy struct size AND no padding bits are set in flags.
 */
static inline int
kcdata_iter_is_legacy_item(kcdata_iter_t iter, uint32_t legacy_size)
{
	uint32_t legacy_size_padded = legacy_size + kcdata_calc_padding(legacy_size);
	return iter.item->size == legacy_size_padded &&
	       (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0;
}
1664
/*
 * Payload size of the current item with trailing padding stripped.
 * Arrays and container-begin items report their raw size. Two stackshot
 * types may have been emitted by old kernels at a fixed "legacy" size with
 * no padding flags; those return the legacy size unchanged. Everything
 * else subtracts the pad-byte count recorded in the item flags.
 */
static inline uint32_t
kcdata_iter_size(kcdata_iter_t iter)
{
	uint32_t legacy_size = 0;

	switch (kcdata_iter_type(iter)) {
	case KCDATA_TYPE_ARRAY:
	case KCDATA_TYPE_CONTAINER_BEGIN:
		return iter.item->size;
	case STACKSHOT_KCTYPE_THREAD_SNAPSHOT: {
		legacy_size = sizeof(struct thread_snapshot_v2);
		if (kcdata_iter_is_legacy_item(iter, legacy_size)) {
			return legacy_size;
		}

		/* not legacy: fall into the generic padded-size path below */
		goto not_legacy;
	}
	case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
		legacy_size = sizeof(struct dyld_uuid_info_64);
		if (kcdata_iter_is_legacy_item(iter, legacy_size)) {
			return legacy_size;
		}

		goto not_legacy;
	}
not_legacy:
	default:
		if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) {
			return 0; /* malformed: claimed padding exceeds item size */
		} else {
			return iter.item->size - kcdata_flags_get_padding(iter.item->flags);
		}
	}
}
1699
/* Raw 64-bit flags word of the current item. */
static inline uint64_t
kcdata_iter_flags(kcdata_iter_t iter)
{
	return iter.item->flags;
}
1705
/* Pointer to the current item's payload (the bytes after the header). */
static inline
void *
kcdata_iter_payload(kcdata_iter_t iter)
{
	return &iter.item->data;
}
1712
1713
1714 static inline
1715 uint32_t
kcdata_iter_array_elem_type(kcdata_iter_t iter)1716 kcdata_iter_array_elem_type(kcdata_iter_t iter)
1717 {
1718 return (iter.item->flags >> 32) & UINT32_MAX;
1719 }
1720
1721 static inline
1722 uint32_t
kcdata_iter_array_elem_count(kcdata_iter_t iter)1723 kcdata_iter_array_elem_count(kcdata_iter_t iter)
1724 {
1725 return (iter.item->flags) & UINT32_MAX;
1726 }
1727
/* KCDATA_TYPE_ARRAY is ambiguous about the size of the array elements. Size is
 * calculated as total_size / elements_count, but total size got padded out to a
 * 16 byte alignment. New kernels will generate KCDATA_TYPE_ARRAY_PAD* instead
 * to explicitly tell us how much padding was used. Here we have a fixed, never
 * to be altered list of the sizes of array elements that were used before I
 * discovered this issue. If you find a KCDATA_TYPE_ARRAY that is not one of
 * these types, treat it as invalid data. */

/*
 * Fixed lookup table: element size for legacy KCDATA_TYPE_ARRAY items,
 * keyed by element type. Returns 0 for any type not in the table, which
 * callers treat as invalid. Per the comment above, never add to or alter
 * this list.
 */
static inline
uint32_t
kcdata_iter_array_size_switch(kcdata_iter_t iter)
{
	switch (kcdata_iter_array_elem_type(iter)) {
	case KCDATA_TYPE_LIBRARY_LOADINFO:
		return sizeof(struct dyld_uuid_info_32);
	case KCDATA_TYPE_LIBRARY_LOADINFO64:
		return sizeof(struct dyld_uuid_info_64);
	case STACKSHOT_KCTYPE_KERN_STACKFRAME:
	case STACKSHOT_KCTYPE_USER_STACKFRAME:
		return sizeof(struct stack_snapshot_frame32);
	case STACKSHOT_KCTYPE_KERN_STACKFRAME64:
	case STACKSHOT_KCTYPE_USER_STACKFRAME64:
		return sizeof(struct stack_snapshot_frame64);
	case STACKSHOT_KCTYPE_DONATING_PIDS:
		return sizeof(int32_t);
	case STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT:
		return sizeof(struct thread_delta_snapshot_v2);
	// This one is only here to make some unit tests work. It should be OK to
	// remove.
	case TASK_CRASHINFO_CRASHED_THREADID:
		return sizeof(uint64_t);
	default:
		return 0;
	}
}
1763
/*
 * Validate an array item: in-bounds, array-typed, and with a size/count
 * combination that is self-consistent for its encoding.
 */
static inline
int
kcdata_iter_array_valid(kcdata_iter_t iter)
{
	if (!kcdata_iter_valid(iter)) {
		return 0;
	}
	if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) {
		return 0;
	}
	if (kcdata_iter_array_elem_count(iter) == 0) {
		/* an empty array must carry no payload */
		return iter.item->size == 0;
	}
	if (iter.item->type == KCDATA_TYPE_ARRAY) {
		/* legacy array: element size comes from the fixed lookup table */
		uint32_t elem_size = kcdata_iter_array_size_switch(iter);
		if (elem_size == 0) {
			return 0;
		}
		/* sizes get aligned to the nearest 16. */
		return
		        kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size &&
		        iter.item->size % kcdata_iter_array_elem_count(iter) < 16;
	} else {
		/* ARRAY_PAD*: the low nibble of type records the pad byte count,
		 * so (size - pad) must divide evenly by the element count */
		return
		        (iter.item->type & 0xf) <= iter.item->size &&
		        kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) &&
		        (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0;
	}
}
1793
1794
/*
 * Size in bytes of one array element. Legacy KCDATA_TYPE_ARRAY items use
 * the fixed lookup table; ARRAY_PAD* items derive it from (size - pad)
 * divided by the element count. Callers should have checked
 * kcdata_iter_array_valid() first -- the count==0 guard here only avoids
 * dividing by zero.
 */
static inline
uint32_t
kcdata_iter_array_elem_size(kcdata_iter_t iter)
{
	if (iter.item->type == KCDATA_TYPE_ARRAY) {
		return kcdata_iter_array_size_switch(iter);
	}
	if (kcdata_iter_array_elem_count(iter) == 0) {
		return 0;
	}
	return (iter.item->size - (iter.item->type & 0xf)) / kcdata_iter_array_elem_count(iter);
}
1807
1808 static inline
1809 int
kcdata_iter_container_valid(kcdata_iter_t iter)1810 kcdata_iter_container_valid(kcdata_iter_t iter)
1811 {
1812 return
1813 kcdata_iter_valid(iter) &&
1814 kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN &&
1815 iter.item->size >= sizeof(uint32_t);
1816 }
1817
/* Container type: stored as the first uint32_t of the payload. */
static inline
uint32_t
kcdata_iter_container_type(kcdata_iter_t iter)
{
	return *(uint32_t *) kcdata_iter_payload(iter);
}
1824
/* Container instance ID: carried in the item's flags word. */
static inline
uint64_t
kcdata_iter_container_id(kcdata_iter_t iter)
{
	return iter.item->flags;
}
1831
1832
/* Walk items until the buffer-end marker or an invalid item; FOREACH_FAILED
 * is non-zero when the walk stopped for any reason other than a clean
 * KCDATA_TYPE_BUFFER_END. */
#define KCDATA_ITER_FOREACH(iter) for(; kcdata_iter_valid(iter) && iter.item->type != KCDATA_TYPE_BUFFER_END; iter = kcdata_iter_next(iter))
#define KCDATA_ITER_FOREACH_FAILED(iter) (!kcdata_iter_valid(iter) || (iter).item->type != KCDATA_TYPE_BUFFER_END)
1835
1836 static inline
1837 kcdata_iter_t
kcdata_iter_find_type(kcdata_iter_t iter,uint32_t type)1838 kcdata_iter_find_type(kcdata_iter_t iter, uint32_t type)
1839 {
1840 KCDATA_ITER_FOREACH(iter)
1841 {
1842 if (kcdata_iter_type(iter) == type) {
1843 return iter;
1844 }
1845 }
1846 return kcdata_invalid_iter;
1847 }
1848
/*
 * Valid when the payload is big enough to hold a KCDATA_DESC_MAXLEN
 * description header (which must be NUL-terminated) plus at least minsize
 * bytes of data.
 * NOTE(review): KCDATA_DESC_MAXLEN + minsize could wrap for a huge minsize;
 * callers appear to pass small constants -- confirm before exposing this to
 * untrusted sizes.
 */
static inline
int
kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize)
{
	return
	        kcdata_iter_valid(iter) &&
	        kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize &&
	        ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN - 1] == 0;
}
1858
1859 static inline
1860 char *
kcdata_iter_string(kcdata_iter_t iter,uint32_t offset)1861 kcdata_iter_string(kcdata_iter_t iter, uint32_t offset)
1862 {
1863 if (offset > kcdata_iter_size(iter)) {
1864 return NULL;
1865 }
1866 uint32_t maxlen = kcdata_iter_size(iter) - offset;
1867 char *s = ((char*)kcdata_iter_payload(iter)) + offset;
1868 if (strnlen(s, maxlen) < maxlen) {
1869 return s;
1870 } else {
1871 return NULL;
1872 }
1873 }
1874
1875 static inline void
kcdata_iter_get_data_with_desc(kcdata_iter_t iter,char ** desc_ptr,void ** data_ptr,uint32_t * size_ptr)1876 kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr)
1877 {
1878 if (desc_ptr) {
1879 *desc_ptr = (char *)kcdata_iter_payload(iter);
1880 }
1881 if (data_ptr) {
1882 *data_ptr = (void *)((uintptr_t)kcdata_iter_payload(iter) + KCDATA_DESC_MAXLEN);
1883 }
1884 if (size_ptr) {
1885 *size_ptr = kcdata_iter_size(iter) - KCDATA_DESC_MAXLEN;
1886 }
1887 }
1888
1889 #endif /* !__has_ptrcheck */
1890 #endif
1891