1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
30
31 #include <mach/mach_types.h>
32 #include <IOKit/IOTypes.h>
33 #include <kdp/output_stages/output_stages.h>
34 #include <kdp/kdp_core.h>
35 #include <kdp/processor_core.h>
36 #include <libkern/zlib.h>
37 #include <vm/vm_kern_xnu.h>
38
/*
 * Largest number of input bytes fed through the deflate stream per call;
 * larger KDP_DATA writes are chunked in zlib_stage_outproc.
 */
#define MAX_ZLIB_CHUNK (1024 * 1024 * 1024)

#if defined(__arm64__)
#define LEVEL Z_BEST_SPEED /* deflate compression level (favor speed at panic time) */
#define NETBUF 0           /* no extra allocation slack on arm64 */
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440        /* extra allocation slack; presumably one ethernet MTU payload — TODO confirm */
#endif

/* Round x up to the next multiple of 32 bytes. */
#define ROUND_32B(x) (~31UL & (31 + (x)))

/*
 * Per-stage state stored at the start of stage->kos_data. All of zlib's
 * internal allocations are carved out of the same buffer, just past this
 * struct, by the zlib_alloc bump allocator.
 */
struct zlib_stage_data {
	z_stream zs;              /* deflate stream state */
	size_t allocation_offset; /* next free byte within stage->kos_data */
};
55
56 static void *
zlib_alloc(void * opaque,u_int items,u_int size)57 zlib_alloc(void *opaque, u_int items, u_int size)
58 {
59 struct kdp_output_stage *stage = (struct kdp_output_stage *) opaque;
60 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
61 void * result;
62
63 result = (void *) ((vm_offset_t) data + data->allocation_offset);
64 data->allocation_offset += ROUND_32B(items * size); // 32b align for vector crc
65 assert(data->allocation_offset <= stage->kos_data_size);
66
67 return result;
68 }
69
/*
 * zfree callback handed to zlib: a deliberate no-op. All zlib memory comes
 * from the stage's bump allocator (zlib_alloc) and is reclaimed wholesale
 * when the stage itself is freed.
 */
static void
zlib_free(void * __unused ref, void * __unused ptr)
{
}
74
/*
 * zlib output callback: flushes `len` bytes of compressed data to the next
 * stage in the pipeline immediately.
 *
 * Errors are latched into kcos_error rather than returned to zlib; once an
 * error has been recorded, subsequent output is silently dropped. zlib
 * expects the consumed byte count back, so `len` is returned unconditionally.
 */
static int
zlib_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
	struct kdp_output_stage *stage = (typeof(stage))strm->opaque;
	struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
	IOReturn ret;

	/* Skip the write entirely if an earlier error was already latched. */
	if (stage->kos_outstate->kcos_error == kIOReturnSuccess) {
		ret = next_stage->kos_funcs.kosf_outproc(next_stage, KDP_DATA, NULL, len, buf);
		if (ret != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(zlib_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
			    len, buf, ret);
			stage->kos_outstate->kcos_error = ret;
		} else {
			stage->kos_bytes_written += len;
		}
	}

	return len;
}
98
99 static int
zlib_zinput(z_streamp strm,Bytef * buf,unsigned size)100 zlib_zinput(z_streamp strm, Bytef *buf, unsigned size)
101 {
102 unsigned len;
103
104 len = strm->avail_in;
105 if (len > size) {
106 len = size;
107 }
108 if (len == 0) {
109 return 0;
110 }
111
112 if (strm->next_in != (Bytef *) strm) {
113 memcpy(buf, strm->next_in, len);
114 } else {
115 bzero(buf, len);
116 }
117 strm->adler = z_crc32(strm->adler, buf, len);
118
119 strm->avail_in -= len;
120 strm->next_in += len;
121 strm->total_in += len;
122
123 return (int)len;
124 }
125
126
/*
 * Push one chunk of uncompressed input through the deflate stream.
 *
 * Calling with length == 0 and data == NULL requests a flush (Z_FINISH),
 * draining all pending compressed output and terminating the stream.
 * A non-zero length with NULL data feeds `length` bytes of zeroes via the
 * sentinel pointer understood by zlib_zinput.
 *
 * Returns the accumulated output-state error (kIOReturnSuccess if none).
 */
static kern_return_t
zlib_stream_output_chunk(struct kdp_output_stage *stage, unsigned int length, void *data)
{
	struct zlib_stage_data *stage_data;
	z_stream * zs;
	int zr;
	boolean_t flush;

	stage_data = (struct zlib_stage_data *) stage->kos_data;
	zs = &(stage_data->zs);
	flush = (!length && !data);
	zr = Z_OK;

	/* Any input from a previous chunk must have been fully consumed. */
	assert(!zs->avail_in);

	while (zr >= 0) {
		if (!zs->avail_in && !flush) {
			if (!length) {
				break;
			}
			/* next_in == zs is the sentinel zlib_zinput treats as "zero fill". */
			zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
			zs->avail_in = length;
			length = 0;
		}
		if (!zs->avail_out) {
			/* Output is delivered via zlib_zoutput; this pointer/size pair
			 * only needs to look like an unbounded output buffer to zlib. */
			zs->next_out = (Bytef *) zs;
			zs->avail_out = UINT32_MAX;
		}
		zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
		if (Z_STREAM_END == zr) {
			break;
		}
		if (zr != Z_OK) {
			kern_coredump_log(NULL, "ZERR %d\n", zr);
			stage->kos_outstate->kcos_error = kIOReturnError;
		}
	}

	if (flush) {
		/* Zero-length write propagates the end-of-data flush downstream. */
		zlib_zoutput(zs, NULL, 0);
	}

	return stage->kos_outstate->kcos_error;
}
171
172 static kern_return_t
zlib_stage_reset(struct kdp_output_stage * stage,__unused const char * corename,__unused kern_coredump_type_t coretype)173 zlib_stage_reset(struct kdp_output_stage *stage, __unused const char *corename, __unused kern_coredump_type_t coretype)
174 {
175 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
176
177 stage->kos_bypass = false;
178 stage->kos_bytes_written = 0;
179
180 /* Re-initialize zstream variables */
181 data->zs.avail_in = 0;
182 data->zs.next_in = NULL;
183 data->zs.avail_out = 0;
184 data->zs.next_out = NULL;
185
186 deflateResetWithIO(&(data->zs), zlib_zinput, zlib_zoutput);
187
188 return KERN_SUCCESS;
189 }
190
/*
 * Output-procedure entry point for the zlib stage.
 *
 * KDP_SEEK puts the stage into bypass mode (seeking within compressed
 * output is not meaningful), after which KDP_DATA passes straight through.
 * Otherwise KDP_DATA is compressed in MAX_ZLIB_CHUNK-sized pieces.
 * KDP_WRQ / KDP_FLUSH / KDP_EOF are forwarded to the next stage unchanged.
 */
static kern_return_t
zlib_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
    char *corename, uint64_t length, void *panic_data)
{
	kern_return_t err = KERN_SUCCESS;
	struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
	unsigned int chunk;

	assert(next_stage != NULL);

	switch (request) {
	case KDP_SEEK:
		/* Seeking is incompatible with streaming compression; from here on
		 * this stage forwards data verbatim. */
		stage->kos_bypass = true;
		err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
		if (KERN_SUCCESS != err) {
			kern_coredump_log(NULL, "(%s) next stage output failed with error 0x%x\n", __func__, err);
		}
		break;
	case KDP_DATA:
		if (!stage->kos_bypass) {
			/* Split the (64-bit) length into chunks the 32-bit chunk API
			 * can take. A zero length with NULL data flushes the stream. */
			do{
				if (length <= MAX_ZLIB_CHUNK) {
					chunk = (typeof(chunk))length;
				} else {
					chunk = MAX_ZLIB_CHUNK;
				}

				err = zlib_stream_output_chunk(stage, chunk, panic_data);
				if (KERN_SUCCESS != err) {
					kern_coredump_log(NULL, "(%s) zlib_stream_output_chunk failed with error 0x%x\n", __func__, err);
				}

				length -= chunk;

				/* NULL panic_data stays NULL (zero-fill mode). */
				if (panic_data) {
					panic_data = (void *) (((uintptr_t) panic_data) + chunk);
				}
			} while (length && (KERN_SUCCESS == err));
		} else {
			err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
			if (KERN_SUCCESS != err) {
				kern_coredump_log(NULL, "(%s) next stage output failed with error 0x%x\n", __func__, err);
			}
		}
		break;
	case KDP_WRQ:
		OS_FALLTHROUGH;
	case KDP_FLUSH:
		OS_FALLTHROUGH;
	case KDP_EOF:
		err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
		if (KERN_SUCCESS != err) {
			kern_coredump_log(NULL, "(%s) next stage output failed with error 0x%x\n", __func__, err);
		}
		break;
	default:
		break;
	}

	return err;
}
252
253 static void
zlib_stage_free(struct kdp_output_stage * stage)254 zlib_stage_free(struct kdp_output_stage *stage)
255 {
256 kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);
257 stage->kos_data = NULL;
258 stage->kos_data_size = 0;
259 stage->kos_initialized = false;
260 }
261
262 kern_return_t
zlib_stage_initialize(struct kdp_output_stage * stage)263 zlib_stage_initialize(struct kdp_output_stage *stage)
264 {
265 const int wbits = 12;
266 const int memlevel = 3;
267 kern_return_t ret = KERN_SUCCESS;
268 struct zlib_stage_data *data = NULL;
269
270 assert(stage != NULL);
271 assert(stage->kos_initialized == false);
272 assert(stage->kos_data == NULL);
273
274 stage->kos_data_size = round_page(sizeof(struct zlib_stage_data) + NETBUF + zlib_deflate_memory_size(wbits, memlevel));
275 printf("kdp_core zlib memory 0x%lx\n", stage->kos_data_size);
276 /*
277 * Note: KMA_DATA isn't right because we have pointers,
278 * but it is assumed by the generic code that kos_data
279 * is a linear buffer which requires more work to split.
280 *
281 * We still want to use KMA_DATA for it as it has more
282 * chances to have VA in catastrophic cases.
283 */
284 ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
285 KMA_DATA_SHARED, VM_KERN_MEMORY_DIAG);
286 if (KERN_SUCCESS != ret) {
287 printf("zlib_stage_initialize failed to allocate memory. Error 0x%x\n", ret);
288 return ret;
289 }
290
291 data = (struct zlib_stage_data *)(stage->kos_data);
292 data->allocation_offset = ROUND_32B(sizeof(struct zlib_stage_data)); // Start allocating from the end of the data struct
293 data->zs.zalloc = zlib_alloc;
294 data->zs.zfree = zlib_free;
295 data->zs.opaque = (void*) stage;
296
297 if (deflateInit2(&(data->zs), LEVEL, Z_DEFLATED, wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
298 /* Allocation failed */
299 zlib_stage_free(stage);
300 return kIOReturnError;
301 }
302
303 stage->kos_funcs.kosf_reset = zlib_stage_reset;
304 stage->kos_funcs.kosf_outproc = zlib_stage_outproc;
305 stage->kos_funcs.kosf_free = zlib_stage_free;
306
307 stage->kos_initialized = true;
308
309 return ret;
310 }
311
312 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
313