1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
30
31 #include <mach/mach_types.h>
32 #include <IOKit/IOTypes.h>
33 #include <kdp/output_stages/output_stages.h>
34 #include <kdp/kdp_core.h>
35 #include <kdp/processor_core.h>
36 #include <libkern/zlib.h>
37
/*
 * Largest single chunk handed to the deflate state machine per call;
 * zlib lengths are 32-bit, so zlib_stage_outproc splits larger requests.
 */
#define MAX_ZLIB_CHUNK (1024 * 1024 * 1024)

#if defined(__arm64__)
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
/*
 * NOTE(review): NETBUF presumably reserves room for one network-transport
 * buffer in the stage allocation (KDP-over-UDP payload size?) — confirm
 * against the transport code before relying on the 1440 value.
 */
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif

/* Round x up to the next multiple of 32 bytes. */
#define ROUND_32B(x) (~31UL & (31 + (x)))
49
/*
 * Per-stage state, placed at the start of the stage's single kos_data
 * allocation; the remainder of that allocation is handed out to zlib
 * through zlib_alloc.
 */
struct zlib_stage_data {
	z_stream zs;              /* deflate stream state */
	size_t allocation_offset; /* bump-pointer offset for zlib_alloc carve-outs */
};
54
55 static void *
zlib_alloc(void * opaque,u_int items,u_int size)56 zlib_alloc(void *opaque, u_int items, u_int size)
57 {
58 struct kdp_output_stage *stage = (struct kdp_output_stage *) opaque;
59 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
60 void * result;
61
62 result = (void *) ((vm_offset_t) data + data->allocation_offset);
63 data->allocation_offset += ROUND_32B(items * size); // 32b align for vector crc
64 assert(data->allocation_offset <= stage->kos_data_size);
65
66 return result;
67 }
68
/*
 * zlib free callback: intentionally a no-op.  All zlib state lives inside
 * the stage's single preallocated buffer (see zlib_alloc) and is released
 * as a whole by zlib_stage_free.
 */
static void
zlib_free(void * __unused ref, void * __unused ptr)
{
}
73
74 /*
75 * flushes any data to the output proc immediately
76 */
77 static int
zlib_zoutput(z_streamp strm,Bytef * buf,unsigned len)78 zlib_zoutput(z_streamp strm, Bytef *buf, unsigned len)
79 {
80 struct kdp_output_stage *stage = (typeof(stage))strm->opaque;
81 struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
82 IOReturn ret;
83
84 if (stage->kos_outstate->kcos_error == kIOReturnSuccess) {
85 ret = next_stage->kos_funcs.kosf_outproc(next_stage, KDP_DATA, NULL, len, buf);
86 if (ret != kIOReturnSuccess) {
87 kern_coredump_log(NULL, "(zlib_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
88 len, buf, ret);
89 stage->kos_outstate->kcos_error = ret;
90 } else {
91 stage->kos_bytes_written += len;
92 }
93 }
94
95 return len;
96 }
97
98 static int
zlib_zinput(z_streamp strm,Bytef * buf,unsigned size)99 zlib_zinput(z_streamp strm, Bytef *buf, unsigned size)
100 {
101 unsigned len;
102
103 len = strm->avail_in;
104 if (len > size) {
105 len = size;
106 }
107 if (len == 0) {
108 return 0;
109 }
110
111 if (strm->next_in != (Bytef *) strm) {
112 memcpy(buf, strm->next_in, len);
113 } else {
114 bzero(buf, len);
115 }
116 strm->adler = z_crc32(strm->adler, buf, len);
117
118 strm->avail_in -= len;
119 strm->next_in += len;
120 strm->total_in += len;
121
122 return (int)len;
123 }
124
125
/*
 * Pushes one chunk (at most MAX_ZLIB_CHUNK bytes) through the deflate
 * state machine.  Calling with length == 0 and data == NULL finalizes
 * the stream (Z_FINISH).  A non-zero length with NULL data compresses a
 * run of zeroes via the zero-marker convention shared with zlib_zinput.
 * Returns the pipeline's latched error state.
 */
static kern_return_t
zlib_stream_output_chunk(struct kdp_output_stage *stage, unsigned int length, void *data)
{
	struct zlib_stage_data *stage_data;
	z_stream * zs;
	int zr;
	boolean_t flush;

	stage_data = (struct zlib_stage_data *) stage->kos_data;
	zs = &(stage_data->zs);
	flush = (!length && !data); /* empty request == finalize the stream */
	zr = Z_OK;

	assert(!zs->avail_in);

	/* negative zr values are zlib errors and terminate the loop */
	while (zr >= 0) {
		if (!zs->avail_in && !flush) {
			if (!length) {
				break; /* chunk fully consumed */
			}
			/* NULL data means "length bytes of zeroes"; the stream
			 * pointer itself is the sentinel zlib_zinput checks for */
			zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
			zs->avail_in = length;
			length = 0;
		}
		if (!zs->avail_out) {
			/* output is delivered through zlib_zoutput; next_out is a
			 * placeholder (NOTE(review): presumably the IO-hooked
			 * deflate never writes through it — confirm against the
			 * Apple-modified zlib) */
			zs->next_out = (Bytef *) zs;
			zs->avail_out = UINT32_MAX;
		}
		zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
		if (Z_STREAM_END == zr) {
			break;
		}
		if (zr != Z_OK) {
			kern_coredump_log(NULL, "ZERR %d\n", zr);
			stage->kos_outstate->kcos_error = kIOReturnError;
		}
	}

	if (flush) {
		/* drain anything still buffered in the stage below us */
		zlib_zoutput(zs, NULL, 0);
	}

	return stage->kos_outstate->kcos_error;
}
170
171 static void
zlib_stage_reset(struct kdp_output_stage * stage)172 zlib_stage_reset(struct kdp_output_stage *stage)
173 {
174 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
175
176 stage->kos_bypass = false;
177 stage->kos_bytes_written = 0;
178
179 /* Re-initialize zstream variables */
180 data->zs.avail_in = 0;
181 data->zs.next_in = NULL;
182 data->zs.avail_out = 0;
183 data->zs.next_out = NULL;
184
185 deflateResetWithIO(&(data->zs), zlib_zinput, zlib_zoutput);
186 }
187
188 static kern_return_t
zlib_stage_outproc(struct kdp_output_stage * stage,unsigned int request,char * corename,uint64_t length,void * panic_data)189 zlib_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
190 char *corename, uint64_t length, void *panic_data)
191 {
192 kern_return_t err = KERN_SUCCESS;
193 struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
194 unsigned int chunk;
195
196 assert(next_stage != NULL);
197
198 switch (request) {
199 case KDP_SEEK:
200 stage->kos_bypass = true;
201 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
202 break;
203 case KDP_DATA:
204 if (!stage->kos_bypass) {
205 do{
206 if (length <= MAX_ZLIB_CHUNK) {
207 chunk = (typeof(chunk))length;
208 } else {
209 chunk = MAX_ZLIB_CHUNK;
210 }
211
212 err = zlib_stream_output_chunk(stage, chunk, panic_data);
213
214 length -= chunk;
215
216 if (panic_data) {
217 panic_data = (void *) (((uintptr_t) panic_data) + chunk);
218 }
219 } while (length && (KERN_SUCCESS == err));
220 } else {
221 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
222 }
223 break;
224 case KDP_WRQ:
225 OS_FALLTHROUGH;
226 case KDP_FLUSH:
227 OS_FALLTHROUGH;
228 case KDP_EOF:
229 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
230 break;
231 default:
232 break;
233 }
234
235 return err;
236 }
237
/*
 * Tears the stage down: releases the single backing allocation (which
 * also holds every block handed out by zlib_alloc) and clears the stage
 * so it can be re-initialized.
 */
static void
zlib_stage_free(struct kdp_output_stage *stage)
{
	kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);
	stage->kos_data = NULL;
	stage->kos_data_size = 0;
	stage->kos_initialized = false;
}
246
/*
 * One-time setup for the zlib output stage: sizes and allocates a single
 * buffer holding both the bookkeeping struct and all memory zlib will
 * later request through zlib_alloc, then initializes a gzip-mode deflate
 * stream.  Returns KERN_SUCCESS, a kmem_alloc error, or kIOReturnError
 * when deflateInit2 fails.
 */
kern_return_t
zlib_stage_initialize(struct kdp_output_stage *stage)
{
	const int wbits = 12;   /* reduced window to bound memory use at panic time */
	const int memlevel = 3; /* likewise a reduced deflate memory level */
	kern_return_t ret = KERN_SUCCESS;
	struct zlib_stage_data *data = NULL;

	assert(stage != NULL);
	assert(stage->kos_initialized == false);
	assert(stage->kos_data == NULL);

	/* our struct + optional network buffer + zlib's internal state */
	stage->kos_data_size = round_page(sizeof(struct zlib_stage_data) + NETBUF + zlib_deflate_memory_size(wbits, memlevel));
	printf("kdp_core zlib memory 0x%lx\n", stage->kos_data_size);
	/*
	 * Note: KMA_DATA isn't right because we have pointers,
	 * but it is assumed by the generic code that kos_data
	 * is a linear buffer which requires more work to split.
	 *
	 * We still want to use KMA_DATA for it as it has more
	 * chances to have VA in catastrophic cases.
	 */
	ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
	    KMA_DATA, VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != ret) {
		printf("zlib_stage_initialize failed to allocate memory. Error 0x%x\n", ret);
		return ret;
	}

	data = (struct zlib_stage_data *)(stage->kos_data);
	data->allocation_offset = ROUND_32B(sizeof(struct zlib_stage_data)); // Start allocating from the end of the data struct
	data->zs.zalloc = zlib_alloc;
	data->zs.zfree = zlib_free;
	data->zs.opaque = (void*) stage;

	/* wbits + 16 selects gzip (RFC 1952) framing rather than raw zlib */
	if (deflateInit2(&(data->zs), LEVEL, Z_DEFLATED, wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
		/* Allocation failed */
		zlib_stage_free(stage);
		return kIOReturnError;
	}

	stage->kos_funcs.kosf_reset = zlib_stage_reset;
	stage->kos_funcs.kosf_outproc = zlib_stage_outproc;
	stage->kos_funcs.kosf_free = zlib_stage_free;

	stage->kos_initialized = true;

	return ret;
}
296
297 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
298