1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
30
31 #include <mach/mach_types.h>
32 #include <IOKit/IOTypes.h>
33 #include <kdp/output_stages/output_stages.h>
34 #include <kdp/kdp_core.h>
35 #include <kdp/processor_core.h>
36 #include <libkern/zlib.h>
37 #include <vm/vm_kern_xnu.h>
38
39 #define MAX_ZLIB_CHUNK (1024 * 1024 * 1024)
40
41 #if defined(__arm64__)
42 #define LEVEL Z_BEST_SPEED
43 #define NETBUF 0
44 #else
45 #define LEVEL Z_BEST_SPEED
46 #define NETBUF 1440
47 #endif
48
49 #define ROUND_32B(x) (~31UL & (31 + (x)))
50
/*
 * Per-stage private state, stored at the front of the stage's kos_data
 * buffer.  All of zlib's internal allocations are bump-allocated from the
 * remainder of that same buffer (see zlib_alloc / allocation_offset).
 */
struct zlib_stage_data {
	z_stream zs;              /* zlib deflate stream state */
	size_t allocation_offset; /* bump-allocator cursor into kos_data (bytes) */
};
55
56 static void *
zlib_alloc(void * opaque,u_int items,u_int size)57 zlib_alloc(void *opaque, u_int items, u_int size)
58 {
59 struct kdp_output_stage *stage = (struct kdp_output_stage *) opaque;
60 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
61 void * result;
62
63 result = (void *) ((vm_offset_t) data + data->allocation_offset);
64 data->allocation_offset += ROUND_32B(items * size); // 32b align for vector crc
65 assert(data->allocation_offset <= stage->kos_data_size);
66
67 return result;
68 }
69
/*
 * zlib zfree hook: allocations come from the stage's bump allocator and
 * are only reclaimed wholesale when the stage is freed, so this is a no-op.
 */
static void
zlib_free(void * __unused ref, void * __unused ptr)
{
}
74
75 /*
76 * flushes any data to the output proc immediately
77 */
78 static int
zlib_zoutput(z_streamp strm,Bytef * buf,unsigned len)79 zlib_zoutput(z_streamp strm, Bytef *buf, unsigned len)
80 {
81 struct kdp_output_stage *stage = (typeof(stage))strm->opaque;
82 struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
83 IOReturn ret;
84
85 if (stage->kos_outstate->kcos_error == kIOReturnSuccess) {
86 ret = next_stage->kos_funcs.kosf_outproc(next_stage, KDP_DATA, NULL, len, buf);
87 if (ret != kIOReturnSuccess) {
88 kern_coredump_log(NULL, "(zlib_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
89 len, buf, ret);
90 stage->kos_outstate->kcos_error = ret;
91 } else {
92 stage->kos_bytes_written += len;
93 }
94 }
95
96 return len;
97 }
98
99 static int
zlib_zinput(z_streamp strm,Bytef * buf,unsigned size)100 zlib_zinput(z_streamp strm, Bytef *buf, unsigned size)
101 {
102 unsigned len;
103
104 len = strm->avail_in;
105 if (len > size) {
106 len = size;
107 }
108 if (len == 0) {
109 return 0;
110 }
111
112 if (strm->next_in != (Bytef *) strm) {
113 memcpy(buf, strm->next_in, len);
114 } else {
115 bzero(buf, len);
116 }
117 strm->adler = z_crc32(strm->adler, buf, len);
118
119 strm->avail_in -= len;
120 strm->next_in += len;
121 strm->total_in += len;
122
123 return (int)len;
124 }
125
126
/*
 * Pump one chunk of input through the deflate stream.
 *
 * length == 0 && data == NULL means "flush": finish the stream and push
 * any buffered output downstream.  A non-NULL data pointer is real input;
 * length > 0 with data == NULL feeds zero bytes (via the zero marker that
 * zlib_zinput recognizes).  Errors are latched in kcos_error and returned.
 */
static kern_return_t
zlib_stream_output_chunk(struct kdp_output_stage *stage, unsigned int length, void *data)
{
	struct zlib_stage_data *stage_data;
	z_stream * zs;
	int zr;
	boolean_t flush;

	stage_data = (struct zlib_stage_data *) stage->kos_data;
	zs = &(stage_data->zs);
	flush = (!length && !data);
	zr = Z_OK;

	/* caller must have drained all previously-queued input */
	assert(!zs->avail_in);

	while (zr >= 0) {
		/* arm the input side once per chunk (unless finishing) */
		if (!zs->avail_in && !flush) {
			if (!length) {
				break;
			}
			zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
			zs->avail_in = length;
			length = 0;
		}
		/*
		 * Output is streamed via zlib_zoutput, so next_out is just a
		 * sentinel and avail_out is effectively unbounded.
		 */
		if (!zs->avail_out) {
			zs->next_out = (Bytef *) zs;
			zs->avail_out = UINT32_MAX;
		}
		zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
		if (Z_STREAM_END == zr) {
			break;
		}
		if (zr != Z_OK) {
			kern_coredump_log(NULL, "ZERR %d\n", zr);
			stage->kos_outstate->kcos_error = kIOReturnError;
		}
	}

	/* on finish, force any buffered compressed bytes out downstream */
	if (flush) {
		zlib_zoutput(zs, NULL, 0);
	}

	return stage->kos_outstate->kcos_error;
}
171
172 static void
zlib_stage_reset(struct kdp_output_stage * stage)173 zlib_stage_reset(struct kdp_output_stage *stage)
174 {
175 struct zlib_stage_data *data = (struct zlib_stage_data *) stage->kos_data;
176
177 stage->kos_bypass = false;
178 stage->kos_bytes_written = 0;
179
180 /* Re-initialize zstream variables */
181 data->zs.avail_in = 0;
182 data->zs.next_in = NULL;
183 data->zs.avail_out = 0;
184 data->zs.next_out = NULL;
185
186 deflateResetWithIO(&(data->zs), zlib_zinput, zlib_zoutput);
187 }
188
189 static kern_return_t
zlib_stage_outproc(struct kdp_output_stage * stage,unsigned int request,char * corename,uint64_t length,void * panic_data)190 zlib_stage_outproc(struct kdp_output_stage *stage, unsigned int request,
191 char *corename, uint64_t length, void *panic_data)
192 {
193 kern_return_t err = KERN_SUCCESS;
194 struct kdp_output_stage *next_stage = STAILQ_NEXT(stage, kos_next);
195 unsigned int chunk;
196
197 assert(next_stage != NULL);
198
199 switch (request) {
200 case KDP_SEEK:
201 stage->kos_bypass = true;
202 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
203 break;
204 case KDP_DATA:
205 if (!stage->kos_bypass) {
206 do{
207 if (length <= MAX_ZLIB_CHUNK) {
208 chunk = (typeof(chunk))length;
209 } else {
210 chunk = MAX_ZLIB_CHUNK;
211 }
212
213 err = zlib_stream_output_chunk(stage, chunk, panic_data);
214
215 length -= chunk;
216
217 if (panic_data) {
218 panic_data = (void *) (((uintptr_t) panic_data) + chunk);
219 }
220 } while (length && (KERN_SUCCESS == err));
221 } else {
222 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
223 }
224 break;
225 case KDP_WRQ:
226 OS_FALLTHROUGH;
227 case KDP_FLUSH:
228 OS_FALLTHROUGH;
229 case KDP_EOF:
230 err = next_stage->kos_funcs.kosf_outproc(next_stage, request, corename, length, panic_data);
231 break;
232 default:
233 break;
234 }
235
236 return err;
237 }
238
239 static void
zlib_stage_free(struct kdp_output_stage * stage)240 zlib_stage_free(struct kdp_output_stage *stage)
241 {
242 kmem_free(kernel_map, (vm_offset_t) stage->kos_data, stage->kos_data_size);
243 stage->kos_data = NULL;
244 stage->kos_data_size = 0;
245 stage->kos_initialized = false;
246 }
247
/*
 * One-time setup for the zlib output stage.
 *
 * Allocates a single buffer sized to hold the stage's private state plus
 * every allocation deflate will request (as predicted by
 * zlib_deflate_memory_size for the chosen window/memlevel), wires up the
 * bump allocator, and initializes the deflate stream in gzip framing.
 * Returns KERN_SUCCESS, a kmem_alloc error, or kIOReturnError if
 * deflateInit2 fails.
 */
kern_return_t
zlib_stage_initialize(struct kdp_output_stage *stage)
{
	/* small window/memlevel keep the preallocated footprint modest */
	const int wbits = 12;
	const int memlevel = 3;
	kern_return_t ret = KERN_SUCCESS;
	struct zlib_stage_data *data = NULL;

	assert(stage != NULL);
	assert(stage->kos_initialized == false);
	assert(stage->kos_data == NULL);

	stage->kos_data_size = round_page(sizeof(struct zlib_stage_data) + NETBUF + zlib_deflate_memory_size(wbits, memlevel));
	printf("kdp_core zlib memory 0x%lx\n", stage->kos_data_size);
	/*
	 * Note: KMA_DATA isn't right because we have pointers,
	 * but it is assumed by the generic code that kos_data
	 * is a linear buffer which requires more work to split.
	 *
	 * We still want to use KMA_DATA for it as it has more
	 * chances to have VA in catastrophic cases.
	 */
	ret = kmem_alloc(kernel_map, (vm_offset_t*) &stage->kos_data, stage->kos_data_size,
	    KMA_DATA, VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != ret) {
		printf("zlib_stage_initialize failed to allocate memory. Error 0x%x\n", ret);
		return ret;
	}

	data = (struct zlib_stage_data *)(stage->kos_data);
	data->allocation_offset = ROUND_32B(sizeof(struct zlib_stage_data)); // Start allocating from the end of the data struct
	data->zs.zalloc = zlib_alloc;
	data->zs.zfree = zlib_free;
	data->zs.opaque = (void*) stage;

	/* windowBits + 16 selects gzip (not raw zlib) framing */
	if (deflateInit2(&(data->zs), LEVEL, Z_DEFLATED, wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
		/* Allocation failed */
		zlib_stage_free(stage);
		return kIOReturnError;
	}

	stage->kos_funcs.kosf_reset = zlib_stage_reset;
	stage->kos_funcs.kosf_outproc = zlib_stage_outproc;
	stage->kos_funcs.kosf_free = zlib_stage_free;

	stage->kos_initialized = true;

	return ret;
}
297
298 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
299