xref: /xnu-8792.81.2/bsd/kern/kern_debug.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/sysctl.h>
30 
31 #include <kern/cpu_data.h>
32 
33 #if __arm64__
34 #include <arm/machine_routines.h>
35 #endif /* __arm64__ */
36 
37 #if CONFIG_DEBUG_SYSCALL_REJECTION
38 
39 #include <mach/mach_time.h>
40 
41 #include <kern/bits.h>
42 #include <kern/clock.h>
43 #include <kern/exc_guard.h>
44 #include <kern/exception.h>
45 #include <kern/kalloc.h>
46 #include <kern/simple_lock.h>
47 #include <kern/startup.h>
48 #include <kern/syscall_sw.h>
49 #include <kern/task.h>
50 
51 #include <pexpert/pexpert.h>
52 
53 #include <sys/syscall.h>
54 #include <sys/sysent.h>
55 #include <sys/systm.h>
56 #include <sys/types.h>
57 #include <sys/user.h>
58 #include <sys/variant_internal.h>
59 
60 #include <sys/kern_debug.h>
61 
62 #define SYSCALL_REJECTION_MODE_IGNORE   0
63 #define SYSCALL_REJECTION_MODE_GUARD    1
64 #define SYSCALL_REJECTION_MODE_CRASH    2
65 
/*
 * Global syscall-rejection mode, settable at boot via the
 * "syscall_rejection_mode" boot-arg and at runtime through the
 * kern.debug_syscall_rejection_mode sysctl below.  Defaults to
 * non-fatal guard delivery on DEVELOPMENT/DEBUG kernels and to
 * ignoring violations otherwise.
 */
TUNABLE_WRITEABLE(int, debug_syscall_rejection_mode, "syscall_rejection_mode",
#if DEVELOPMENT || DEBUG
    SYSCALL_REJECTION_MODE_GUARD
#else
    SYSCALL_REJECTION_MODE_IGNORE
#endif
    );
73 
74 static int
sysctl_debug_syscall_rejection_mode(struct sysctl_oid __unused * oidp,void * __unused arg1,int __unused arg2,struct sysctl_req * req)75 sysctl_debug_syscall_rejection_mode(struct sysctl_oid __unused *oidp, void * __unused arg1, int __unused arg2,
76     struct sysctl_req *req)
77 {
78 	int error, changed;
79 	int value = *(int *) arg1;
80 
81 	if (!os_variant_has_internal_diagnostics("com.apple.xnu")) {
82 		return ENOTSUP;
83 	}
84 
85 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
86 	if (!error && changed) {
87 		debug_syscall_rejection_mode = value;
88 	}
89 	return error;
90 }
91 
92 void
reset_debug_syscall_rejection_mode(void)93 reset_debug_syscall_rejection_mode(void)
94 {
95 	if (!os_variant_has_internal_diagnostics("com.apple.xnu")) {
96 		debug_syscall_rejection_mode = 0;
97 	}
98 }
99 
/* kern.debug_syscall_rejection_mode — see the mode constants above. */
SYSCTL_PROC(_kern, OID_AUTO, debug_syscall_rejection_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &debug_syscall_rejection_mode, 0, sysctl_debug_syscall_rejection_mode, "I", "0: ignore, 1: non-fatal, 2: crash");
102 
103 
static size_t const predefined_masks = 2; // 0: null mask (all 0), 1: all mask (all 1)

/*
 * The number of masks is derived from the mask selector width:
 *
 * A selector is just made of an index into syscall_rejection_masks,
 * with the exception of the highest bit, which indicates whether the
 * mask is to be added as an "allow" mask or a "deny" mask.
 * Additionally, predefined masks don't actually have storage and are
 * handled specially, so syscall_rejection_masks starts with the first
 * non-predefined mask (and is sized appropriately).
 */
static size_t const syscall_rejection_mask_count = SYSCALL_REJECTION_SELECTOR_MASK_COUNT - predefined_masks;
static syscall_rejection_mask_t syscall_rejection_masks[syscall_rejection_mask_count];

/* Bytes needed for a bitmap covering all Mach traps plus all BSD syscalls. */
#define SR_MASK_SIZE (BITMAP_SIZE(mach_trap_count + nsysent))

/* Serializes definition and lookup of entries in syscall_rejection_masks. */
static LCK_GRP_DECLARE(syscall_rejection_lck_grp, "syscall rejection lock");
static LCK_MTX_DECLARE(syscall_rejection_mtx, &syscall_rejection_lck_grp);
123 
/*
 * Handle a system call or Mach trap that the current thread's
 * rejection mask denies.  Returns true when the violation is fatal
 * (caller is expected to act accordingly).
 *
 * syscall_mach_trap_number: negative for Mach traps, non-negative for
 * BSD syscalls (the convention visible in the mapping below).
 */
bool
debug_syscall_rejection_handle(int syscall_mach_trap_number)
{
	uthread_t ut = current_uthread();
	uint64_t const flags = ut->syscall_rejection_flags;
	/* The thread can force fatal delivery regardless of the global mode. */
	bool fatal = (bool)(flags & SYSCALL_REJECTION_FLAGS_FORCE_FATAL);

	switch (debug_syscall_rejection_mode) {
	case SYSCALL_REJECTION_MODE_IGNORE:
		if (!fatal) {
			/* ignore */
			break;
		}
		/* Forced-fatal: fall into the crash path. */
		OS_FALLTHROUGH;
	case SYSCALL_REJECTION_MODE_CRASH:
		fatal = true;
		OS_FALLTHROUGH;
	case SYSCALL_REJECTION_MODE_GUARD: {
		if (flags & SYSCALL_REJECTION_FLAGS_ONCE) {
			/*
			 * Combined bitmap layout: Mach traps occupy the low
			 * indices, BSD syscalls follow at mach_trap_count.
			 */
			int const number = syscall_mach_trap_number < 0 ? -syscall_mach_trap_number : (mach_trap_count + syscall_mach_trap_number);

			// don't trip on this system call again
			bitmap_set(ut->syscall_rejection_mask, number);
			bitmap_set(ut->syscall_rejection_once_mask, number);
		}

		/* EXC_GUARD target bit: 1 for a Mach trap, 0 for a BSD syscall. */
		mach_exception_code_t code = 0;
		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_REJECTED_SC);
		EXC_GUARD_ENCODE_FLAVOR(code, 0);
		EXC_GUARD_ENCODE_TARGET(code, syscall_mach_trap_number < 0);
		mach_exception_subcode_t subcode =
		    syscall_mach_trap_number < 0 ? -syscall_mach_trap_number : syscall_mach_trap_number;

		if (!fatal) {
			/* Non-fatal: report the guard violation against the task. */
			task_violated_guard(code, subcode, NULL, TRUE);
		} else {
			/* Fatal: queue the violation for AST-time delivery. */
			thread_guard_violation(current_thread(), code, subcode, fatal);
		}
		break;
	};
	default:
		/* ignore */
		;
	}
	return fatal;
}
170 
/*
 * Defined outside this file; terminates the process, surfacing the
 * guard exception (see rejected_syscall_guard_ast below for usage).
 */
extern int exit_with_guard_exception(void *p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode);
173 
174 void
rejected_syscall_guard_ast(thread_t t,mach_exception_data_type_t code,mach_exception_data_type_t subcode)175 rejected_syscall_guard_ast(
176 	thread_t t,
177 	mach_exception_data_type_t code,
178 	mach_exception_data_type_t subcode)
179 {
180 	/*
181 	 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
182 	 * deliver it synchronously and then kill the process, else kill the process
183 	 * and deliver the exception via EXC_CORPSE_NOTIFY.
184 	 */
185 	if (task_exception_notify(EXC_GUARD, code, subcode) == KERN_SUCCESS) {
186 		psignal_uthread(t, SIGSYS);
187 	} else {
188 		exit_with_guard_exception(current_proc(), code, subcode);
189 	}
190 }
191 
192 
193 static void
_syscall_rejection_apply_mask(syscall_rejection_mask_t dest,const syscall_rejection_mask_t src,bool apply_as_allow)194 _syscall_rejection_apply_mask(syscall_rejection_mask_t dest, const syscall_rejection_mask_t src, bool apply_as_allow)
195 {
196 	assert(dest != NULL);
197 	assert(src != NULL);
198 
199 	if (apply_as_allow) {
200 		bitmap_or(dest, dest, src, mach_trap_count + nsysent);
201 	} else {
202 		bitmap_and_not(dest, dest, src, mach_trap_count + nsysent);
203 	}
204 }
205 
206 /*
207  * The masks to apply are passed to the kernel as packed selectors,
208  * which are just however many of the selector data type fit into one
209  * (or more) fields of the natural word size (i.e. a register). This
210  * avoids copying from user space.
211  *
212  * More specifically, at the time of this writing, a selector is 7
213  * bits wide, and there are two uint64_t arguments
214  * (args->packed_selectors<n>), so up to 18 selectors can be
215  * specified, which are then stuffed into the 128 bits of the
216  * arguments. If less than 18 masks are requested to be applied, the
217  * remaining selectors will just be left as 0, which naturally
218  * resolves as the "empty" or "NULL" mask that changes nothing.
219  *
220  * The libsyscall wrapper provides a more convenient interface where
221  * an array (up to 18 elements long) and its length are passed in,
222  * which the wrapper then packs into packed_selectors of the actual
223  * system call.
224  */
225 
/*
 * debug_syscall_reject_config system call: compose the calling
 * thread's syscall rejection mask from the packed mask selectors in
 * args (encoding described in the comment above) and install it on
 * the current uthread along with args->flags.
 */
int
sys_debug_syscall_reject_config(struct proc *p __unused, struct debug_syscall_reject_config_args *args, int *retval)
{
	int error = 0;

	*retval = 0;

	uthread_t ut = current_uthread();

	/* Scratch mask, composed locally and only installed on success. */
	bitmap_t mask[SR_MASK_SIZE / sizeof(bitmap_t)];
	// syscall rejection masks are always reset to "deny all"
	memset(mask, 0, SR_MASK_SIZE);

	/* Hold the lock so mask definitions can't change mid-composition. */
	lck_mtx_lock(&syscall_rejection_mtx);

	/*
	 * Walk SYSCALL_REJECTION_SELECTOR_BITS-wide selectors across both
	 * 64-bit packed words.  A selector can straddle the
	 * packed_selectors1/packed_selectors2 boundary, hence the
	 * shift-and-merge of both words below.
	 */
	for (int i = 0;
	    i + SYSCALL_REJECTION_SELECTOR_BITS < (sizeof(args->packed_selectors1) + sizeof(args->packed_selectors2)) * 8;
	    i += SYSCALL_REJECTION_SELECTOR_BITS) {
		/* Left shift that degrades to a right shift for negative n. */
#define s_left_shift(x, n) ((n) < 0 ? ((x) >> -(n)) : ((x) << (n)))

		syscall_rejection_selector_t const selector = (syscall_rejection_selector_t)
		    (((i < 64 ? (args->packed_selectors1 >> i) : 0) |
		    (i > 64 - SYSCALL_REJECTION_SELECTOR_BITS ? s_left_shift(args->packed_selectors2, 64 - i) : 0)) & SYSCALL_REJECTION_SELECTOR_MASK);
		/* Top selector bit: apply as "allow"; low bits: mask index. */
		bool const is_allow_mask = selector & SYSCALL_REJECTION_IS_ALLOW_MASK;
		int const mask_index = selector & SYSCALL_REJECTION_INDEX_MASK;

		if (mask_index == SYSCALL_REJECTION_NULL) {
			// mask 0 is always empty (nothing to apply)
			continue;
		}

		if (mask_index == SYSCALL_REJECTION_ALL) {
			// mask 1 is always full (overrides everything)
			memset(mask, is_allow_mask ? 0xff : 0x00, SR_MASK_SIZE);
			continue;
		}

		/* Predefined masks have no storage; offset into the array. */
		syscall_rejection_mask_t mask_to_apply = syscall_rejection_masks[mask_index - predefined_masks];

		if (mask_to_apply == NULL) {
			/* Selector references a mask that was never defined. */
			error = ENOENT;
			goto out_locked;
		}

		_syscall_rejection_apply_mask(mask, mask_to_apply, is_allow_mask);
	}

	/* Not RT-safe, but only necessary once. */
	if (ut->syscall_rejection_mask == NULL) {
		ut->syscall_rejection_mask = kalloc_data(SR_MASK_SIZE, Z_WAITOK);

		if (ut->syscall_rejection_mask == NULL) {
			error = ENOMEM;
			goto out_locked;
		}
	}

	memcpy(ut->syscall_rejection_mask, mask, SR_MASK_SIZE);

	if ((args->flags & SYSCALL_REJECTION_FLAGS_ONCE)) {
		if (ut->syscall_rejection_once_mask == NULL) {
			ut->syscall_rejection_once_mask = kalloc_data(SR_MASK_SIZE, Z_WAITOK);

			if (ut->syscall_rejection_once_mask == NULL) {
				/* Roll back the main mask so both stay consistent. */
				kfree_data(ut->syscall_rejection_mask, SR_MASK_SIZE);
				ut->syscall_rejection_mask = NULL;
				error = ENOMEM;
				goto out_locked;
			}

			memset(ut->syscall_rejection_once_mask, 0, SR_MASK_SIZE);
		} else {
			// prevent the already hit syscalls from hitting again.
			bitmap_or(ut->syscall_rejection_mask, ut->syscall_rejection_mask, ut->syscall_rejection_once_mask, mach_trap_count + nsysent);
		}
	}

out_locked:
	lck_mtx_unlock(&syscall_rejection_mtx);

	if (error == 0) {
		ut->syscall_rejection_flags = args->flags;
	}

	if (error == ENOENT && debug_syscall_rejection_mode == SYSCALL_REJECTION_MODE_IGNORE) {
		/* Existing code may rely on the system call failing
		 * gracefully if syscall rejection is currently off. */
		error = 0;
	}

	return error;
}
318 
319 /*
320  * debug_syscall_reject
321  *
322  * Compatibility interface to the old form of the system call.
323  */
324 int
debug_syscall_reject(struct proc * p,struct debug_syscall_reject_args * args,int * retval)325 debug_syscall_reject(struct proc *p, struct debug_syscall_reject_args *args, int *retval)
326 {
327 	struct debug_syscall_reject_config_args new_args;
328 
329 	bzero(&new_args, sizeof(new_args));
330 	new_args.packed_selectors1 = args->packed_selectors;
331 	// packed_selectors2 left empty
332 	new_args.flags = SYSCALL_REJECTION_FLAGS_DEFAULT;
333 
334 	return sys_debug_syscall_reject_config(p, &new_args, retval);
335 }
336 
337 
338 static bool
_syscall_rejection_add(syscall_rejection_mask_t dst,char const * name)339 _syscall_rejection_add(syscall_rejection_mask_t dst, char const *name)
340 {
341 	/*
342 	 * Yes, this function is O(n+m), making the whole act of setting a
343 	 * mask O(l*(n+m)), but defining masks is done rarely enough (and
344 	 * i, n and m small enough) for this to not matter.
345 	 */
346 
347 	for (int i = 0; i < mach_trap_count; i++) {
348 		if (strcmp(mach_syscall_name_table[i], name) == 0) {
349 			bitmap_set(dst, i);
350 			return true;
351 		}
352 	}
353 
354 	extern char const *syscallnames[];
355 
356 	for (int i = 0; i < nsysent; i++) {
357 		if (strcmp(syscallnames[i], name) == 0) {
358 			bitmap_set(dst, i + mach_trap_count);
359 			return true;
360 		}
361 	}
362 
363 	printf("%s: trying to add non-existing syscall/mach trap '%s'\n", __func__, name);
364 	return false;
365 }
366 
/* Pretty much arbitrary, we just don't want userspace to pass
 * unreasonably large buffers to parse.  (Bounds req->newlen in the
 * syscall_rejection_masks sysctl handler below.) */
static size_t const max_input_size = 16 * PAGE_MAX_SIZE;
370 
371 static int
_sysctl_debug_syscall_rejection_masks(struct sysctl_oid __unused * oidp,void * __unused arg1,int __unused arg2,struct sysctl_req * req)372 _sysctl_debug_syscall_rejection_masks(struct sysctl_oid __unused *oidp, void * __unused arg1, int __unused arg2,
373     struct sysctl_req *req)
374 {
375 	size_t const max_name_len = 128;
376 	char name[max_name_len];
377 
378 	if (req->newptr == 0) {
379 		return 0;
380 	}
381 
382 	if (req->newlen > max_input_size) {
383 		return E2BIG;
384 	}
385 
386 	size_t const len = req->newlen;
387 	char *buf = kalloc_data(len + 1, Z_WAITOK);
388 
389 	if (buf == NULL) {
390 		return ENOMEM;
391 	}
392 
393 	/*
394 	 * sysctl_io_string always copies out the given buffer as the
395 	 * "old" value if requested.  We could construct a text
396 	 * representation of existing masks, but this is not particularly
397 	 * interesting, so we just return the dummy string "<masks>".
398 	 */
399 	strlcpy(buf, "<masks>", len + 1);
400 	int changed = 0;
401 	int error = sysctl_io_string(req, buf, len + 1, 0, &changed);
402 
403 	if (error != 0 || !changed) {
404 		goto out;
405 	}
406 
407 	char const *p = buf;
408 
409 	int id = 0;
410 	int l = 0;
411 	int n = sscanf(p, "%i: %n", &id, &l);
412 
413 	if (n != 1 || id < predefined_masks || id > syscall_rejection_mask_count + predefined_masks) {
414 		printf("%s: invalid mask id %i (or conversion failed)\n", __FUNCTION__, id);
415 		error = EINVAL;
416 		goto out;
417 	}
418 
419 	p += l;
420 
421 	syscall_rejection_mask_t new_mask = kalloc_data(SR_MASK_SIZE,
422 	    Z_WAITOK | Z_ZERO);
423 	if (new_mask == NULL) {
424 		printf("%s: allocating new mask for id %i failed\n", __FUNCTION__, id);
425 		error = ENOMEM;
426 		goto out;
427 	}
428 
429 	error = 0;
430 
431 	while (p < buf + len && *p != 0) {
432 		name[0] = 0;
433 		n = sscanf(p, "%127s %n", name, &l);
434 		if (n != 1 || name[0] == 0) {
435 			error = EINVAL;
436 			kfree_data(new_mask, SR_MASK_SIZE);
437 			goto out;
438 		}
439 
440 		if (!_syscall_rejection_add(new_mask, name)) {
441 			error = ENOENT;
442 			kfree_data(new_mask, SR_MASK_SIZE);
443 			goto out;
444 		}
445 
446 		p += l;
447 	}
448 
449 
450 	syscall_rejection_mask_t to_free = NULL;
451 
452 	lck_mtx_lock(&syscall_rejection_mtx);
453 
454 	syscall_rejection_mask_t *target_mask = &syscall_rejection_masks[id - predefined_masks];
455 
456 	to_free = *target_mask;
457 	*target_mask = new_mask;
458 
459 	lck_mtx_unlock(&syscall_rejection_mtx);
460 
461 	kfree_data(to_free, SR_MASK_SIZE);
462 out:
463 
464 	kfree_data(buf, len + 1);
465 	return error;
466 }
467 
/* kern.syscall_rejection_masks — write-only; input "<id>: <name> ...". */
SYSCTL_PROC(_kern, OID_AUTO, syscall_rejection_masks, CTLTYPE_STRING | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, _sysctl_debug_syscall_rejection_masks, "A", "system call rejection masks");
470 
471 #else /* CONFIG_DEBUG_SYSCALL_REJECTION */
472 
473 #include <sys/kern_debug.h>
474 
int
sys_debug_syscall_reject_config(struct proc * __unused p, struct debug_syscall_reject_config_args * __unused args, int __unused *ret)
{
	/* Kernel built without CONFIG_DEBUG_SYSCALL_REJECTION: not supported. */
	return ENOTSUP;
}
481 
int
debug_syscall_reject(struct proc * __unused p, struct debug_syscall_reject_args * __unused args, int * __unused retval)
{
	/* Kernel built without CONFIG_DEBUG_SYSCALL_REJECTION: not supported. */
	return ENOTSUP;
}
488 
void
reset_debug_syscall_rejection_mode(void)
{
	/* Kernel built without CONFIG_DEBUG_SYSCALL_REJECTION: nothing to reset. */
}
494 
495 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
496 
497 #if __arm64__ && (DEBUG || DEVELOPMENT)
498 
499 static void
_spinfor(uint64_t nanoseconds)500 _spinfor(uint64_t nanoseconds)
501 {
502 	uint64_t mt = 0;
503 	nanoseconds_to_absolutetime(nanoseconds, &mt);
504 
505 	uint64_t start = mach_absolute_time();
506 
507 	while (mach_absolute_time() < start + mt) {
508 		// Spinning.
509 	}
510 }
511 
512 static int
_sysctl_debug_disable_interrupts_test(struct sysctl_oid __unused * oidp,void * __unused arg1,int __unused arg2,struct sysctl_req * req)513 _sysctl_debug_disable_interrupts_test(struct sysctl_oid __unused *oidp, void * __unused arg1, int __unused arg2,
514     struct sysctl_req *req)
515 {
516 	int error = 0;
517 
518 	if (req->newptr == 0) {
519 		goto out;
520 	}
521 
522 	uint64_t val = 0;
523 	error = sysctl_io_number(req, 0, sizeof(val), &val, NULL);
524 
525 	if (error != 0 || val == 0) {
526 		goto out;
527 	}
528 
529 	boolean_t istate = ml_set_interrupts_enabled(false);
530 	_spinfor(val);
531 	ml_set_interrupts_enabled(istate);
532 
533 out:
534 	return error;
535 }
536 
537 static int
_sysctl_debug_disable_preemption_test(struct sysctl_oid __unused * oidp,void * __unused arg1,int __unused arg2,struct sysctl_req * req)538 _sysctl_debug_disable_preemption_test(struct sysctl_oid __unused *oidp, void * __unused arg1, int __unused arg2,
539     struct sysctl_req *req)
540 {
541 	int error = 0;
542 
543 	if (req->newptr == 0) {
544 		goto out;
545 	}
546 
547 	uint64_t val = 0;
548 	error = sysctl_io_number(req, 0, sizeof(val), &val, NULL);
549 
550 	if (error != 0 || val == 0) {
551 		goto out;
552 	}
553 
554 	disable_preemption();
555 	_spinfor(val);
556 	enable_preemption();
557 
558 out:
559 	return error;
560 }
561 
/* Write-only test sysctls (arm64 DEBUG/DEVELOPMENT only): the written
 * value is a duration in nanoseconds to spin with interrupts or
 * preemption disabled, respectively. */
SYSCTL_PROC(_kern, OID_AUTO, debug_disable_interrupts_test, CTLTYPE_QUAD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, _sysctl_debug_disable_interrupts_test, "Q", "disable interrupts for specified number of nanoseconds, for testing");

SYSCTL_PROC(_kern, OID_AUTO, debug_disable_preemption_test, CTLTYPE_QUAD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, _sysctl_debug_disable_preemption_test, "Q", "disable preemption for specified number of nanoseconds, for testing");
567 
568 #endif /* __arm64__ && (DEBUG || DEVELOPMENT) */
569