xref: /xnu-8019.80.24/osfmk/ipc/ipc_space.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 /*
65  *	File:	ipc/ipc_space.c
66  *	Author:	Rich Draves
67  *	Date:	1989
68  *
69  *	Functions to manipulate IPC capability spaces.
70  */
71 
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/port.h>
75 #include <kern/assert.h>
76 #include <kern/sched_prim.h>
77 #include <kern/zalloc.h>
78 #include <ipc/port.h>
79 #include <ipc/ipc_entry.h>
80 #include <ipc/ipc_object.h>
81 #include <ipc/ipc_hash.h>
82 #include <ipc/ipc_table.h>
83 #include <ipc/ipc_port.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_right.h>
86 #include <prng/random.h>
87 #include <string.h>
88 
/* Remove this in the future so port names are less predictable. */
#define CONFIG_SEMI_RANDOM_ENTRIES
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
/*
 * Number of leading table entries that are handed out sequentially rather
 * than randomized, to avoid breaking programs with hard-coded early port
 * names (see ipc_space_rand_freelist).
 */
#define NUM_SEQ_ENTRIES 8
#endif
94 
/* Refcount group used for ipc_space reference accounting. */
os_refgrp_decl(static, is_refgrp, "is", NULL);
/* Zone backing all struct ipc_space allocations; memory is cleared on free. */
static ZONE_DECLARE(ipc_space_zone, "ipc spaces",
    sizeof(struct ipc_space), ZC_ZFREE_CLEARMEM);

/* Global kernel and reply spaces, set once during bootstrap (read-only late). */
SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_kernel;
SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_reply;
101 
102 static ipc_space_t
ipc_space_alloc(void)103 ipc_space_alloc(void)
104 {
105 	ipc_space_t space;
106 
107 	space = zalloc_flags(ipc_space_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
108 	lck_ticket_init(&space->is_lock, &ipc_lck_grp);
109 
110 	return space;
111 }
112 
/*
 *	Routine:	ipc_space_free
 *	Purpose:
 *		Destroy the space lock and return a dead space's memory
 *		to its zone.
 *	Conditions:
 *		The space must be inactive; called when the last
 *		reference is dropped (see ipc_space_release).
 */
__attribute__((noinline))
static void
ipc_space_free(ipc_space_t space)
{
	assert(!is_active(space));
	lck_ticket_destroy(&space->is_lock, &ipc_lck_grp);
	zfree(ipc_space_zone, space);
}
121 
/*
 *	Routine:	ipc_space_free_table
 *	Purpose:
 *		Free an IPC entry table.  The table records its own size
 *		in its first entry (ie_size), set by ipc_space_rand_freelist.
 */
void
ipc_space_free_table(ipc_entry_t table)
{
	it_entries_free(table->ie_size, table);
}
127 
#if MACH_LOCKFREE_SPACE
/*
 *	Routine:	ipc_space_retire_table
 *	Purpose:
 *		Defer freeing an entry table until no hazard-pointer
 *		reader can still be referencing it; the table is then
 *		released through ipc_space_free_table.
 */
void
ipc_space_retire_table(ipc_entry_t table)
{
	hazard_retire(table, sizeof(struct ipc_entry) * table->ie_size,
	    (void (*)(void*))ipc_space_free_table);
}
#endif /* MACH_LOCKFREE_SPACE */
136 
/*
 *	Routine:	ipc_space_reference
 *	Purpose:
 *		Take a reference on a space.  The refcount shares its
 *		storage word with the is_bits flags, hence the mask.
 */
void
ipc_space_reference(
	ipc_space_t     space)
{
	os_ref_retain_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp);
}
143 
144 void
ipc_space_release(ipc_space_t space)145 ipc_space_release(
146 	ipc_space_t     space)
147 {
148 	if (os_ref_release_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp) == 0) {
149 		ipc_space_free(space);
150 	}
151 }
152 
/*
 *	Routine:	ipc_space_lock
 *	Purpose:
 *		Acquire the space's ticket lock (exclusive).
 */
void
ipc_space_lock(
	ipc_space_t     space)
{
	lck_ticket_lock(&space->is_lock, &ipc_lck_grp);
}
159 
/*
 *	Routine:	ipc_space_unlock
 *	Purpose:
 *		Release the space's ticket lock.
 */
void
ipc_space_unlock(
	ipc_space_t     space)
{
	lck_ticket_unlock(&space->is_lock);
}
166 
/*
 *	Routine:	ipc_space_lock_sleep
 *	Purpose:
 *		Drop the space lock and sleep (uninterruptibly, no timeout)
 *		on the space itself as the wait event, naming
 *		space->is_grower as the priority inheritor — i.e. wait for
 *		the thread currently growing the table (see ipc_space_terminate).
 */
void
ipc_space_lock_sleep(
	ipc_space_t     space)
{
	lck_ticket_sleep_with_inheritor(&space->is_lock, &ipc_lck_grp,
	    LCK_SLEEP_DEFAULT, (event_t)space, space->is_grower,
	    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
}
175 
176 /*      Routine:		ipc_space_get_rollpoint
177  *      Purpose:
178  *              Generate a new gencount rollover point from a space's entropy pool
179  */
180 ipc_entry_bits_t
ipc_space_get_rollpoint(ipc_space_t space)181 ipc_space_get_rollpoint(
182 	ipc_space_t     space)
183 {
184 	return random_bool_gen_bits(
185 		&space->bool_gen,
186 		&space->is_entropy[0],
187 		IS_ENTROPY_CNT,
188 		IE_BITS_ROLL_BITS);
189 }
190 
/*
 *	Routine:	ipc_space_rand_freelist
 *	Purpose:
 *		Pseudo-randomly permute the order of entries in an IPC space,
 *		so that allocated port names are harder to predict.
 *	Arguments:
 *		space:	the ipc space to initialize (used as entropy source).
 *		table:	the corresponding ipc table to initialize.
 *			the table is 0 initialized.
 *		bottom:	the start of the range to initialize (inclusive).
 *		size:	the end of the range to initialize (noninclusive).
 */
void
ipc_space_rand_freelist(
	ipc_space_t             space,
	ipc_entry_t             table,
	mach_port_index_t       bottom,
	mach_port_index_t       size)
{
	/* bottom == 0 only for the initial table, never when growing. */
	int at_start = (bottom == 0);
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
	/*
	 * Only make sequential entries at the start of the table, and not when
	 * we're growing the space.
	 */
	ipc_entry_num_t total = 0;
#endif

	/* First entry in the free list is always free, and is the start of the free list. */
	mach_port_index_t curr = bottom;
	mach_port_index_t top = size;

	bottom++;
	top--;

	/*
	 *	Initialize the free list in the table.
	 *	Add the entries in pseudo-random order and randomly set the generation
	 *	number, in order to frustrate attacks involving port name reuse.
	 */
	while (bottom <= top) {
		ipc_entry_t entry = &table[curr];
		int which;
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
		/*
		 * XXX: This is a horrible hack to make sure that randomizing the port
		 * doesn't break programs that might have (sad) hard-coded values for
		 * certain port names.
		 */
		if (at_start && total++ < NUM_SEQ_ENTRIES) {
			which = 0;
		} else
#endif
		/* One random bit chooses whether the next link comes from the
		 * low (bottom) or high (top) end of the remaining range. */
		which = random_bool_gen_bits(
			&space->bool_gen,
			&space->is_entropy[0],
			IS_ENTROPY_CNT,
			1);

		mach_port_index_t next;
		if (which) {
			next = top;
			top--;
		} else {
			next = bottom;
			bottom++;
		}

		/*
		 * The entry's gencount will roll over on its first allocation, at which
		 * point a random rollover will be set for the entry.
		 */
		entry->ie_bits   = IE_BITS_GEN_MASK;
		entry->ie_next   = next;
		curr = next;
	}
	/* Last entry: ie_next stays 0 (table was zero-filled), ending the list. */
	table[curr].ie_bits   = IE_BITS_GEN_MASK;

	/* Entry 0 doubles as the table header and records the table size. */
	table[0].ie_size = size;
}
270 
271 
272 /*
273  *	Routine:	ipc_space_create
274  *	Purpose:
275  *		Creates a new IPC space.
276  *
277  *		The new space has two references, one for the caller
278  *		and one because it is active.
279  *	Conditions:
280  *		Nothing locked.  Allocates memory.
281  *	Returns:
282  *		KERN_SUCCESS		Created a space.
283  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
284  */
285 
286 kern_return_t
ipc_space_create(ipc_table_size_t initial,ipc_label_t label,ipc_space_t * spacep)287 ipc_space_create(
288 	ipc_table_size_t        initial,
289 	ipc_label_t             label,
290 	ipc_space_t             *spacep)
291 {
292 	ipc_space_t space;
293 	ipc_entry_t table;
294 	ipc_entry_num_t new_size;
295 
296 	table = it_entries_alloc(initial); /* zero-initialized */
297 	if (table == IE_NULL) {
298 		return KERN_RESOURCE_SHORTAGE;
299 	}
300 
301 	space = ipc_space_alloc();
302 	new_size = initial->its_size;
303 
304 	random_bool_init(&space->bool_gen);
305 	ipc_space_rand_freelist(space, table, 0, new_size);
306 
307 	os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 2, 0);
308 	space->is_table_free = new_size - 1;
309 	space->is_table_next = initial + 1;
310 	space->is_label = label;
311 	space->is_low_mod = new_size;
312 	space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
313 	hazard_ptr_init(&space->is_table, table);
314 
315 	*spacep = space;
316 	return KERN_SUCCESS;
317 }
318 
319 /*
320  *	Routine:	ipc_space_label
321  *	Purpose:
322  *		Modify the label on a space. The desired
323  *      label must be a super-set of the current
324  *      label for the space (as rights may already
325  *      have been previously copied out under the
326  *      old label value.
327  *	Conditions:
328  *		Nothing locked.
329  *	Returns:
330  *		KERN_SUCCESS		Updated the label
331  *		KERN_INVALID_VALUE  label not a superset of old
332  */
333 kern_return_t
ipc_space_label(ipc_space_t space,ipc_label_t label)334 ipc_space_label(
335 	ipc_space_t space,
336 	ipc_label_t label)
337 {
338 	is_write_lock(space);
339 	if (!is_active(space)) {
340 		is_write_unlock(space);
341 		return KERN_SUCCESS;
342 	}
343 
344 	if ((space->is_label & label) != space->is_label) {
345 		is_write_unlock(space);
346 		return KERN_INVALID_VALUE;
347 	}
348 	space->is_label = label;
349 	is_write_unlock(space);
350 	return KERN_SUCCESS;
351 }
352 
353 /*
354  *	Routine:	ipc_space_add_label
355  *	Purpose:
356  *		Modify the label on a space. The desired
357  *      label is added to the labels already set
358  *      on the space.
359  *	Conditions:
360  *		Nothing locked.
361  *	Returns:
362  *		KERN_SUCCESS		Updated the label
363  *		KERN_INVALID_VALUE  label not a superset of old
364  */
365 kern_return_t
ipc_space_add_label(ipc_space_t space,ipc_label_t label)366 ipc_space_add_label(
367 	ipc_space_t space,
368 	ipc_label_t label)
369 {
370 	is_write_lock(space);
371 	if (!is_active(space)) {
372 		is_write_unlock(space);
373 		return KERN_SUCCESS;
374 	}
375 
376 	space->is_label |= label;
377 	is_write_unlock(space);
378 	return KERN_SUCCESS;
379 }
380 /*
381  *	Routine:	ipc_space_create_special
382  *	Purpose:
383  *		Create a special space.  A special space
384  *		doesn't hold rights in the normal way.
385  *		Instead it is place-holder for holding
386  *		disembodied (naked) receive rights.
387  *		See ipc_port_alloc_special/ipc_port_dealloc_special.
388  *	Conditions:
389  *		Nothing locked.
390  *	Returns:
391  *		KERN_SUCCESS		Created a space.
392  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
393  */
394 
395 kern_return_t
ipc_space_create_special(ipc_space_t * spacep)396 ipc_space_create_special(
397 	ipc_space_t     *spacep)
398 {
399 	ipc_space_t space;
400 
401 	space = ipc_space_alloc();
402 	os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 1, 0);
403 	space->is_label      = IPC_LABEL_SPECIAL;
404 	space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
405 
406 	*spacep = space;
407 	return KERN_SUCCESS;
408 }
409 
/*
 *	Routine:	ipc_space_terminate
 *	Purpose:
 *		Marks the space as dead and cleans up the entries.
 *		Does nothing if the space is already dead.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_space_terminate(
	ipc_space_t     space)
{
	ipc_entry_t table;
	ipc_entry_num_t size;

	assert(space != IS_NULL);

	is_write_lock(space);
	if (!is_active(space)) {
		/* Someone else already terminated this space. */
		is_write_unlock(space);
		return;
	}

	/*
	 * Detach the entry table from the space.
	 * NOTE(review): this appears to be what makes is_active() false
	 * for later observers — confirm against the is_active() definition.
	 */
	table = hazard_ptr_serialized_load(&space->is_table);
	hazard_ptr_clear(&space->is_table);

	/*
	 *	If somebody is trying to grow the table,
	 *	we must wait until they finish and figure
	 *	out the space died.
	 */
	while (is_growing(space)) {
		is_write_sleep(space);
	}

	is_write_unlock(space);


	/*
	 *	Now we can futz with it	unlocked.
	 *
	 *	First destroy receive rights, then the rest.
	 *	This will cut down the number of notifications
	 *	being sent when the notification destination
	 *	was a receive right in this space.
	 */

	size = table->ie_size;

	/* Pass 1: terminate receive rights only (entry 0 is the header). */
	for (mach_port_index_t index = 1; index < size; index++) {
		ipc_entry_t entry = &table[index];
		mach_port_type_t type;

		type = IE_BITS_TYPE(entry->ie_bits);
		if (type & MACH_PORT_TYPE_RECEIVE) {
			mach_port_name_t name;

			/* Reconstruct the full port name: index plus generation. */
			name = MACH_PORT_MAKE(index,
			    IE_BITS_GEN(entry->ie_bits));
			ipc_right_terminate(space, name, entry);
		}
	}

	/* Pass 2: terminate whatever rights remain. */
	for (mach_port_index_t index = 1; index < size; index++) {
		ipc_entry_t entry = &table[index];
		mach_port_type_t type;

		type = IE_BITS_TYPE(entry->ie_bits);
		if (type != MACH_PORT_TYPE_NONE) {
			mach_port_name_t name;

			name = MACH_PORT_MAKE(index,
			    IE_BITS_GEN(entry->ie_bits));
			ipc_right_terminate(space, name, entry);
		}
	}

	/* Defer the actual free until hazard-pointer readers are done. */
	ipc_space_retire_table(table);
	space->is_table_free = 0;

	/*
	 *	Because the space is now dead,
	 *	we must release the "active" reference for it.
	 *	Our caller still has his reference.
	 */
	is_release(space);
}
498 
499 #if CONFIG_PROC_RESOURCE_LIMITS
500 /*
501  *	ipc_space_set_table_size_limits:
502  *
503  *	Set the table size's soft and hard limit.
504  */
505 kern_return_t
ipc_space_set_table_size_limits(ipc_space_t space,ipc_entry_num_t soft_limit,ipc_entry_num_t hard_limit)506 ipc_space_set_table_size_limits(
507 	ipc_space_t     space,
508 	ipc_entry_num_t soft_limit,
509 	ipc_entry_num_t hard_limit)
510 {
511 	if (space == IS_NULL) {
512 		return KERN_INVALID_TASK;
513 	}
514 
515 	is_write_lock(space);
516 
517 	if (!is_active(space)) {
518 		is_write_unlock(space);
519 		return KERN_INVALID_TASK;
520 	}
521 
522 	if (hard_limit && soft_limit >= hard_limit) {
523 		soft_limit = 0;
524 	}
525 
526 	space->is_table_size_soft_limit = soft_limit;
527 	space->is_table_size_hard_limit = hard_limit;
528 
529 	is_write_unlock(space);
530 
531 	return KERN_SUCCESS;
532 }
533 
/*
 * Check if port space has exceeded its limits.
 * Should be called with the space write lock held.
 *
 * When a limit is newly crossed, the space is flagged (so the
 * notification is only queued once) and an AST is set on the current
 * thread to deliver the resource notification.
 */
void
ipc_space_check_limit_exceeded(ipc_space_t space)
{
	/* Table capacity in entries; (size - is_table_free) = entries in use. */
	ipc_entry_num_t size = is_active_table(space)->ie_size;

	if (!is_above_soft_limit_notify(space) && space->is_table_size_soft_limit &&
	    ((size - space->is_table_free) > space->is_table_size_soft_limit)) {
		is_above_soft_limit_send_notification(space);
		act_set_astproc_resource(current_thread());
	} else if (!is_above_hard_limit_notify(space) && space->is_table_size_hard_limit &&
	    ((size - space->is_table_free) > space->is_table_size_hard_limit)) {
		is_above_hard_limit_send_notification(space);
		act_set_astproc_resource(current_thread());
	}
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
554 
/*
 *	Routine:	ipc_space_get_table_size_and_limits
 *	Purpose:
 *		Report the space's current entry usage and its configured
 *		soft/hard table-size limits, advancing the notification
 *		state machine so each limit event is reported only once.
 *	Conditions:
 *		Nothing locked; takes the space write lock.
 *	Returns:
 *		KERN_SUCCESS		Values returned.
 *		KERN_INVALID_TASK	Space is null or terminated.
 *		KERN_FAILURE		This limit event was already reported.
 *		KERN_INVALID_VALUE	No limits are configured.
 */
kern_return_t
ipc_space_get_table_size_and_limits(
	ipc_space_t     space,
	ipc_entry_num_t *current_size,
	ipc_entry_num_t *soft_limit,
	ipc_entry_num_t *hard_limit)
{
	kern_return_t kr = KERN_SUCCESS;

	if (space == IS_NULL) {
		return KERN_INVALID_TASK;
	}

	is_write_lock(space);

	if (!is_active(space)) {
		kr = KERN_INVALID_TASK;
		goto exit;
	}

	/* Entries in use = table capacity minus free entries. */
	*current_size = is_active_table(space)->ie_size - space->is_table_free;
	/* "At max limit" (table can't grow further) takes priority over both limits. */
	if (is_at_max_limit_notify(space)) {
		if (is_at_max_limit_already_notified(space)) {
			kr = KERN_FAILURE;
		} else {
			*soft_limit = 0;
			*hard_limit = 0;
			is_at_max_limit_notified(space);
		}
		goto exit;
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	*soft_limit = space->is_table_size_soft_limit;
	*hard_limit = space->is_table_size_hard_limit;

	if (!*soft_limit && !*hard_limit) {
		kr = KERN_INVALID_VALUE;
		goto exit;
	}

	/*
	 * Check if the thread sending the soft limit notification arrives after
	 * the one that sent the hard limit notification
	 */
	if (is_hard_limit_already_notified(space)) {
		kr = KERN_FAILURE;
		goto exit;
	}

	if (*hard_limit > 0 && *current_size >= *hard_limit) {
		/* Hard limit reached: report it alone and mark it consumed. */
		*soft_limit = 0;
		is_hard_limit_notified(space);
	} else {
		if (is_soft_limit_already_notified(space)) {
			kr = KERN_FAILURE;
			goto exit;
		}
		if (*soft_limit > 0 && *current_size >= *soft_limit) {
			/* Soft limit reached: report it alone and mark it consumed. */
			*hard_limit = 0;
			is_soft_limit_notified(space);
		}
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

exit:
	is_write_unlock(space);
	return kr;
}
624 
625 /*
626  * Set an ast if port space is at its max limit.
627  * Should be called with the space write lock held.
628  */
629 void
ipc_space_set_at_max_limit(ipc_space_t space)630 ipc_space_set_at_max_limit(ipc_space_t space)
631 {
632 	if (!is_at_max_limit_notify(space)) {
633 		is_at_max_limit_send_notification(space);
634 		act_set_astproc_resource(current_thread());
635 	}
636 }
637