1 /*
2 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_space.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC capability spaces.
70 */
71
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/port.h>
75 #include <kern/assert.h>
76 #include <kern/sched_prim.h>
77 #include <kern/zalloc.h>
78 #include <ipc/port.h>
79 #include <ipc/ipc_entry.h>
80 #include <ipc/ipc_object.h>
81 #include <ipc/ipc_hash.h>
82 #include <ipc/ipc_port.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_right.h>
85 #include <prng/random.h>
86 #include <string.h>
87
/* Remove this in the future so port names are less predictable. */
#define CONFIG_SEMI_RANDOM_ENTRIES
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
/* Number of low entries kept sequential (not randomized) at table start. */
#define NUM_SEQ_ENTRIES 8
#endif

/* Reference-count group for ipc_space refcount debugging/attribution. */
os_refgrp_decl(static, is_refgrp, "is", NULL);
/* Zone backing all struct ipc_space allocations; memory cleared on free. */
static ZONE_DEFINE_TYPE(ipc_space_zone, "ipc spaces",
    struct ipc_space, ZC_ZFREE_CLEARMEM);

/* Singleton spaces set up early in boot; read-only after lockdown. */
SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_kernel;
SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_reply;
100
101 static ipc_space_t
ipc_space_alloc(void)102 ipc_space_alloc(void)
103 {
104 ipc_space_t space;
105
106 space = zalloc_flags(ipc_space_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
107 lck_ticket_init(&space->is_lock, &ipc_lck_grp);
108
109 return space;
110 }
111
112 __attribute__((noinline))
113 static void
ipc_space_free(ipc_space_t space)114 ipc_space_free(ipc_space_t space)
115 {
116 assert(!is_active(space));
117 lck_ticket_destroy(&space->is_lock, &ipc_lck_grp);
118 zfree(ipc_space_zone, space);
119 }
120
/*
 *	Routine:	ipc_space_retire_table
 *	Purpose:
 *		Defer-free an entry table through global SMR so that
 *		lock-free readers still traversing it remain safe; the
 *		table is actually freed once all current readers drain.
 */
void
ipc_space_retire_table(ipc_entry_table_t table)
{
	/* cast adapts ipc_entry_table_free_noclear to the generic SMR callback */
	smr_global_retire(table, ipc_entry_table_size(table),
	    (void (*)(void*))ipc_entry_table_free_noclear);
}
127
128 void
ipc_space_reference(ipc_space_t space)129 ipc_space_reference(
130 ipc_space_t space)
131 {
132 os_ref_retain_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp);
133 }
134
135 void
ipc_space_release(ipc_space_t space)136 ipc_space_release(
137 ipc_space_t space)
138 {
139 if (os_ref_release_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp) == 0) {
140 ipc_space_free(space);
141 }
142 }
143
144 void
ipc_space_lock(ipc_space_t space)145 ipc_space_lock(
146 ipc_space_t space)
147 {
148 lck_ticket_lock(&space->is_lock, &ipc_lck_grp);
149 }
150
151 void
ipc_space_unlock(ipc_space_t space)152 ipc_space_unlock(
153 ipc_space_t space)
154 {
155 lck_ticket_unlock(&space->is_lock);
156 }
157
158 void
ipc_space_lock_sleep(ipc_space_t space)159 ipc_space_lock_sleep(
160 ipc_space_t space)
161 {
162 lck_ticket_sleep_with_inheritor(&space->is_lock, &ipc_lck_grp,
163 LCK_SLEEP_DEFAULT, (event_t)space, space->is_grower,
164 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
165 }
166
167 /* Routine: ipc_space_get_rollpoint
168 * Purpose:
169 * Generate a new gencount rollover point from a space's entropy pool
170 */
171 ipc_entry_bits_t
ipc_space_get_rollpoint(ipc_space_t space)172 ipc_space_get_rollpoint(
173 ipc_space_t space)
174 {
175 return random_bool_gen_bits(
176 &space->bool_gen,
177 &space->is_entropy[0],
178 IS_ENTROPY_CNT,
179 IE_BITS_ROLL_BITS);
180 }
181
182 /*
183 * Routine: ipc_entry_rand_freelist
184 * Purpose:
185 * Pseudo-randomly permute the order of entries in an IPC space
186 * Arguments:
187 * space: the ipc space to initialize.
188 * table: the corresponding ipc table to initialize.
189 * the table is 0 initialized.
190 * bottom: the start of the range to initialize (inclusive).
191 * top: the end of the range to initialize (noninclusive).
192 */
void
ipc_space_rand_freelist(
	ipc_space_t             space,
	ipc_entry_t             table,
	mach_port_index_t       bottom,
	mach_port_index_t       size)
{
	/* only the initial (bottom == 0) fill gets the sequential prefix */
	int at_start = (bottom == 0);
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
	/*
	 * Only make sequential entries at the start of the table, and not when
	 * we're growing the space.
	 */
	ipc_entry_num_t total = 0;
#endif

	/* First entry in the free list is always free, and is the start of the free list. */
	mach_port_index_t curr = bottom;
	mach_port_index_t top = size;

	/* [bottom, top] is now the pool of not-yet-linked indices */
	bottom++;
	top--;

	/*
	 * Initialize the free list in the table.
	 * Add the entries in pseudo-random order and randomly set the generation
	 * number, in order to frustrate attacks involving port name reuse.
	 */
	while (bottom <= top) {
		ipc_entry_t entry = &table[curr];
		int which;
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
		/*
		 * XXX: This is a horrible hack to make sure that randomizing the port
		 * doesn't break programs that might have (sad) hard-coded values for
		 * certain port names.
		 */
		if (at_start && total++ < NUM_SEQ_ENTRIES) {
			which = 0;
		} else
#endif
		/* one random bit decides: take next index from the top or the bottom */
		which = random_bool_gen_bits(
			&space->bool_gen,
			&space->is_entropy[0],
			IS_ENTROPY_CNT,
			1);

		mach_port_index_t next;
		if (which) {
			next = top;
			top--;
		} else {
			next = bottom;
			bottom++;
		}

		/*
		 * The entry's gencount will roll over on its first allocation, at which
		 * point a random rollover will be set for the entry.
		 */
		entry->ie_bits = IE_BITS_GEN_MASK;
		entry->ie_next = next;
		curr = next;
	}
	/* last linked entry terminates the list (ie_next stays 0 from zero-init) */
	table[curr].ie_bits = IE_BITS_GEN_MASK;
}
259
260
261 /*
262 * Routine: ipc_space_create
263 * Purpose:
264 * Creates a new IPC space.
265 *
266 * The new space has two references, one for the caller
267 * and one because it is active.
268 * Conditions:
269 * Nothing locked. Allocates memory.
270 * Returns:
271 * KERN_SUCCESS Created a space.
272 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
273 */
274
275 kern_return_t
ipc_space_create(ipc_label_t label,ipc_space_t * spacep)276 ipc_space_create(
277 ipc_label_t label,
278 ipc_space_t *spacep)
279 {
280 ipc_space_t space;
281 ipc_entry_table_t table;
282 ipc_entry_num_t count;
283
284 table = ipc_entry_table_alloc_by_count(IPC_ENTRY_TABLE_MIN,
285 Z_WAITOK | Z_ZERO | Z_NOFAIL);
286 space = ipc_space_alloc();
287 count = ipc_entry_table_count(table);
288
289 random_bool_init(&space->bool_gen);
290 ipc_space_rand_freelist(space, ipc_entry_table_base(table), 0, count);
291
292 os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 2, 0);
293 space->is_table_free = count - 1;
294 space->is_label = label;
295 space->is_low_mod = count;
296 space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
297 smr_init_store(&space->is_table, table);
298
299 *spacep = space;
300 return KERN_SUCCESS;
301 }
302
303 /*
304 * Routine: ipc_space_label
305 * Purpose:
306 * Modify the label on a space. The desired
307 * label must be a super-set of the current
308 * label for the space (as rights may already
309 * have been previously copied out under the
310 * old label value.
311 * Conditions:
312 * Nothing locked.
313 * Returns:
314 * KERN_SUCCESS Updated the label
315 * KERN_INVALID_VALUE label not a superset of old
316 */
317 kern_return_t
ipc_space_label(ipc_space_t space,ipc_label_t label)318 ipc_space_label(
319 ipc_space_t space,
320 ipc_label_t label)
321 {
322 is_write_lock(space);
323 if (!is_active(space)) {
324 is_write_unlock(space);
325 return KERN_SUCCESS;
326 }
327
328 if ((space->is_label & label) != space->is_label) {
329 is_write_unlock(space);
330 return KERN_INVALID_VALUE;
331 }
332 space->is_label = label;
333 is_write_unlock(space);
334 return KERN_SUCCESS;
335 }
336
337 /*
338 * Routine: ipc_space_add_label
339 * Purpose:
340 * Modify the label on a space. The desired
341 * label is added to the labels already set
342 * on the space.
343 * Conditions:
344 * Nothing locked.
345 * Returns:
346 * KERN_SUCCESS Updated the label
347 * KERN_INVALID_VALUE label not a superset of old
348 */
349 kern_return_t
ipc_space_add_label(ipc_space_t space,ipc_label_t label)350 ipc_space_add_label(
351 ipc_space_t space,
352 ipc_label_t label)
353 {
354 is_write_lock(space);
355 if (!is_active(space)) {
356 is_write_unlock(space);
357 return KERN_SUCCESS;
358 }
359
360 space->is_label |= label;
361 is_write_unlock(space);
362 return KERN_SUCCESS;
363 }
364 /*
365 * Routine: ipc_space_create_special
366 * Purpose:
367 * Create a special space. A special space
368 * doesn't hold rights in the normal way.
369 * Instead it is place-holder for holding
370 * disembodied (naked) receive rights.
371 * See ipc_port_alloc_special/ipc_port_dealloc_special.
372 * Conditions:
373 * Nothing locked.
374 * Returns:
375 * KERN_SUCCESS Created a space.
376 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
377 */
378
379 kern_return_t
ipc_space_create_special(ipc_space_t * spacep)380 ipc_space_create_special(
381 ipc_space_t *spacep)
382 {
383 ipc_space_t space;
384
385 space = ipc_space_alloc();
386 os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 1, 0);
387 space->is_label = IPC_LABEL_SPECIAL;
388 space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
389
390 *spacep = space;
391 return KERN_SUCCESS;
392 }
393
394 /*
395 * Routine: ipc_space_terminate
396 * Purpose:
397 * Marks the space as dead and cleans up the entries.
398 * Does nothing if the space is already dead.
399 * Conditions:
400 * Nothing locked.
401 */
402
void
ipc_space_terminate(
	ipc_space_t space)
{
	ipc_entry_table_t table;

	assert(space != IS_NULL);

	is_write_lock(space);
	if (!is_active(space)) {
		/* already terminated by someone else; nothing to do */
		is_write_unlock(space);
		return;
	}

	/* detach the table; clearing is_table is what marks the space dead */
	table = smr_serialized_load(&space->is_table);
	smr_clear_store(&space->is_table);

	/*
	 * If somebody is trying to grow the table,
	 * we must wait until they finish and figure
	 * out the space died.
	 */
	while (is_growing(space)) {
		is_write_sleep(space);
	}

	is_write_unlock(space);


	/*
	 * Now we can futz with it unlocked.
	 *
	 * First destroy receive rights, then the rest.
	 * This will cut down the number of notifications
	 * being sent when the notification destination
	 * was a receive right in this space.
	 */

	/* entry 0 is never a valid name, so start at index 1 */
	for (mach_port_index_t index = 1;
	    ipc_entry_table_contains(table, index);
	    index++) {
		ipc_entry_t entry = ipc_entry_table_get_nocheck(table, index);
		mach_port_type_t type;

		type = IE_BITS_TYPE(entry->ie_bits);
		if (type != MACH_PORT_TYPE_NONE) {
			mach_port_name_t name;

			/* rebuild the full port name: index plus generation bits */
			name = MACH_PORT_MAKE(index,
			    IE_BITS_GEN(entry->ie_bits));
			ipc_right_terminate(space, name, entry);
		}
	}

	/* defer-free the table via SMR; readers may still be traversing it */
	ipc_space_retire_table(table);
	space->is_table_free = 0;

	/*
	 * Because the space is now dead,
	 * we must release the "active" reference for it.
	 * Our caller still has his reference.
	 */
	is_release(space);
}
467
468 #if CONFIG_PROC_RESOURCE_LIMITS
469 /*
470 * ipc_space_set_table_size_limits:
471 *
472 * Set the table size's soft and hard limit.
473 */
474 kern_return_t
ipc_space_set_table_size_limits(ipc_space_t space,ipc_entry_num_t soft_limit,ipc_entry_num_t hard_limit)475 ipc_space_set_table_size_limits(
476 ipc_space_t space,
477 ipc_entry_num_t soft_limit,
478 ipc_entry_num_t hard_limit)
479 {
480 if (space == IS_NULL) {
481 return KERN_INVALID_TASK;
482 }
483
484 is_write_lock(space);
485
486 if (!is_active(space)) {
487 is_write_unlock(space);
488 return KERN_INVALID_TASK;
489 }
490
491 if (hard_limit && soft_limit >= hard_limit) {
492 soft_limit = 0;
493 }
494
495 space->is_table_size_soft_limit = soft_limit;
496 space->is_table_size_hard_limit = hard_limit;
497
498 is_write_unlock(space);
499
500 return KERN_SUCCESS;
501 }
502
503 /*
504 * Check if port space has exceeded its limits.
505 * Should be called with the space write lock held.
506 */
507 void
ipc_space_check_limit_exceeded(ipc_space_t space)508 ipc_space_check_limit_exceeded(ipc_space_t space)
509 {
510 size_t size = ipc_entry_table_count(is_active_table(space));
511
512 if (!is_above_soft_limit_notify(space) && space->is_table_size_soft_limit &&
513 ((size - space->is_table_free) > space->is_table_size_soft_limit)) {
514 is_above_soft_limit_send_notification(space);
515 act_set_astproc_resource(current_thread());
516 } else if (!is_above_hard_limit_notify(space) && space->is_table_size_hard_limit &&
517 ((size - space->is_table_free) > space->is_table_size_hard_limit)) {
518 is_above_hard_limit_send_notification(space);
519 act_set_astproc_resource(current_thread());
520 }
521 }
522 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
523
/*
 * ipc_space_get_table_size_and_limits:
 *
 * Report the space's current used-entry count and its configured
 * soft/hard table size limits, while recording which limit
 * notification is being delivered so it is only sent once.
 * Returns KERN_FAILURE if that notification was already claimed
 * by another thread, KERN_INVALID_TASK for a NULL/dead space.
 */
kern_return_t
ipc_space_get_table_size_and_limits(
	ipc_space_t             space,
	ipc_entry_num_t         *current_size,
	ipc_entry_num_t         *soft_limit,
	ipc_entry_num_t         *hard_limit)
{
	kern_return_t kr = KERN_SUCCESS;
	ipc_entry_table_t table;

	if (space == IS_NULL) {
		return KERN_INVALID_TASK;
	}

	is_write_lock(space);

	if (!is_active(space)) {
		kr = KERN_INVALID_TASK;
		goto exit;
	}

	table = is_active_table(space);
	*current_size = ipc_entry_table_count(table) - space->is_table_free;
	/* "at max limit" takes precedence over soft/hard limit reporting */
	if (is_at_max_limit_notify(space)) {
		if (is_at_max_limit_already_notified(space)) {
			/* another thread already delivered this notification */
			kr = KERN_FAILURE;
		} else {
			*soft_limit = 0;
			*hard_limit = 0;
			is_at_max_limit_notified(space);
		}
		goto exit;
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	*soft_limit = space->is_table_size_soft_limit;
	*hard_limit = space->is_table_size_hard_limit;

	if (!*soft_limit && !*hard_limit) {
		/* no limits configured: nothing to notify about */
		kr = KERN_INVALID_VALUE;
		goto exit;
	}

	/*
	 * Check if the thread sending the soft limit notification arrives after
	 * the one that sent the hard limit notification
	 */
	if (is_hard_limit_already_notified(space)) {
		kr = KERN_FAILURE;
		goto exit;
	}

	if (*hard_limit > 0 && *current_size >= *hard_limit) {
		/* report only the hard limit; mark it claimed */
		*soft_limit = 0;
		is_hard_limit_notified(space);
	} else {
		if (is_soft_limit_already_notified(space)) {
			kr = KERN_FAILURE;
			goto exit;
		}
		if (*soft_limit > 0 && *current_size >= *soft_limit) {
			/* report only the soft limit; mark it claimed */
			*hard_limit = 0;
			is_soft_limit_notified(space);
		}
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

exit:
	is_write_unlock(space);
	return kr;
}
595
596 /*
597 * Set an ast if port space is at its max limit.
598 * Should be called with the space write lock held.
599 */
600 void
ipc_space_set_at_max_limit(ipc_space_t space)601 ipc_space_set_at_max_limit(ipc_space_t space)
602 {
603 if (!is_at_max_limit_notify(space)) {
604 is_at_max_limit_send_notification(space);
605 act_set_astproc_resource(current_thread());
606 }
607 }
608