xref: /xnu-12377.81.4/osfmk/ipc/ipc_entry.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796) !
1 /*
2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	ipc/ipc_entry.c
60  *	Author:	Rich Draves
61  *	Date:	1989
62  *
63  *	Primitive functions to manipulate translation entries.
64  */
65 
66 #include <mach/kern_return.h>
67 #include <mach/port.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/zalloc.h>
71 #include <kern/misc_protos.h>
72 #include <ipc/port.h>
73 #include <ipc/ipc_entry.h>
74 #include <ipc/ipc_space.h>
75 #include <ipc/ipc_object.h>
76 #include <ipc/ipc_hash.h>
77 #include <ipc/ipc_port.h>
78 #include <string.h>
79 #include <sys/kdebug.h>
80 
/*
 * Array type backing an IPC space's entry table; KT_PRIV_ACCT presumably
 * requests private kalloc accounting for these allocations — confirm in
 * kern/zalloc.h.
 */
KALLOC_ARRAY_TYPE_DEFINE(ipc_entry_table, struct ipc_entry, KT_PRIV_ACCT);
82 
/*
 *	Routine: ipc_entry_table_count_max
 *	Purpose:
 *		returns the maximum number of entries an IPC space
 *		is allowed to contain (the maximum size to which it will grow)
 *	Conditions:
 *		none
 */
unsigned int
ipc_entry_table_count_max(void)
{
	/* Convert the compile-time byte-size cap into an entry count. */
	return ipc_entry_table_size_to_count(CONFIG_IPC_TABLE_ENTRIES_SIZE_MAX);
}
96 
97 /*
98  *	Routine:	ipc_entry_name_mask
99  *	Purpose:
100  *		Ensure a mach port name has the default ipc entry
101  *		generation bits set. This can be used to ensure that
102  *		a name passed in by user space matches names generated
103  *		by the kernel.
104  *	Conditions:
105  *		None.
106  *	Returns:
107  *		'name' input with default generation bits masked or added
108  *		as appropriate.
109  */
110 mach_port_name_t
ipc_entry_name_mask(mach_port_name_t name)111 ipc_entry_name_mask(mach_port_name_t name)
112 {
113 	return name | MACH_PORT_MAKE(0, IE_BITS_ROLL_MASK);
114 }
115 
116 /*
117  * Restart a generation counter with the specified bits for the rollover point.
118  * There are 4 different rollover points:
119  * bits    rollover period
120  * 0 0     64
121  * 0 1     32
122  * 1 0     22
123  * 1 1     16
124  */
125 static ipc_entry_bits_t
ipc_entry_make_gen(ipc_entry_bits_t ie_bits,ipc_space_t space)126 ipc_entry_make_gen(ipc_entry_bits_t ie_bits, ipc_space_t space)
127 {
128 	ipc_entry_bits_t roll_bits;
129 
130 	roll_bits = random_bool_gen_bits(&space->is_prng,
131 	    space->is_entropy, IS_ENTROPY_CNT, IE_BITS_ROLL_BITS);
132 
133 	roll_bits <<= __builtin_ctz(IE_BITS_ROLL_MASK);
134 	return (ie_bits & IE_BITS_GEN_MASK) | roll_bits;
135 }
136 
137 static ipc_entry_bits_t
ipc_entry_next_gen(ipc_entry_bits_t oldgen,ipc_space_t space)138 ipc_entry_next_gen(ipc_entry_bits_t oldgen, ipc_space_t space)
139 {
140 	ipc_entry_bits_t roll = oldgen & IE_BITS_ROLL_MASK;
141 	ipc_entry_bits_t delta = IE_BITS_GEN_ONE + (roll << IE_BITS_ROLL_BITS);
142 	ipc_entry_bits_t newgen;
143 
144 	if (os_add_overflow(oldgen, delta, &newgen)) {
145 		newgen = ipc_entry_make_gen(0, space);
146 	}
147 	return newgen;
148 }
149 
150 /*
151  *	Routine:	ipc_entry_lookup
152  *	Purpose:
153  *		Searches for an entry, given its name.
154  *	Conditions:
155  *		The space must be read or write locked throughout.
156  *		The space must be active.
157  */
158 
159 ipc_entry_t
ipc_entry_lookup(ipc_space_t space,mach_port_name_t name)160 ipc_entry_lookup(
161 	ipc_space_t             space,
162 	mach_port_name_t        name)
163 {
164 	mach_port_index_t index;
165 	ipc_entry_table_t table;
166 	ipc_entry_t entry;
167 
168 	table = is_active_table(space);
169 	index = MACH_PORT_INDEX(name);
170 	if (__improbable(index == 0)) {
171 		return IE_NULL;
172 	}
173 
174 	entry = ipc_entry_table_get(table, index);
175 	if (__improbable(!entry ||
176 	    IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
177 	    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)) {
178 		return IE_NULL;
179 	}
180 
181 	return entry;
182 }
183 
184 
185 /*
186  *	Routine:	ipc_entries_hold
187  *	Purpose:
188  *		Verifies that there are at least 'entries_needed'
189  *		free list members
190  *	Conditions:
191  *		The space is write-locked and active throughout.
192  *		An object may be locked.  Will not allocate memory.
193  *	Returns:
194  *		KERN_SUCCESS		Free entries were found.
195  *		KERN_NO_SPACE		No entry allocated.
196  */
197 
198 kern_return_t
ipc_entries_hold(ipc_space_t space,uint32_t entries_needed)199 ipc_entries_hold(
200 	ipc_space_t             space,
201 	uint32_t                entries_needed)
202 {
203 	mach_port_index_t next_free = 0;
204 	ipc_entry_table_t table;
205 	ipc_entry_t entry;
206 	uint32_t i;
207 
208 	/*
209 	 * Assume that all new entries will need hashing.
210 	 * If the table is more than 87.5% full pretend we didn't have space.
211 	 */
212 	table = is_active_table(space);
213 	if (space->is_table_hashed + entries_needed >
214 	    ipc_entry_table_count(table) * 7 / 8) {
215 		return KERN_NO_SPACE;
216 	}
217 
218 	entry = ipc_entry_table_base(table);
219 
220 	for (i = 0; i < entries_needed; i++) {
221 		next_free = entry->ie_next;
222 		if (next_free == 0) {
223 			return KERN_NO_SPACE;
224 		}
225 
226 		entry = ipc_entry_table_get(table, next_free);
227 
228 		assert(entry && entry->ie_object == IPC_OBJECT_NULL);
229 	}
230 
231 #if CONFIG_PROC_RESOURCE_LIMITS
232 	ipc_space_check_limit_exceeded(space);
233 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
234 	return KERN_SUCCESS;
235 }
236 
/*
 *	Routine:	ipc_entry_claim
 *	Purpose:
 *		Take formal ownership of a held entry.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		Objects must be: NULL, locked, or not initialized yet.
 *		Will not allocate memory.
 *		Callers must have reserved a free entry beforehand
 *		(via ipc_entries_hold()), so the freelist is never empty here.
 *
 *      Note: The returned entry must be marked as modified before
 *            releasing the space lock
 *	Returns:
 *		KERN_SUCCESS	always; *namep and *entryp are filled in.
 */

kern_return_t
ipc_entry_claim(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_port_name_t        *namep,
	ipc_entry_t             *entryp)
{
	ipc_entry_t base, entry;
	ipc_entry_table_t table;
	mach_port_index_t first_free;
	mach_port_gen_t gen;
	mach_port_name_t new_name;

	table = is_active_table(space);
	base  = ipc_entry_table_base(table);

	/* Pop the head of the freelist; entry 0's ie_next is the list root. */
	first_free = base->ie_next;
	assert(first_free != 0);

	entry = ipc_entry_table_get(table, first_free);
	assert(entry &&
	    ipc_entry_table_contains(table, entry->ie_next) &&
	    entry->ie_object == IPC_OBJECT_NULL);
	base->ie_next = entry->ie_next;
	space->is_table_free--;

	/* If a live object was supplied, its (waitq) lock must be held. */
	if (object && waitq_valid(io_waitq(object))) {
		assert(waitq_held(io_waitq(object)));
	}

	/*
	 *	Initialize the new entry: increment gencount and reset
	 *	rollover point if it rolled over, and clear ie_request.
	 */
	gen = ipc_entry_next_gen(entry->ie_bits, space);
	entry->ie_bits = gen;
	entry->ie_request = IE_REQ_NONE;
	entry->ie_object = object;

	/*
	 *	The new name can't be MACH_PORT_NULL because index
	 *	is non-zero.  It can't be MACH_PORT_DEAD because
	 *	the table isn't allowed to grow big enough.
	 *	(See comment in ipc/ipc_table.h.)
	 */
	new_name = MACH_PORT_MAKE(first_free, IE_BITS_GEN(gen));
	assert(MACH_PORT_VALID(new_name));
	*namep = new_name;
	*entryp = entry;

	return KERN_SUCCESS;
}
302 
303 /*
304  *	Routine:	ipc_entry_alloc
305  *	Purpose:
306  *		Allocate an entry out of the space.
307  *	Conditions:
308  *		The space is not locked before, but it is write-locked after
309  *		if the call is successful.  May allocate memory.
310  *	Returns:
311  *		KERN_SUCCESS		An entry was allocated.
312  *		KERN_INVALID_TASK	The space is dead.
313  *		KERN_NO_SPACE		No room for an entry in the space.
314  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
315  */
316 
317 kern_return_t
ipc_entry_alloc(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)318 ipc_entry_alloc(
319 	ipc_space_t             space,
320 	ipc_object_t            object,
321 	mach_port_name_t        *namep,
322 	ipc_entry_t             *entryp)
323 {
324 	kern_return_t kr;
325 
326 	is_write_lock(space);
327 
328 	for (;;) {
329 		if (!is_active(space)) {
330 			is_write_unlock(space);
331 			return KERN_INVALID_TASK;
332 		}
333 
334 		kr = ipc_entries_hold(space, 1);
335 		if (kr == KERN_SUCCESS) {
336 			return ipc_entry_claim(space, object, namep, entryp);
337 		}
338 
339 		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
340 		if (kr != KERN_SUCCESS) {
341 			return kr; /* space is unlocked */
342 		}
343 	}
344 }
345 
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 *		KERN_FAILURE		Couldn't allocate requested name.
 *		KERN_INVALID_VALUE	Supplied port name is invalid.
 *		KERN_NO_SPACE		Index exceeds the maximum table size.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             *entryp)
{
	const mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);

	/*
	 * Callers today never pass MACH_PORT_NULL
	 */
	assert(MACH_PORT_VALID(name));

	/* Reject names whose index could never fit in any table. */
	if (index > ipc_entry_table_count_max()) {
		return KERN_NO_SPACE;
	}
	if (name != ipc_entry_name_mask(name)) {
		/* must have valid generation bits */
		return KERN_INVALID_VALUE;
	}
	/* Index 0 is the reserved freelist head; it cannot be allocated. */
	if (index == 0) {
		return KERN_FAILURE;
	}

	is_write_lock(space);

	for (;;) {
		ipc_entry_table_t table;
		ipc_entry_t entry;

		if (!is_active(space)) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		table = is_active_table(space);

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		   dealt with on entry.
		 *		2) The entry is free
		 *		3) The entry is inuse, for the same name
		 *		4) The entry is inuse, for a different name
		 *
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 4), because ports cannot be renamed.
		 */

		entry = ipc_entry_table_get(table, index);
		if (!entry) {
			/*
			 *      We grow the table so that the name
			 *	index fits in the array space.
			 *      Because the space will be unlocked,
			 *      we must restart.
			 */
			kern_return_t kr;
			kr = ipc_entry_grow_table(space, index + 1);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				return kr;
			}
			continue;
		}

		if (!IE_BITS_TYPE(entry->ie_bits)) {
			mach_port_index_t prev_index;
			ipc_entry_t prev_entry;

			/*
			 *      case #2 -- the entry is free
			 *	Rip the entry out of the free list.
			 *	The freelist is singly linked, so walk from the
			 *	head (entry 0) until we find the predecessor.
			 */

			prev_index = 0;
			prev_entry = ipc_entry_table_base(table);
			while (prev_entry->ie_next != index) {
				prev_index = prev_entry->ie_next;
				prev_entry = ipc_entry_table_get(table, prev_index);
			}

			prev_entry->ie_next = entry->ie_next;
			space->is_table_free--;

			/*
			 *	prev_index can be 0 here if the desired index
			 *	happens to be at the top of the freelist.
			 *
			 *	Mark the previous entry modified -
			 *	reconstructing the name.
			 *
			 *	Do not do so for the first entry, which is
			 *	reserved and ipc_entry_grow_table() will handle
			 *	its ie_next separately after the rescan loop.
			 */
			if (prev_index > 0) {
				ipc_entry_modified(space,
				    MACH_PORT_MAKE(prev_index,
				    IE_BITS_GEN(prev_entry->ie_bits)),
				    prev_entry);
			}

			/* Install the caller's generation with fresh roll bits. */
			entry->ie_bits = ipc_entry_make_gen(gen, space);
			entry->ie_request = IE_REQ_NONE;
			*entryp = entry;

			assert(entry->ie_object == IPC_OBJECT_NULL);
			return KERN_SUCCESS;
		} else if (IE_BITS_GEN(entry->ie_bits) == gen) {
			/* case #3 -- the entry is inuse, for the same name */
			*entryp = entry;
			return KERN_SUCCESS;
		} else {
			/* case #4 -- the entry is inuse, for a different name. */
			/* Collisions are not allowed */
			is_write_unlock(space);
			return KERN_FAILURE;
		}
	}
}
488 
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space, pushing the slot back
 *		onto the freelist while preserving its generation bits.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 *		The caller must already have detached the object
 *		(entry->ie_object == object) and cleared any request.
 */

void
ipc_entry_dealloc(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_entry_table_t table;
	mach_port_index_t index;
	ipc_entry_t base;

	assert(entry->ie_object == object);
	assert(entry->ie_request == IE_REQ_NONE);
	if (object) {
		io_lock_held(object);
	}

#if 1
	/*
	 * Release-build backstop for the assert above: asserts may compile
	 * out, but deallocating with a live request would corrupt state.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		panic("ipc_entry_dealloc()");
	}
#endif

	index = MACH_PORT_INDEX(name);
	table = is_active_table(space);
	base  = ipc_entry_table_base(table);

	assert(index > 0 && entry == ipc_entry_table_get(table, index));

	assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
	/* Keep only gen/roll bits so the next claim advances the generation. */
	entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);
	/* Push this slot onto the head of the freelist (rooted at entry 0). */
	entry->ie_next = base->ie_next;
	entry->ie_object = IPC_OBJECT_NULL;
	base->ie_next = index;
	space->is_table_free++;

	ipc_entry_modified(space, name, entry);
}
536 
537 /*
538  *	Routine:	ipc_entry_modified
539  *	Purpose:
540  *		Note that an entry was modified in a space.
541  *	Conditions:
542  *		Assumes exclusive write access to the space,
543  *		either through a write lock or being the cleaner
544  *		on an inactive space.
545  */
546 
547 void
ipc_entry_modified(ipc_space_t space,mach_port_name_t name,__assert_only ipc_entry_t entry)548 ipc_entry_modified(
549 	ipc_space_t             space,
550 	mach_port_name_t        name,
551 	__assert_only ipc_entry_t entry)
552 {
553 	ipc_entry_table_t table;
554 	mach_port_index_t index;
555 
556 	index = MACH_PORT_INDEX(name);
557 	table = is_active_table(space);
558 
559 	assert(entry == ipc_entry_table_get(table, index));
560 	assert(space->is_low_mod <= ipc_entry_table_count(table));
561 	assert(space->is_high_mod < ipc_entry_table_count(table));
562 
563 	if (index < space->is_low_mod) {
564 		space->is_low_mod = index;
565 	}
566 	if (index > space->is_high_mod) {
567 		space->is_high_mod = index;
568 	}
569 
570 	KERNEL_DEBUG_CONSTANT(
571 		MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_PORT_ENTRY_MODIFY) | DBG_FUNC_NONE,
572 		space->is_task ? task_pid(space->is_task) : 0,
573 		name,
574 		entry->ie_bits,
575 		0,
576 		0);
577 }
578 
#define IPC_ENTRY_GROW_STATS 1
#if IPC_ENTRY_GROW_STATS
/*
 * Debug counters maintained by ipc_entry_grow_table(): total grows,
 * how often and how many times the copy loop had to rescan, how many
 * entries each rescan re-copied, and how deep the freelist walk got
 * (with running maxima for the latter three).
 */
static uint64_t ipc_entry_grow_count = 0;
static uint64_t ipc_entry_grow_rescan = 0;
static uint64_t ipc_entry_grow_rescan_max = 0;
static uint64_t ipc_entry_grow_rescan_entries = 0;
static uint64_t ipc_entry_grow_rescan_entries_max = 0;
static uint64_t ipc_entry_grow_freelist_entries = 0;
static uint64_t ipc_entry_grow_freelist_entries_max = 0;
#endif
589 
/*
 * Mark the current thread as the (sole) grower of the space.
 * Caller holds the space write lock; only one grow may be in flight.
 */
static inline void
ipc_space_start_growing(ipc_space_t is)
{
	assert(!is_growing(is));
	is->is_grower = current_thread();
}
596 
/*
 * Clear the grower, drop the space write lock, then wake any threads
 * that slept in is_write_sleep() waiting for the grow to finish.
 * The wakeup is deliberately after the unlock so woken threads don't
 * immediately block on the lock we still hold.
 */
static void
ipc_space_done_growing_and_unlock(ipc_space_t space)
{
	assert(space->is_grower == current_thread());
	space->is_grower = THREAD_NULL;
	is_write_unlock(space);
	wakeup_all_with_inheritor((event_t)space, THREAD_AWAKENED);
}
605 
/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.  The copy to the new table is
 *		performed with the space unlocked; concurrent modifications
 *		are tracked via the space's is_low_mod/is_high_mod window
 *		and re-copied in a rescan loop until the table is quiescent.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, the space is also returned locked.
 *		On failure, the space is returned unlocked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t             space,
	ipc_table_elems_t       target_count)
{
	ipc_entry_num_t osize, nsize;
	ipc_entry_num_t ocount, ncount;
	ipc_entry_table_t otable, ntable;
	ipc_entry_t obase, nbase;
	mach_port_index_t free_index;
	mach_port_index_t low_mod, hi_mod;
	ipc_table_index_t sanity;
#if IPC_ENTRY_GROW_STATS
	uint64_t rescan_count = 0;
#endif

	if (is_growing(space)) {
		/*
		 *	Somebody else is growing the table.
		 *	We just wait for them to finish.
		 */
		is_write_sleep(space);
		return KERN_SUCCESS;
	}

	otable = is_active_table(space);
	obase  = ipc_entry_table_base(otable);
	osize  = ipc_entry_table_size(otable);
	ocount = ipc_entry_table_size_to_count(osize);

	/* Pick the new size from the growth schedule or the caller's target. */
	if (target_count == ITS_SIZE_NONE) {
		nsize = ipc_entry_table_next_size(IPC_ENTRY_TABLE_MIN, osize,
		    IPC_ENTRY_TABLE_PERIOD);
	} else if (target_count <= ocount) {
		/* Already big enough: someone else grew it meanwhile. */
		return KERN_SUCCESS;
	} else if (target_count > ipc_entry_table_count_max()) {
		goto no_space;
	} else {
		uint32_t tsize = ipc_entry_table_count_to_size(target_count);

		nsize = ipc_entry_table_next_size(IPC_ENTRY_TABLE_MIN, tsize,
		    IPC_ENTRY_TABLE_PERIOD);
	}
	if (nsize > CONFIG_IPC_TABLE_ENTRIES_SIZE_MAX) {
		nsize = CONFIG_IPC_TABLE_ENTRIES_SIZE_MAX;
	}
	/* Clamping produced no growth: the table is already at its cap. */
	if (osize == nsize) {
		goto no_space;
	}


	/*
	 * We'll attempt to grow the table.
	 *
	 * Because we will be copying without the space lock, reset
	 * the lowest_mod index to just beyond the end of the current
	 * table.  Modification of entries (other than hashes) will
	 * bump this downward, and we only have to reprocess entries
	 * above that mark.  Eventually, we'll get done.
	 */
	ipc_space_start_growing(space);
	space->is_low_mod = ocount;
	space->is_high_mod = 0;
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_count++;
#endif
	is_write_unlock(space);

	ntable = ipc_entry_table_alloc_by_size(nsize, Z_WAITOK | Z_ZERO);
	if (ntable == NULL) {
		is_write_lock(space);
		ipc_space_done_growing_and_unlock(space);
		return KERN_RESOURCE_SHORTAGE;
	}

	nbase  = ipc_entry_table_base(ntable);
	nsize  = ipc_entry_table_size(ntable);
	ncount = ipc_entry_table_count(ntable);
	/* Pre-link the brand-new slots [ocount, ncount) into a freelist. */
	ipc_space_rand_freelist(space, nbase, ocount, ncount);

	low_mod = 1;
	hi_mod = ocount - 1;
rescan:
	/*
	 * Within the range of the table that changed, determine what we
	 * have to take action on. For each entry, take a snapshot of the
	 * corresponding entry in the old table (so it won't change
	 * during this iteration). The snapshot may not be self-consistent
	 * (if we caught it in the middle of being changed), so be very
	 * cautious with the values.
	 */
	assert(low_mod > 0);
	for (mach_port_index_t i = MAX(1, low_mod); i <= hi_mod; i++) {
		ipc_entry_t entry = &nbase[i];
		ipc_object_t osnap_object = obase[i].ie_object;
		ipc_entry_bits_t osnap_bits = obase[i].ie_bits;
		ipc_entry_bits_t osnap_request = obase[i].ie_request;

		/*
		 * We need to make sure the osnap_* fields are never reloaded.
		 */
		os_compiler_barrier();

		if (entry->ie_object != osnap_object ||
		    IE_BITS_TYPE(entry->ie_bits) != IE_BITS_TYPE(osnap_bits)) {
			/* Stale copy: fix the send-right hash before overwriting. */
			if (entry->ie_object != IPC_OBJECT_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_table_delete(ntable, entry->ie_object, i, entry);
			}

			entry->ie_object = osnap_object;
			entry->ie_bits = osnap_bits;
			entry->ie_request = osnap_request; /* or ie_next */

			if (osnap_object != IPC_OBJECT_NULL &&
			    IE_BITS_TYPE(osnap_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_table_insert(ntable, osnap_object, i, entry);
			}
		} else {
			/* Same object/type: only the bits/request need refreshing. */
			entry->ie_bits = osnap_bits;
			entry->ie_request = osnap_request; /* or ie_next */
		}
	}
	nbase[0].ie_next = obase[0].ie_next;  /* always rebase the freelist */

	/*
	 * find the end of the freelist (should be short). But be careful,
	 * the list items can change so only follow through truly free entries
	 * (no problem stopping short in those cases, because we'll rescan).
	 */
	free_index = 0;
	for (sanity = 0; sanity < ocount; sanity++) {
		if (nbase[free_index].ie_object != IPC_OBJECT_NULL) {
			break;
		}
		mach_port_index_t i = nbase[free_index].ie_next;
		if (i == 0 || i >= ocount) {
			break;
		}
		free_index = i;
	}
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_freelist_entries += sanity;
	if (sanity > ipc_entry_grow_freelist_entries_max) {
		ipc_entry_grow_freelist_entries_max = sanity;
	}
#endif

	is_write_lock(space);

	/*
	 *	We need to do a wakeup on the space,
	 *	to rouse waiting threads.  We defer
	 *	this until the space is unlocked,
	 *	because we don't want them to spin.
	 */

	if (!is_active(space)) {
		/*
		 *	The space died while it was unlocked.
		 */

		ipc_space_done_growing_and_unlock(space);
		ipc_entry_table_free(&ntable);
		is_write_lock(space);
		return KERN_SUCCESS;
	}

	/* If the space changed while unlocked, go back and process the changes */
	if (space->is_low_mod < ocount) {
		assert(space->is_high_mod > 0);
		low_mod = space->is_low_mod;
		space->is_low_mod = ocount;
		hi_mod = space->is_high_mod;
		space->is_high_mod = 0;
		is_write_unlock(space);

		if (hi_mod >= ocount) {
			panic("corrupt hi_mod: %d, obase: %p, ocount: %d\n",
			    hi_mod, obase, ocount);
		}

#if IPC_ENTRY_GROW_STATS
		rescan_count++;
		if (rescan_count > ipc_entry_grow_rescan_max) {
			ipc_entry_grow_rescan_max = rescan_count;
		}

		ipc_entry_grow_rescan++;
		ipc_entry_grow_rescan_entries += hi_mod - low_mod + 1;
		if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max) {
			ipc_entry_grow_rescan_entries_max = hi_mod - low_mod + 1;
		}
#endif
		goto rescan;
	}

	/* link new free entries onto the rest of the freelist */
	assert(nbase[free_index].ie_next == 0 &&
	    nbase[free_index].ie_object == IPC_OBJECT_NULL);
	nbase[free_index].ie_next = ocount;

	assert(smr_serialized_load(&space->is_table) == otable);

	space->is_table_free += ncount - ocount;
	/* Publish the new table; SMR readers may still be using the old one. */
	smr_serialized_store(&space->is_table, ntable);

	ipc_space_done_growing_and_unlock(space);

	/*
	 *	Now we need to free the old table.
	 */
	ipc_space_retire_table(otable);
	is_write_lock(space);

	return KERN_SUCCESS;

no_space:
	ipc_space_set_at_max_limit(space);
	is_write_unlock(space);
	return KERN_NO_SPACE;
}
845