xref: /xnu-8020.121.3/osfmk/ipc/ipc_entry.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	ipc/ipc_entry.c
60  *	Author:	Rich Draves
61  *	Date:	1989
62  *
63  *	Primitive functions to manipulate translation entries.
64  */
65 
66 #include <mach_debug.h>
67 
68 #include <mach/kern_return.h>
69 #include <mach/port.h>
70 #include <kern/assert.h>
71 #include <kern/sched_prim.h>
72 #include <kern/zalloc.h>
73 #include <kern/misc_protos.h>
74 #include <ipc/port.h>
75 #include <ipc/ipc_entry.h>
76 #include <ipc/ipc_space.h>
77 #include <ipc/ipc_object.h>
78 #include <ipc/ipc_hash.h>
79 #include <ipc/ipc_table.h>
80 #include <ipc/ipc_port.h>
81 #include <string.h>
82 #include <sys/kdebug.h>
83 
84 /*
85  *	Routine:	ipc_entry_lookup
86  *	Purpose:
87  *		Searches for an entry, given its name.
88  *	Conditions:
89  *		The space must be read or write locked throughout.
90  *		The space must be active.
91  */
92 
93 ipc_entry_t
ipc_entry_lookup(ipc_space_t space,mach_port_name_t name)94 ipc_entry_lookup(
95 	ipc_space_t             space,
96 	mach_port_name_t        name)
97 {
98 	mach_port_index_t index;
99 	ipc_entry_t entry, table;
100 
101 	table = is_active_table(space);
102 	index = MACH_PORT_INDEX(name);
103 	if (index > 0 && index < table->ie_size) {
104 		entry = &table[index];
105 		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
106 		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) {
107 			entry = IE_NULL;
108 		}
109 	} else {
110 		entry = IE_NULL;
111 	}
112 
113 	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
114 	return entry;
115 }
116 
117 
118 /*
119  *	Routine:	ipc_entries_hold
120  *	Purpose:
121  *		Verifies that there are at least 'entries_needed'
122  *		free list members
123  *	Conditions:
124  *		The space is write-locked and active throughout.
125  *		An object may be locked.  Will not allocate memory.
126  *	Returns:
127  *		KERN_SUCCESS		Free entries were found.
128  *		KERN_NO_SPACE		No entry allocated.
129  */
130 
131 kern_return_t
ipc_entries_hold(ipc_space_t space,uint32_t entries_needed)132 ipc_entries_hold(
133 	ipc_space_t             space,
134 	uint32_t                entries_needed)
135 {
136 	ipc_entry_t table = is_active_table(space);
137 	mach_port_index_t next_free = 0;
138 	uint32_t i;
139 
140 	/*
141 	 * Assume that all new entries will need hashing.
142 	 * If the table is more than 87.5% full pretend we didn't have space.
143 	 */
144 	if (space->is_table_hashed + entries_needed > table->ie_size * 7 / 8) {
145 		return KERN_NO_SPACE;
146 	}
147 
148 	for (i = 0; i < entries_needed; i++) {
149 		next_free = table[next_free].ie_next;
150 		if (next_free == 0) {
151 			return KERN_NO_SPACE;
152 		}
153 		assert(next_free < table->ie_size);
154 		assert(table[next_free].ie_object == IO_NULL);
155 	}
156 
157 #if CONFIG_PROC_RESOURCE_LIMITS
158 	ipc_space_check_limit_exceeded(space);
159 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
160 	return KERN_SUCCESS;
161 }
162 
163 /*
164  *	Routine:	ipc_entry_claim
165  *	Purpose:
166  *		Take formal ownership of a held entry.
167  *	Conditions:
168  *		The space is write-locked and active throughout.
169  *		Objects must be: NULL, locked, or not initialized yet.
170  *		Will not allocate memory.
171  *
172  *      Note: The returned entry must be marked as modified before
173  *            releasing the space lock
174  */
175 
kern_return_t
ipc_entry_claim(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_port_name_t        *namep,
	ipc_entry_t             *entryp)
{
	ipc_entry_t entry;
	ipc_entry_t table;
	mach_port_index_t first_free;
	mach_port_gen_t gen;
	mach_port_name_t new_name;

	table = is_active_table(space);

	/*
	 * Pop the head of the freelist.  A prior ipc_entries_hold() call
	 * guarantees the list is non-empty, hence the assert.
	 */
	first_free = table->ie_next;
	assert(first_free != 0);

	entry = &table[first_free];
	table->ie_next = entry->ie_next;
	space->is_table_free--;

	assert(table->ie_next < table->ie_size);
	assert(entry->ie_object == IO_NULL);
	/* if a real, initialized object was supplied, it must be locked */
	if (object && waitq_valid(io_waitq(object))) {
		assert(waitq_held(io_waitq(object)));
	}

	/*
	 *	Initialize the new entry: increment gencount and reset
	 *	rollover point if it rolled over, and clear ie_request.
	 */
	gen = ipc_entry_new_gen(entry->ie_bits);
	if (__improbable(ipc_entry_gen_rolled(entry->ie_bits, gen))) {
		/* randomize the next rollover point to hinden name reuse prediction */
		ipc_entry_bits_t roll = ipc_space_get_rollpoint(space);
		gen = ipc_entry_new_rollpoint(roll);
	}
	entry->ie_bits = gen;
	entry->ie_request = IE_REQ_NONE;
	entry->ie_object = object;

	/*
	 *	The new name can't be MACH_PORT_NULL because index
	 *	is non-zero.  It can't be MACH_PORT_DEAD because
	 *	the table isn't allowed to grow big enough.
	 *	(See comment in ipc/ipc_table.h.)
	 */
	new_name = MACH_PORT_MAKE(first_free, gen);
	assert(MACH_PORT_VALID(new_name));
	*namep = new_name;
	*entryp = entry;

	return KERN_SUCCESS;
}
230 
231 /*
232  *	Routine:	ipc_entry_alloc
233  *	Purpose:
234  *		Allocate an entry out of the space.
235  *	Conditions:
236  *		The space is not locked before, but it is write-locked after
237  *		if the call is successful.  May allocate memory.
238  *	Returns:
239  *		KERN_SUCCESS		An entry was allocated.
240  *		KERN_INVALID_TASK	The space is dead.
241  *		KERN_NO_SPACE		No room for an entry in the space.
242  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
243  */
244 
245 kern_return_t
ipc_entry_alloc(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)246 ipc_entry_alloc(
247 	ipc_space_t             space,
248 	ipc_object_t            object,
249 	mach_port_name_t        *namep,
250 	ipc_entry_t             *entryp)
251 {
252 	kern_return_t kr;
253 
254 	is_write_lock(space);
255 
256 	for (;;) {
257 		if (!is_active(space)) {
258 			is_write_unlock(space);
259 			return KERN_INVALID_TASK;
260 		}
261 
262 		kr = ipc_entries_hold(space, 1);
263 		if (kr == KERN_SUCCESS) {
264 			return ipc_entry_claim(space, object, namep, entryp);
265 		}
266 
267 		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
268 		if (kr != KERN_SUCCESS) {
269 			return kr; /* space is unlocked */
270 		}
271 	}
272 }
273 
274 /*
275  *	Routine:	ipc_entry_alloc_name
276  *	Purpose:
277  *		Allocates/finds an entry with a specific name.
278  *		If an existing entry is returned, its type will be nonzero.
279  *	Conditions:
280  *		The space is not locked before, but it is write-locked after
281  *		if the call is successful.  May allocate memory.
282  *	Returns:
283  *		KERN_SUCCESS		Found existing entry with same name.
284  *		KERN_SUCCESS		Allocated a new entry.
285  *		KERN_INVALID_TASK	The space is dead.
286  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
287  *		KERN_FAILURE		Couldn't allocate requested name.
288  *      KERN_INVALID_VALUE  Supplied port name is invalid.
289  */
290 
kern_return_t
ipc_entry_alloc_name(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             *entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);

	/* refuse names whose index could never fit in any table */
	if (index > ipc_table_max_entries()) {
		return KERN_NO_SPACE;
	}

	if (name != ipc_entry_name_mask(name)) {
		/* must have valid generation bits */
		return KERN_INVALID_VALUE;
	}

	assert(MACH_PORT_VALID(name));


	is_write_lock(space);

	for (;;) {
		ipc_entry_t table, entry;

		if (!is_active(space)) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		table = is_active_table(space);

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < table->ie_size) {
			entry = &table[index];

			if (index == 0) {
				/* case #1 - the entry is reserved */
				is_write_unlock(space);
				return KERN_FAILURE;
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					/* case #2 -- the entry is inuse, for the same name */
					/* NOTE: returns with the space still write-locked */
					*entryp = entry;
					return KERN_SUCCESS;
				} else {
					/* case #3 -- the entry is inuse, for a different name. */
					/* Collisions are not allowed */
					is_write_unlock(space);
					return KERN_FAILURE;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *      case #4 -- the entry is free
				 *	Rip the entry out of the free list.
				 */

				/* linear scan: find the freelist node pointing at index */
				for (free_index = 0;
				    (next_index = table[free_index].ie_next)
				    != index;
				    free_index = next_index) {
					continue;
				}

				/* unlink: predecessor now skips over our entry */
				table[free_index].ie_next =
				    table[next_index].ie_next;
				space->is_table_free--;

				/* mark the previous entry modified - reconstructing the name */
				ipc_entry_modified(space,
				    MACH_PORT_MAKE(free_index,
				    IE_BITS_GEN(table[free_index].ie_bits)),
				    &table[free_index]);

				/* install the requested generation; no request slot yet */
				entry->ie_bits = gen;
				entry->ie_request = IE_REQ_NONE;
				*entryp = entry;

				/* NOTE: returns with the space still write-locked */
				assert(entry->ie_object == IO_NULL);
				return KERN_SUCCESS;
			}
		}

		/*
		 *      We grow the table so that the name
		 *	index fits in the array space.
		 *      Because the space will be unlocked,
		 *      we must restart.
		 */
		kern_return_t kr;
		kr = ipc_entry_grow_table(space, index + 1);
		if (kr != KERN_SUCCESS) {
			/* space is unlocked */
			return kr;
		}
		continue;
	}
}
401 
402 /*
403  *	Routine:	ipc_entry_dealloc
404  *	Purpose:
405  *		Deallocates an entry from a space.
406  *	Conditions:
407  *		The space must be write-locked throughout.
408  *		The space must be active.
409  */
410 
411 void
ipc_entry_dealloc(ipc_space_t space,ipc_object_t object,mach_port_name_t name,ipc_entry_t entry)412 ipc_entry_dealloc(
413 	ipc_space_t             space,
414 	ipc_object_t            object,
415 	mach_port_name_t        name,
416 	ipc_entry_t             entry)
417 {
418 	ipc_entry_t table;
419 	ipc_entry_num_t size;
420 	mach_port_index_t index;
421 
422 	assert(entry->ie_object == object);
423 	assert(entry->ie_request == IE_REQ_NONE);
424 	if (object) {
425 		io_lock_held(object);
426 	}
427 
428 #if 1
429 	if (entry->ie_request != IE_REQ_NONE) {
430 		panic("ipc_entry_dealloc()");
431 	}
432 #endif
433 
434 	index = MACH_PORT_INDEX(name);
435 	table = is_active_table(space);
436 	size  = table->ie_size;
437 
438 	assert(index > 0 && index < size);
439 	assert(entry == &table[index]);
440 
441 	assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
442 	entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);
443 	entry->ie_next = table->ie_next;
444 	entry->ie_object = IO_NULL;
445 	table->ie_next = index;
446 	space->is_table_free++;
447 
448 	ipc_entry_modified(space, name, entry);
449 }
450 
451 /*
452  *	Routine:	ipc_entry_modified
453  *	Purpose:
454  *		Note that an entry was modified in a space.
455  *	Conditions:
456  *		Assumes exclusive write access to the space,
457  *		either through a write lock or being the cleaner
458  *		on an inactive space.
459  */
460 
461 void
ipc_entry_modified(ipc_space_t space,mach_port_name_t name,__assert_only ipc_entry_t entry)462 ipc_entry_modified(
463 	ipc_space_t             space,
464 	mach_port_name_t        name,
465 	__assert_only ipc_entry_t entry)
466 {
467 	ipc_entry_t table;
468 	ipc_entry_num_t size;
469 	mach_port_index_t index;
470 
471 	index = MACH_PORT_INDEX(name);
472 	table = is_active_table(space);
473 	size  = table->ie_size;
474 
475 	assert(index < size);
476 	assert(entry == &table[index]);
477 
478 	assert(space->is_low_mod <= size);
479 	assert(space->is_high_mod < size);
480 
481 	if (index < space->is_low_mod) {
482 		space->is_low_mod = index;
483 	}
484 	if (index > space->is_high_mod) {
485 		space->is_high_mod = index;
486 	}
487 
488 	KERNEL_DEBUG_CONSTANT(
489 		MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_PORT_ENTRY_MODIFY) | DBG_FUNC_NONE,
490 		space->is_task ? task_pid(space->is_task) : 0,
491 		name,
492 		entry->ie_bits,
493 		0,
494 		0);
495 }
496 
#define IPC_ENTRY_GROW_STATS 1
#if IPC_ENTRY_GROW_STATS
/*
 * Telemetry for ipc_entry_grow_table(): number of grows, number of
 * rescan passes forced by concurrent modifications (total and per-call
 * max), entries reprocessed by rescans, and freelist walk lengths.
 */
static uint64_t ipc_entry_grow_count = 0;
static uint64_t ipc_entry_grow_rescan = 0;
static uint64_t ipc_entry_grow_rescan_max = 0;
static uint64_t ipc_entry_grow_rescan_entries = 0;
static uint64_t ipc_entry_grow_rescan_entries_max = 0;
static uint64_t ipc_entry_grow_freelist_entries = 0;
static uint64_t ipc_entry_grow_freelist_entries_max = 0;
#endif
507 
/*
 * Mark the space as being grown by the current thread.
 * Caller holds the space write lock; only one grower at a time.
 */
static inline void
ipc_space_start_growing(ipc_space_t is)
{
	assert(!is_growing(is));
	is->is_grower = current_thread();
}
514 
/*
 * Clear the grower marker, drop the space write lock, and wake
 * every thread that blocked in is_write_sleep() waiting for the
 * grow to finish (wakeup happens after unlock so waiters don't spin).
 */
static void
ipc_space_done_growing_and_unlock(ipc_space_t space)
{
	assert(space->is_grower == current_thread());
	space->is_grower = THREAD_NULL;
	is_write_unlock(space);
	wakeup_all_with_inheritor((event_t)space, THREAD_AWAKENED);
}
523 
524 /*
525  *	Routine:	ipc_entry_grow_table
526  *	Purpose:
527  *		Grows the table in a space.
528  *	Conditions:
529  *		The space must be write-locked and active before.
530  *		If successful, the space is also returned locked.
531  *		On failure, the space is returned unlocked.
532  *		Allocates memory.
533  *	Returns:
534  *		KERN_SUCCESS		Grew the table.
535  *		KERN_SUCCESS		Somebody else grew the table.
536  *		KERN_SUCCESS		The space died.
537  *		KERN_NO_SPACE		Table has maximum size already.
538  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
539  */
540 
kern_return_t
ipc_entry_grow_table(
	ipc_space_t             space,
	ipc_table_elems_t       target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	ipc_entry_t otable, table;
	ipc_table_size_t oits, its, nits;
	mach_port_index_t free_index;
	mach_port_index_t low_mod, hi_mod;
	ipc_table_index_t sanity;
#if IPC_ENTRY_GROW_STATS
	uint64_t rescan_count = 0;
#endif

	if (is_growing(space)) {
		/*
		 *	Somebody else is growing the table.
		 *	We just wait for them to finish.
		 */
		is_write_sleep(space);
		return KERN_SUCCESS;
	}

	otable = is_active_table(space);
	its = space->is_table_next;
	size = its->its_size;

	/*
	 * Since is_table_next points to the next natural size
	 * we can identify the current size entry.
	 */
	oits = its - 1;
	osize = oits->its_size;

	/*
	 * If there is no target size, then the new size is simply
	 * specified by is_table_next.  If there is a target
	 * size, then search for the next entry.
	 */
	if (target_size != ITS_SIZE_NONE) {
		if (target_size <= osize) {
			/* the space is locked */
			return KERN_SUCCESS;
		}

		/* advance through the size table until target_size fits */
		psize = osize;
		while ((psize != size) && (target_size > size)) {
			psize = size;
			its++;
			size = its->its_size;
		}
		if (psize == size) {
			/* hit the end of the size table: cannot satisfy target */
			goto no_space;
		}
	}

	if (osize == size) {
		/* already at maximum size */
		goto no_space;
	}

	nits = its + 1;
	nsize = nits->its_size;
	assert((osize < size) && (size <= nsize));

	/*
	 * We'll attempt to grow the table.
	 *
	 * Because we will be copying without the space lock, reset
	 * the lowest_mod index to just beyond the end of the current
	 * table.  Modification of entries (other than hashes) will
	 * bump this downward, and we only have to reprocess entries
	 * above that mark.  Eventually, we'll get done.
	 */
	ipc_space_start_growing(space);
	space->is_low_mod = osize;
	space->is_high_mod = 0;
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_count++;
#endif
	is_write_unlock(space);

	table = it_entries_alloc(its); /* zero-initialized */
	if (table == IE_NULL) {
		is_write_lock(space);
		ipc_space_done_growing_and_unlock(space);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* build a randomized freelist over the brand-new slots [osize, size) */
	ipc_space_rand_freelist(space, table, osize, size);

	low_mod = 1;
	hi_mod = osize - 1;
rescan:
	/*
	 * Within the range of the table that changed, determine what we
	 * have to take action on. For each entry, take a snapshot of the
	 * corresponding entry in the old table (so it won't change
	 * during this iteration). The snapshot may not be self-consistent
	 * (if we caught it in the middle of being changed), so be very
	 * cautious with the values.
	 */
	assert(low_mod > 0);
	for (mach_port_index_t i = low_mod; i <= hi_mod; i++) {
		ipc_entry_t entry = &table[i];
		ipc_object_t osnap_object = otable[i].ie_object;
		ipc_entry_bits_t osnap_bits = otable[i].ie_bits;
		ipc_entry_bits_t osnap_request = otable[i].ie_request;

		/*
		 * We need to make sure the osnap_* fields are never reloaded.
		 */
		os_compiler_barrier();

		if (entry->ie_object != osnap_object ||
		    IE_BITS_TYPE(entry->ie_bits) != IE_BITS_TYPE(osnap_bits)) {
			/* the entry changed since we last copied it:
			 * fix up the new table's reverse hash to match */
			if (entry->ie_object != IO_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_table_delete(table, entry->ie_object, i, entry);
			}

			entry->ie_object = osnap_object;
			entry->ie_bits = osnap_bits;
			entry->ie_request = osnap_request; /* or ie_next */

			if (osnap_object != IO_NULL &&
			    IE_BITS_TYPE(osnap_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_table_insert(table, osnap_object, i, entry);
			}
		} else {
			entry->ie_bits = osnap_bits;
			entry->ie_request = osnap_request; /* or ie_next */
		}
	}
	table[0].ie_next = otable[0].ie_next;  /* always rebase the freelist */

	/*
	 * find the end of the freelist (should be short). But be careful,
	 * the list items can change so only follow through truly free entries
	 * (no problem stopping short in those cases, because we'll rescan).
	 */
	free_index = 0;
	for (sanity = 0; sanity < osize; sanity++) {
		if (table[free_index].ie_object != IPC_OBJECT_NULL) {
			break;
		}
		mach_port_index_t i = table[free_index].ie_next;
		if (i == 0 || i >= osize) {
			break;
		}
		free_index = i;
	}
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_freelist_entries += sanity;
	if (sanity > ipc_entry_grow_freelist_entries_max) {
		ipc_entry_grow_freelist_entries_max = sanity;
	}
#endif

	is_write_lock(space);

	/*
	 *	We need to do a wakeup on the space,
	 *	to rouse waiting threads.  We defer
	 *	this until the space is unlocked,
	 *	because we don't want them to spin.
	 */

	if (!is_active(space)) {
		/*
		 *	The space died while it was unlocked.
		 */

		ipc_space_done_growing_and_unlock(space);
		ipc_space_free_table(table);
		is_write_lock(space);
		return KERN_SUCCESS;
	}

	/* If the space changed while unlocked, go back and process the changes */
	if (space->is_low_mod < osize) {
		assert(space->is_high_mod > 0);
		/* consume the dirty window, reset it, and re-copy that range */
		low_mod = space->is_low_mod;
		space->is_low_mod = osize;
		hi_mod = space->is_high_mod;
		space->is_high_mod = 0;
		is_write_unlock(space);
#if IPC_ENTRY_GROW_STATS
		rescan_count++;
		if (rescan_count > ipc_entry_grow_rescan_max) {
			ipc_entry_grow_rescan_max = rescan_count;
		}

		ipc_entry_grow_rescan++;
		ipc_entry_grow_rescan_entries += hi_mod - low_mod + 1;
		if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max) {
			ipc_entry_grow_rescan_entries_max = hi_mod - low_mod + 1;
		}
#endif
		goto rescan;
	}

	/* link new free entries onto the rest of the freelist */
	assert(table[free_index].ie_next == 0 &&
	    table[free_index].ie_object == IO_NULL);
	table[free_index].ie_next = osize;

	assert(hazard_ptr_serialized_load(&space->is_table) == otable);
	assert((space->is_table_next == its) ||
	    (target_size != ITS_SIZE_NONE));
	assert(otable->ie_size == osize);

	/* publish the new table; readers may still hold the old one */
	space->is_table_next = nits;
	space->is_table_free += size - osize;
	hazard_ptr_serialized_store(&space->is_table, table);

	ipc_space_done_growing_and_unlock(space);

	/*
	 *	Now we need to free the old table.
	 */
	ipc_space_retire_table(otable);
	is_write_lock(space);

	return KERN_SUCCESS;

no_space:
	ipc_space_set_at_max_limit(space);
	is_write_unlock(space);
	return KERN_NO_SPACE;
}
773 
774 
775 /*
776  *	Routine:	ipc_entry_name_mask
777  *	Purpose:
778  *		Ensure a mach port name has the default ipc entry
779  *		generation bits set. This can be used to ensure that
780  *		a name passed in by user space matches names generated
781  *		by the kernel.
782  *	Conditions:
783  *		None.
784  *	Returns:
785  *		'name' input with default generation bits masked or added
786  *		as appropriate.
787  */
788 mach_port_name_t
ipc_entry_name_mask(mach_port_name_t name)789 ipc_entry_name_mask(mach_port_name_t name)
790 {
791 #ifndef NO_PORT_GEN
792 	static mach_port_name_t null_name = MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
793 	return name | null_name;
794 #else
795 	static mach_port_name_t null_name = MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
796 	return name & ~null_name;
797 #endif
798 }
799