xref: /xnu-8020.140.41/osfmk/kern/stack.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1*27b03b36SApple OSS Distributions /*
2*27b03b36SApple OSS Distributions  * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3*27b03b36SApple OSS Distributions  *
4*27b03b36SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*27b03b36SApple OSS Distributions  *
6*27b03b36SApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*27b03b36SApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*27b03b36SApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*27b03b36SApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*27b03b36SApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*27b03b36SApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*27b03b36SApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*27b03b36SApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*27b03b36SApple OSS Distributions  *
15*27b03b36SApple OSS Distributions  * Please obtain a copy of the License at
16*27b03b36SApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*27b03b36SApple OSS Distributions  *
18*27b03b36SApple OSS Distributions  * The Original Code and all software distributed under the License are
19*27b03b36SApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*27b03b36SApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*27b03b36SApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*27b03b36SApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*27b03b36SApple OSS Distributions  * Please see the License for the specific language governing rights and
24*27b03b36SApple OSS Distributions  * limitations under the License.
25*27b03b36SApple OSS Distributions  *
26*27b03b36SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*27b03b36SApple OSS Distributions  */
28*27b03b36SApple OSS Distributions /*
29*27b03b36SApple OSS Distributions  *	Kernel stack management routines.
30*27b03b36SApple OSS Distributions  */
31*27b03b36SApple OSS Distributions 
32*27b03b36SApple OSS Distributions #include <mach/mach_host.h>
33*27b03b36SApple OSS Distributions #include <mach/mach_types.h>
34*27b03b36SApple OSS Distributions #include <mach/processor_set.h>
35*27b03b36SApple OSS Distributions 
36*27b03b36SApple OSS Distributions #include <kern/kern_types.h>
37*27b03b36SApple OSS Distributions #include <kern/lock_group.h>
38*27b03b36SApple OSS Distributions #include <kern/mach_param.h>
39*27b03b36SApple OSS Distributions #include <kern/percpu.h>
40*27b03b36SApple OSS Distributions #include <kern/processor.h>
41*27b03b36SApple OSS Distributions #include <kern/thread.h>
42*27b03b36SApple OSS Distributions #include <kern/zalloc.h>
43*27b03b36SApple OSS Distributions #include <kern/kalloc.h>
44*27b03b36SApple OSS Distributions #include <kern/ledger.h>
45*27b03b36SApple OSS Distributions 
46*27b03b36SApple OSS Distributions #include <vm/vm_map.h>
47*27b03b36SApple OSS Distributions #include <vm/vm_kern.h>
48*27b03b36SApple OSS Distributions 
49*27b03b36SApple OSS Distributions #include <mach_debug.h>
50*27b03b36SApple OSS Distributions #include <san/kasan.h>
51*27b03b36SApple OSS Distributions 
/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

static SIMPLE_LOCK_DECLARE(stack_lock_data, 0);
#define stack_lock()            simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()          simple_unlock(&stack_lock_data)

/* Maximum number of free stacks each cpu keeps in its private cache. */
#define STACK_CACHE_SIZE        2

/* Global LIFO of free stacks, linked through stack_next(). */
static vm_offset_t              stack_free_list;

static unsigned int             stack_free_count, stack_free_hiwat;             /* free list count */
static unsigned int             stack_hiwat;            /* high watermark of stack_total */
unsigned int                    stack_total;                            /* current total count */
unsigned long long              stack_allocs;                           /* total count of allocations */

static unsigned int             stack_free_target;      /* desired free-list length, recomputed by compute_stack_target() */
static int                      stack_free_delta;       /* net frees minus allocs since the last computation period */

static unsigned int             stack_new_count;                                                /* total new stack allocations */

/*
 * Stack size and alignment masks: sealed after stack_init() runs.
 * stack_addr_mask covers the (power-of-two) aligned region a stack
 * is allocated in; kernel_stack_mask is its complement.
 */
static SECURITY_READ_ONLY_LATE(vm_offset_t)  stack_addr_mask;
SECURITY_READ_ONLY_LATE(vm_offset_t)         kernel_stack_size;
SECURITY_READ_ONLY_LATE(vm_offset_t)         kernel_stack_mask;
vm_offset_t                                  kernel_stack_depth_max; /* not maintained in this file */

/* Small per-cpu cache of free stacks; touched only at splsched, no lock. */
struct stack_cache {
	vm_offset_t     free;
	unsigned int    count;
};
static struct stack_cache PERCPU_DATA(stack_cache);

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)       \
	(*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
94*27b03b36SApple OSS Distributions 
95*27b03b36SApple OSS Distributions static inline int
log2(vm_offset_t size)96*27b03b36SApple OSS Distributions log2(vm_offset_t size)
97*27b03b36SApple OSS Distributions {
98*27b03b36SApple OSS Distributions 	int     result;
99*27b03b36SApple OSS Distributions 	for (result = 0; size > 0; result++) {
100*27b03b36SApple OSS Distributions 		size >>= 1;
101*27b03b36SApple OSS Distributions 	}
102*27b03b36SApple OSS Distributions 	return result;
103*27b03b36SApple OSS Distributions }
104*27b03b36SApple OSS Distributions 
105*27b03b36SApple OSS Distributions static inline vm_offset_t
roundup_pow2(vm_offset_t size)106*27b03b36SApple OSS Distributions roundup_pow2(vm_offset_t size)
107*27b03b36SApple OSS Distributions {
108*27b03b36SApple OSS Distributions 	return 1UL << (log2(size - 1) + 1);
109*27b03b36SApple OSS Distributions }
110*27b03b36SApple OSS Distributions 
/* Forward declarations for the allocation/free primitives below. */
static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);
113*27b03b36SApple OSS Distributions 
/*
 * Configure the kernel stack size and the derived alignment masks.
 * Runs once during startup (see the STARTUP registration below),
 * before the SECURITY_READ_ONLY_LATE globals are sealed.
 */
static void
stack_init(void)
{
	uint32_t kernel_stack_pages = atop(KERNEL_STACK_SIZE);

	kernel_stack_size = KERNEL_STACK_SIZE;
	kernel_stack_mask = -KERNEL_STACK_SIZE;

	/* The stack size may be overridden via the kernel_stack_pages boot-arg. */
	if (PE_parse_boot_argn("kernel_stack_pages",
	    &kernel_stack_pages,
	    sizeof(kernel_stack_pages))) {
		kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
	}

	/* The resulting size must be an exact multiple of the page size. */
	if (kernel_stack_size < round_page(kernel_stack_size)) {
		panic("stack_init: stack size %p not a multiple of page size %d",
		    (void *) kernel_stack_size, PAGE_SIZE);
	}

	/*
	 * Stacks are allocated with power-of-two alignment so the stack
	 * base can be recovered by masking an address with
	 * kernel_stack_mask; this replaces the provisional mask set
	 * above once the final size is known.
	 */
	stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
	kernel_stack_mask = ~stack_addr_mask;
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, stack_init);
137*27b03b36SApple OSS Distributions 
138*27b03b36SApple OSS Distributions /*
139*27b03b36SApple OSS Distributions  *	stack_alloc:
140*27b03b36SApple OSS Distributions  *
141*27b03b36SApple OSS Distributions  *	Allocate a stack for a thread, may
142*27b03b36SApple OSS Distributions  *	block.
143*27b03b36SApple OSS Distributions  */
144*27b03b36SApple OSS Distributions 
/*
 * Pop a stack off the global free list if one is available, otherwise
 * carve a new one out of kernel VM.  May block inside
 * kernel_memory_allocate(); KMA_NOFAIL means allocation never returns
 * failure.  Returns the usable stack base address (just above the
 * lower guard page).
 */
static vm_offset_t
stack_alloc_internal(void)
{
	vm_offset_t     stack = 0;
	spl_t           s;
	kma_flags_t     flags = KMA_NOFAIL | KMA_GUARD_FIRST | KMA_GUARD_LAST |
	    KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;

	/* The free list may only be touched at splsched under stack_lock. */
	s = splsched();
	stack_lock();
	stack_allocs++;
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	} else {
		/* No cached stack: account for a brand-new one. */
		if (++stack_total > stack_hiwat) {
			stack_hiwat = stack_total;
		}
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		kernel_memory_allocate(kernel_map, &stack,
		    kernel_stack_size + ptoa(2), stack_addr_mask,
		    flags, VM_KERN_MEMORY_STACK);

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}
	return stack;
}
190*27b03b36SApple OSS Distributions 
191*27b03b36SApple OSS Distributions void
stack_alloc(thread_t thread)192*27b03b36SApple OSS Distributions stack_alloc(
193*27b03b36SApple OSS Distributions 	thread_t        thread)
194*27b03b36SApple OSS Distributions {
195*27b03b36SApple OSS Distributions 	assert(thread->kernel_stack == 0);
196*27b03b36SApple OSS Distributions 	machine_stack_attach(thread, stack_alloc_internal());
197*27b03b36SApple OSS Distributions }
198*27b03b36SApple OSS Distributions 
199*27b03b36SApple OSS Distributions void
stack_handoff(thread_t from,thread_t to)200*27b03b36SApple OSS Distributions stack_handoff(thread_t from, thread_t to)
201*27b03b36SApple OSS Distributions {
202*27b03b36SApple OSS Distributions 	assert(from == current_thread());
203*27b03b36SApple OSS Distributions 	machine_stack_handoff(from, to);
204*27b03b36SApple OSS Distributions }
205*27b03b36SApple OSS Distributions 
206*27b03b36SApple OSS Distributions /*
207*27b03b36SApple OSS Distributions  *	stack_free:
208*27b03b36SApple OSS Distributions  *
209*27b03b36SApple OSS Distributions  *	Detach and free the stack for a thread.
210*27b03b36SApple OSS Distributions  */
211*27b03b36SApple OSS Distributions void
stack_free(thread_t thread)212*27b03b36SApple OSS Distributions stack_free(
213*27b03b36SApple OSS Distributions 	thread_t        thread)
214*27b03b36SApple OSS Distributions {
215*27b03b36SApple OSS Distributions 	vm_offset_t         stack = machine_stack_detach(thread);
216*27b03b36SApple OSS Distributions 
217*27b03b36SApple OSS Distributions 	assert(stack);
218*27b03b36SApple OSS Distributions 	if (stack != thread->reserved_stack) {
219*27b03b36SApple OSS Distributions 		stack_free_stack(stack);
220*27b03b36SApple OSS Distributions 	}
221*27b03b36SApple OSS Distributions }
222*27b03b36SApple OSS Distributions 
223*27b03b36SApple OSS Distributions void
stack_free_reserved(thread_t thread)224*27b03b36SApple OSS Distributions stack_free_reserved(
225*27b03b36SApple OSS Distributions 	thread_t        thread)
226*27b03b36SApple OSS Distributions {
227*27b03b36SApple OSS Distributions 	if (thread->reserved_stack != thread->kernel_stack) {
228*27b03b36SApple OSS Distributions 		stack_free_stack(thread->reserved_stack);
229*27b03b36SApple OSS Distributions 	}
230*27b03b36SApple OSS Distributions }
231*27b03b36SApple OSS Distributions 
/*
 * Return a stack to the per-cpu cache when there is room, otherwise
 * push it onto the global free list.  The per-cpu cache needs no lock:
 * it is only touched at splsched, so the cpu cannot be preempted and
 * each cpu owns its own cache.
 */
static void
stack_free_stack(
	vm_offset_t             stack)
{
	struct stack_cache      *cache;
	spl_t                           s;

#if KASAN_DEBUG
	/* Sanity check - stack should be unpoisoned by now */
	assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

	s = splsched();
	cache = PERCPU_GET(stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		/* Fast path: small per-cpu cache, lock-free. */
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		/* Slow path: global free list, protected by stack_lock. */
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat) {
			stack_free_hiwat = stack_free_count;
		}
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
262*27b03b36SApple OSS Distributions 
263*27b03b36SApple OSS Distributions /*
264*27b03b36SApple OSS Distributions  *	stack_alloc_try:
265*27b03b36SApple OSS Distributions  *
266*27b03b36SApple OSS Distributions  *	Non-blocking attempt to allocate a
267*27b03b36SApple OSS Distributions  *	stack for a thread.
268*27b03b36SApple OSS Distributions  *
269*27b03b36SApple OSS Distributions  *	Returns TRUE on success.
270*27b03b36SApple OSS Distributions  *
271*27b03b36SApple OSS Distributions  *	Called at splsched.
272*27b03b36SApple OSS Distributions  */
boolean_t
stack_alloc_try(
	thread_t                thread)
{
	struct stack_cache      *cache;
	vm_offset_t                     stack;

	/* Try the per-cpu cache first: lock-free at splsched. */
	cache = PERCPU_GET(stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	} else {
		/*
		 * Unlocked peek at the global free list to skip the lock
		 * when the list is obviously empty; recheck under the
		 * lock before actually popping.
		 */
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	/* Fall back to the thread's reserved stack, if it has one. */
	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return TRUE;
	}

	return FALSE;
}
305*27b03b36SApple OSS Distributions 
306*27b03b36SApple OSS Distributions static unsigned int             stack_collect_tick, last_stack_tick;
307*27b03b36SApple OSS Distributions 
308*27b03b36SApple OSS Distributions /*
309*27b03b36SApple OSS Distributions  *	stack_collect:
310*27b03b36SApple OSS Distributions  *
311*27b03b36SApple OSS Distributions  *	Free excess kernel stacks, may
312*27b03b36SApple OSS Distributions  *	block.
313*27b03b36SApple OSS Distributions  */
void
stack_collect(void)
{
	/* Run at most once per compute_stack_target() period. */
	if (stack_collect_tick != last_stack_tick) {
		unsigned int    target;
		vm_offset_t             stack;
		spl_t                   s;

		s = splsched();
		stack_lock();

		/*
		 * Keep enough free stacks to refill every per-cpu cache
		 * plus the recent-demand estimate; |stack_free_delta| is
		 * added as slack for in-flight alloc/free activity.
		 */
		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			/* Drop the lock and spl: kmem_free() may block. */
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			kmem_free(kernel_map, stack, kernel_stack_size + ptoa(2));
			stack = 0;

			s = splsched();
			stack_lock();

			/* Recompute: the target may have moved while unlocked. */
			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
363*27b03b36SApple OSS Distributions 
364*27b03b36SApple OSS Distributions /*
365*27b03b36SApple OSS Distributions  *	compute_stack_target:
366*27b03b36SApple OSS Distributions  *
367*27b03b36SApple OSS Distributions  *	Computes a new target free list count
368*27b03b36SApple OSS Distributions  *	based on recent alloc / free activity.
369*27b03b36SApple OSS Distributions  *
370*27b03b36SApple OSS Distributions  *	Limits stack collection to once per
371*27b03b36SApple OSS Distributions  *	computation period.
372*27b03b36SApple OSS Distributions  */
void
compute_stack_target(
	__unused void           *arg)
{
	spl_t           s;

	s = splsched();
	stack_lock();

	/* Exponentially decay the previous target (drop ~1/5 per period). */
	if (stack_free_target > 5) {
		stack_free_target = (4 * stack_free_target) / 5;
	} else if (stack_free_target > 0) {
		stack_free_target--;
	}

	/* Fold in the magnitude of this period's net free-list movement. */
	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	/* Advancing the tick re-arms stack_collect(). */
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
396*27b03b36SApple OSS Distributions 
/* OBSOLETE */
void    stack_privilege(
	thread_t        thread);

/*
 * OBSOLETE: deliberately a no-op; the symbol is kept only so existing
 * callers continue to link.
 */
void
stack_privilege(
	__unused thread_t       thread)
{
	/* OBSOLETE */
}
407*27b03b36SApple OSS Distributions 
408*27b03b36SApple OSS Distributions /*
409*27b03b36SApple OSS Distributions  * Return info on stack usage for threads in a specific processor set
410*27b03b36SApple OSS Distributions  */
kern_return_t
processor_set_stack_usage(
	processor_set_t pset,
	unsigned int    *totalp,
	vm_size_t       *spacep,
	vm_size_t       *residentp,
	vm_size_t       *maxusagep,
	vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total = 0;
	thread_t thread;

	/* Only the default processor set (pset0) is supported. */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&tasks_threads_lock);

	/* Count threads that currently have a kernel stack attached. */
	queue_iterate(&threads, thread, thread_t, threads) {
		total += (thread->kernel_stack != 0);
	}

	lck_mtx_unlock(&tasks_threads_lock);

	*totalp = total;
	*residentp = *spacep = total * round_page(kernel_stack_size);
	/* Per-stack maximum usage is not tracked here; report zeros. */
	*maxusagep = 0;
	*maxstackp = 0;
	return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}
446*27b03b36SApple OSS Distributions 
447*27b03b36SApple OSS Distributions vm_offset_t
min_valid_stack_address(void)448*27b03b36SApple OSS Distributions min_valid_stack_address(void)
449*27b03b36SApple OSS Distributions {
450*27b03b36SApple OSS Distributions 	return (vm_offset_t)vm_map_min(kernel_map);
451*27b03b36SApple OSS Distributions }
452*27b03b36SApple OSS Distributions 
453*27b03b36SApple OSS Distributions vm_offset_t
max_valid_stack_address(void)454*27b03b36SApple OSS Distributions max_valid_stack_address(void)
455*27b03b36SApple OSS Distributions {
456*27b03b36SApple OSS Distributions 	return (vm_offset_t)vm_map_max(kernel_map);
457*27b03b36SApple OSS Distributions }
458