1*e7776783SApple OSS Distributions /*
2*e7776783SApple OSS Distributions * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3*e7776783SApple OSS Distributions *
4*e7776783SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*e7776783SApple OSS Distributions *
6*e7776783SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*e7776783SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*e7776783SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*e7776783SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*e7776783SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*e7776783SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*e7776783SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*e7776783SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*e7776783SApple OSS Distributions *
15*e7776783SApple OSS Distributions * Please obtain a copy of the License at
16*e7776783SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*e7776783SApple OSS Distributions *
18*e7776783SApple OSS Distributions * The Original Code and all software distributed under the License are
19*e7776783SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*e7776783SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*e7776783SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*e7776783SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*e7776783SApple OSS Distributions * Please see the License for the specific language governing rights and
24*e7776783SApple OSS Distributions * limitations under the License.
25*e7776783SApple OSS Distributions *
26*e7776783SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*e7776783SApple OSS Distributions */
28*e7776783SApple OSS Distributions /*
29*e7776783SApple OSS Distributions * Kernel stack management routines.
30*e7776783SApple OSS Distributions */
31*e7776783SApple OSS Distributions
32*e7776783SApple OSS Distributions #include <mach/mach_host.h>
33*e7776783SApple OSS Distributions #include <mach/mach_types.h>
34*e7776783SApple OSS Distributions #include <mach/processor_set.h>
35*e7776783SApple OSS Distributions
36*e7776783SApple OSS Distributions #include <kern/kern_types.h>
37*e7776783SApple OSS Distributions #include <kern/lock_group.h>
38*e7776783SApple OSS Distributions #include <kern/mach_param.h>
39*e7776783SApple OSS Distributions #include <kern/percpu.h>
40*e7776783SApple OSS Distributions #include <kern/processor.h>
41*e7776783SApple OSS Distributions #include <kern/thread.h>
42*e7776783SApple OSS Distributions #include <kern/zalloc.h>
43*e7776783SApple OSS Distributions #include <kern/kalloc.h>
44*e7776783SApple OSS Distributions #include <kern/ledger.h>
45*e7776783SApple OSS Distributions
46*e7776783SApple OSS Distributions #include <vm/vm_map.h>
47*e7776783SApple OSS Distributions #include <vm/vm_kern.h>
48*e7776783SApple OSS Distributions
49*e7776783SApple OSS Distributions #include <mach_debug.h>
50*e7776783SApple OSS Distributions #include <san/kasan.h>
51*e7776783SApple OSS Distributions
52*e7776783SApple OSS Distributions /*
53*e7776783SApple OSS Distributions * We allocate stacks from generic kernel VM.
54*e7776783SApple OSS Distributions *
55*e7776783SApple OSS Distributions * The stack_free_list can only be accessed at splsched,
56*e7776783SApple OSS Distributions * because stack_alloc_try/thread_invoke operate at splsched.
57*e7776783SApple OSS Distributions */
58*e7776783SApple OSS Distributions
/* Spin lock guarding the global free list and its statistics. */
static SIMPLE_LOCK_DECLARE(stack_lock_data, 0);
#define stack_lock() simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock() simple_unlock(&stack_lock_data)

/* Max free stacks cached per CPU before spilling to the global list. */
#define STACK_CACHE_SIZE 2

/* Head of the global free list; links live inside the stacks themselves. */
static vm_offset_t stack_free_list;

static unsigned int stack_free_count, stack_free_hiwat; /* free list count */
static unsigned int stack_hiwat;                /* high-water mark of stack_total */
unsigned int stack_total;              /* current total count */
unsigned long long stack_allocs;       /* total count of allocations */

static unsigned int stack_free_target; /* desired free-list size, decayed periodically */
static int stack_free_delta;           /* net frees (+) minus allocs (-) this period */

static unsigned int stack_new_count;   /* total new stack allocations */

/* Alignment mask passed to kernel_memory_allocate() for stack placement. */
static SECURITY_READ_ONLY_LATE(vm_offset_t) stack_addr_mask;
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_stack_size; /* usable stack bytes */
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_stack_mask; /* ~stack_addr_mask */
/* NOTE(review): not updated in this file; presumably tracked by machine layer — confirm. */
vm_offset_t kernel_stack_depth_max;

/* Per-CPU cache of free stacks, accessed lock-free at splsched. */
struct stack_cache {
	vm_offset_t free;   /* head of this CPU's free list */
	unsigned int count; /* entries on this CPU's list */
};
static struct stack_cache PERCPU_DATA(stack_cache);

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack) \
	(*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
94*e7776783SApple OSS Distributions
95*e7776783SApple OSS Distributions static inline int
log2(vm_offset_t size)96*e7776783SApple OSS Distributions log2(vm_offset_t size)
97*e7776783SApple OSS Distributions {
98*e7776783SApple OSS Distributions int result;
99*e7776783SApple OSS Distributions for (result = 0; size > 0; result++) {
100*e7776783SApple OSS Distributions size >>= 1;
101*e7776783SApple OSS Distributions }
102*e7776783SApple OSS Distributions return result;
103*e7776783SApple OSS Distributions }
104*e7776783SApple OSS Distributions
105*e7776783SApple OSS Distributions static inline vm_offset_t
roundup_pow2(vm_offset_t size)106*e7776783SApple OSS Distributions roundup_pow2(vm_offset_t size)
107*e7776783SApple OSS Distributions {
108*e7776783SApple OSS Distributions return 1UL << (log2(size - 1) + 1);
109*e7776783SApple OSS Distributions }
110*e7776783SApple OSS Distributions
/* Forward declarations for the allocator internals below. */
static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);
113*e7776783SApple OSS Distributions
114*e7776783SApple OSS Distributions static void
stack_init(void)115*e7776783SApple OSS Distributions stack_init(void)
116*e7776783SApple OSS Distributions {
117*e7776783SApple OSS Distributions uint32_t kernel_stack_pages = atop(KERNEL_STACK_SIZE);
118*e7776783SApple OSS Distributions
119*e7776783SApple OSS Distributions kernel_stack_size = KERNEL_STACK_SIZE;
120*e7776783SApple OSS Distributions kernel_stack_mask = -KERNEL_STACK_SIZE;
121*e7776783SApple OSS Distributions
122*e7776783SApple OSS Distributions if (PE_parse_boot_argn("kernel_stack_pages",
123*e7776783SApple OSS Distributions &kernel_stack_pages,
124*e7776783SApple OSS Distributions sizeof(kernel_stack_pages))) {
125*e7776783SApple OSS Distributions kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
126*e7776783SApple OSS Distributions }
127*e7776783SApple OSS Distributions
128*e7776783SApple OSS Distributions if (kernel_stack_size < round_page(kernel_stack_size)) {
129*e7776783SApple OSS Distributions panic("stack_init: stack size %p not a multiple of page size %d",
130*e7776783SApple OSS Distributions (void *) kernel_stack_size, PAGE_SIZE);
131*e7776783SApple OSS Distributions }
132*e7776783SApple OSS Distributions
133*e7776783SApple OSS Distributions stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
134*e7776783SApple OSS Distributions kernel_stack_mask = ~stack_addr_mask;
135*e7776783SApple OSS Distributions }
136*e7776783SApple OSS Distributions STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, stack_init);
137*e7776783SApple OSS Distributions
/*
 * stack_alloc_internal:
 *
 * Pop a stack from the global free list, or allocate a fresh one
 * (bracketed by guard pages) from kernel VM when the list is empty.
 * Returns the stack base address; panics if VM allocation fails.
 * May block when new memory must be allocated.
 */

static vm_offset_t
stack_alloc_internal(void)
{
	vm_offset_t stack = 0;
	spl_t s;
	int flags = 0;
	kern_return_t kr = KERN_SUCCESS;

	/* The free list may only be touched at splsched, under stack_lock. */
	s = splsched();
	stack_lock();
	stack_allocs++;
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	} else {
		/* Nothing cached: account for a brand-new allocation. */
		if (++stack_total > stack_hiwat) {
			stack_hiwat = stack_total;
		}
		stack_new_count++;
	}
	/* One more stack in use: net free-list movement goes down. */
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack. Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
		kr = kernel_memory_allocate(kernel_map, &stack,
		    kernel_stack_size + (2 * PAGE_SIZE),
		    stack_addr_mask,
		    flags,
		    VM_KERN_MEMORY_STACK);
		if (kr != KERN_SUCCESS) {
			panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
		}

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page. Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}
	return stack;
}
196*e7776783SApple OSS Distributions
197*e7776783SApple OSS Distributions void
stack_alloc(thread_t thread)198*e7776783SApple OSS Distributions stack_alloc(
199*e7776783SApple OSS Distributions thread_t thread)
200*e7776783SApple OSS Distributions {
201*e7776783SApple OSS Distributions assert(thread->kernel_stack == 0);
202*e7776783SApple OSS Distributions machine_stack_attach(thread, stack_alloc_internal());
203*e7776783SApple OSS Distributions }
204*e7776783SApple OSS Distributions
205*e7776783SApple OSS Distributions void
stack_handoff(thread_t from,thread_t to)206*e7776783SApple OSS Distributions stack_handoff(thread_t from, thread_t to)
207*e7776783SApple OSS Distributions {
208*e7776783SApple OSS Distributions assert(from == current_thread());
209*e7776783SApple OSS Distributions machine_stack_handoff(from, to);
210*e7776783SApple OSS Distributions }
211*e7776783SApple OSS Distributions
212*e7776783SApple OSS Distributions /*
213*e7776783SApple OSS Distributions * stack_free:
214*e7776783SApple OSS Distributions *
215*e7776783SApple OSS Distributions * Detach and free the stack for a thread.
216*e7776783SApple OSS Distributions */
217*e7776783SApple OSS Distributions void
stack_free(thread_t thread)218*e7776783SApple OSS Distributions stack_free(
219*e7776783SApple OSS Distributions thread_t thread)
220*e7776783SApple OSS Distributions {
221*e7776783SApple OSS Distributions vm_offset_t stack = machine_stack_detach(thread);
222*e7776783SApple OSS Distributions
223*e7776783SApple OSS Distributions assert(stack);
224*e7776783SApple OSS Distributions if (stack != thread->reserved_stack) {
225*e7776783SApple OSS Distributions stack_free_stack(stack);
226*e7776783SApple OSS Distributions }
227*e7776783SApple OSS Distributions }
228*e7776783SApple OSS Distributions
229*e7776783SApple OSS Distributions void
stack_free_reserved(thread_t thread)230*e7776783SApple OSS Distributions stack_free_reserved(
231*e7776783SApple OSS Distributions thread_t thread)
232*e7776783SApple OSS Distributions {
233*e7776783SApple OSS Distributions if (thread->reserved_stack != thread->kernel_stack) {
234*e7776783SApple OSS Distributions stack_free_stack(thread->reserved_stack);
235*e7776783SApple OSS Distributions }
236*e7776783SApple OSS Distributions }
237*e7776783SApple OSS Distributions
/*
 * stack_free_stack:
 *
 * Return a stack to the per-CPU cache, spilling to the global free
 * list when the cache is full. Raises to splsched to serialize with
 * the other free-list users.
 */
static void
stack_free_stack(
	vm_offset_t stack)
{
	struct stack_cache *cache;
	spl_t s;

#if KASAN_DEBUG
	/* Sanity check - stack should be unpoisoned by now */
	assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

	s = splsched();
	cache = PERCPU_GET(stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		/* Room in this CPU's cache: no global lock needed at splsched. */
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		/* Cache full: push onto the global list under the lock. */
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat) {
			stack_free_hiwat = stack_free_count;
		}
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
268*e7776783SApple OSS Distributions
/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Tries the per-CPU cache first, then the global free list, and
 * finally falls back to the thread's reserved stack, if any.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t thread)
{
	struct stack_cache *cache;
	vm_offset_t stack;

	cache = PERCPU_GET(stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	} else {
		/*
		 * Unlocked peek at the global list to avoid taking the lock
		 * when it is obviously empty; re-checked under the lock
		 * before popping.
		 */
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	/* Last resort: attach the thread's reserved stack, if it has one. */
	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return TRUE;
	}

	return FALSE;
}
311*e7776783SApple OSS Distributions
/* Collection runs at most once per compute_stack_target() period. */
static unsigned int stack_collect_tick, last_stack_tick;
313*e7776783SApple OSS Distributions
/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
	/* Only collect once per target-computation period. */
	if (stack_collect_tick != last_stack_tick) {
		unsigned int target;
		vm_offset_t stack;
		spl_t s;

		s = splsched();
		stack_lock();

		/*
		 * Keep enough free stacks for every CPU's cache plus the
		 * decayed demand estimate; |stack_free_delta| pads the
		 * target against bursty activity within this period.
		 */
		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			/* Pop one stack, then drop the lock for the (blocking) unmap. */
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page. Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size + (2 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS) {
				panic("stack_collect: vm_map_remove");
			}
			stack = 0;

			/* Reacquire and recompute: the target may have moved. */
			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
376*e7776783SApple OSS Distributions
377*e7776783SApple OSS Distributions /*
378*e7776783SApple OSS Distributions * compute_stack_target:
379*e7776783SApple OSS Distributions *
380*e7776783SApple OSS Distributions * Computes a new target free list count
381*e7776783SApple OSS Distributions * based on recent alloc / free activity.
382*e7776783SApple OSS Distributions *
383*e7776783SApple OSS Distributions * Limits stack collection to once per
384*e7776783SApple OSS Distributions * computation period.
385*e7776783SApple OSS Distributions */
386*e7776783SApple OSS Distributions void
compute_stack_target(__unused void * arg)387*e7776783SApple OSS Distributions compute_stack_target(
388*e7776783SApple OSS Distributions __unused void *arg)
389*e7776783SApple OSS Distributions {
390*e7776783SApple OSS Distributions spl_t s;
391*e7776783SApple OSS Distributions
392*e7776783SApple OSS Distributions s = splsched();
393*e7776783SApple OSS Distributions stack_lock();
394*e7776783SApple OSS Distributions
395*e7776783SApple OSS Distributions if (stack_free_target > 5) {
396*e7776783SApple OSS Distributions stack_free_target = (4 * stack_free_target) / 5;
397*e7776783SApple OSS Distributions } else if (stack_free_target > 0) {
398*e7776783SApple OSS Distributions stack_free_target--;
399*e7776783SApple OSS Distributions }
400*e7776783SApple OSS Distributions
401*e7776783SApple OSS Distributions stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
402*e7776783SApple OSS Distributions
403*e7776783SApple OSS Distributions stack_free_delta = 0;
404*e7776783SApple OSS Distributions stack_collect_tick++;
405*e7776783SApple OSS Distributions
406*e7776783SApple OSS Distributions stack_unlock();
407*e7776783SApple OSS Distributions splx(s);
408*e7776783SApple OSS Distributions }
409*e7776783SApple OSS Distributions
/* OBSOLETE — empty stub; presumably retained so legacy callers still link. */
void stack_privilege(
	thread_t thread);

void
stack_privilege(
	__unused thread_t thread)
{
	/* OBSOLETE */
}
420*e7776783SApple OSS Distributions
/*
 * Return info on stack usage for threads in a specific processor set.
 *
 * Only compiled in with MACH_DEBUG, and only the default pset (&pset0)
 * is accepted. Per-stack depth details (maxusage/maxstack) always
 * report as zero.
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t pset,
	unsigned int *totalp,
	vm_size_t *spacep,
	vm_size_t *residentp,
	vm_size_t *maxusagep,
	vm_offset_t *maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total = 0;
	thread_t thread;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&tasks_threads_lock);

	/* Count every thread that currently owns a kernel stack. */
	queue_iterate(&threads, thread, thread_t, threads) {
		total += (thread->kernel_stack != 0);
	}

	lck_mtx_unlock(&tasks_threads_lock);

	*totalp = total;
	/* Report each counted stack as fully resident. */
	*residentp = *spacep = total * round_page(kernel_stack_size);
	*maxusagep = 0;
	*maxstackp = 0;
	return KERN_SUCCESS;

#endif /* MACH_DEBUG */
}
459*e7776783SApple OSS Distributions
460*e7776783SApple OSS Distributions vm_offset_t
min_valid_stack_address(void)461*e7776783SApple OSS Distributions min_valid_stack_address(void)
462*e7776783SApple OSS Distributions {
463*e7776783SApple OSS Distributions return (vm_offset_t)vm_map_min(kernel_map);
464*e7776783SApple OSS Distributions }
465*e7776783SApple OSS Distributions
466*e7776783SApple OSS Distributions vm_offset_t
max_valid_stack_address(void)467*e7776783SApple OSS Distributions max_valid_stack_address(void)
468*e7776783SApple OSS Distributions {
469*e7776783SApple OSS Distributions return (vm_offset_t)vm_map_max(kernel_map);
470*e7776783SApple OSS Distributions }
471