/* xref: /xnu-8019.80.24/osfmk/kern/stack.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea) */
/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
#include <san/kasan.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

static SIMPLE_LOCK_DECLARE(stack_lock_data, 0);
#define stack_lock()            simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()          simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE        2

static vm_offset_t              stack_free_list;

static unsigned int             stack_free_count, stack_free_hiwat;             /* free list count */
static unsigned int             stack_hiwat;
unsigned int                    stack_total;                            /* current total count */
unsigned long long              stack_allocs;                           /* total count of allocations */

static unsigned int             stack_free_target;
static int                      stack_free_delta;

static unsigned int             stack_new_count;                                                /* total new stack allocations */

static SECURITY_READ_ONLY_LATE(vm_offset_t)  stack_addr_mask;
SECURITY_READ_ONLY_LATE(vm_offset_t)         kernel_stack_size;
SECURITY_READ_ONLY_LATE(vm_offset_t)         kernel_stack_mask;
vm_offset_t                                  kernel_stack_depth_max;

struct stack_cache {
	vm_offset_t     free;
	unsigned int    count;
};
static struct stack_cache PERCPU_DATA(stack_cache);
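
/*
 *	Free stacks are kept on two levels: a small per-CPU cache of up to
 *	STACK_CACHE_SIZE entries, touched only at splsched and therefore
 *	needing no lock, and the global stack_free_list above, protected by
 *	stack_lock().  stack_alloc_try() drains the per-CPU cache first and
 *	falls back to the global list; stack_free_stack() refills in the
 *	same order.
 */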

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)       \
	(*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
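
/*
 *	That is, a free stack's link to the next free stack lives in the last
 *	pointer-sized word of the stack's own storage
 *	(stack + kernel_stack_size - sizeof(vm_offset_t)), so the free list
 *	needs no bookkeeping allocations of its own.
 */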

static inline int
log2(vm_offset_t size)
{
	int     result;
	for (result = 0; size > 0; result++) {
		size >>= 1;
	}
	return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
	return 1UL << (log2(size - 1) + 1);
}
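
/*
 *	Note: log2() above returns the bit length of its argument
 *	(floor(log2(size)) + 1 for size > 0) rather than the floor logarithm,
 *	so roundup_pow2() works out to twice the usual round-up to a power of
 *	two.  Worked example with the code as written:
 *	roundup_pow2(0x4000) = 1UL << (log2(0x3fff) + 1) = 1UL << 15 = 0x8000.
 *	stack_addr_mask and kernel_stack_mask below are derived from this
 *	value.
 */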

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

static void
stack_init(void)
{
	uint32_t kernel_stack_pages = atop(KERNEL_STACK_SIZE);

	kernel_stack_size = KERNEL_STACK_SIZE;
	kernel_stack_mask = -KERNEL_STACK_SIZE;

	if (PE_parse_boot_argn("kernel_stack_pages",
	    &kernel_stack_pages,
	    sizeof(kernel_stack_pages))) {
		kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
	}

	if (kernel_stack_size < round_page(kernel_stack_size)) {
		panic("stack_init: stack size %p not a multiple of page size %d",
		    (void *) kernel_stack_size, PAGE_SIZE);
	}

	stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
	kernel_stack_mask = ~stack_addr_mask;
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, stack_init);
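
/*
 *	Illustrative example (values not from the original source): booting
 *	with kernel_stack_pages=8 on a 4 KB-page configuration gives
 *	kernel_stack_size = 32 KB (0x8000), stack_addr_mask =
 *	roundup_pow2(0x8000) - 1 = 0xffff, and kernel_stack_mask = ~0xffff,
 *	so stack allocations below are requested 64 KB aligned.
 */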

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread;
 *	may block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
	vm_offset_t             stack = 0;
	spl_t                   s;
	int                     flags = 0;
	kern_return_t           kr = KERN_SUCCESS;

	s = splsched();
	stack_lock();
	stack_allocs++;
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	} else {
		if (++stack_total > stack_hiwat) {
			stack_hiwat = stack_total;
		}
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
		kr = kernel_memory_allocate(kernel_map, &stack,
		    kernel_stack_size + (2 * PAGE_SIZE),
		    stack_addr_mask,
		    flags,
		    VM_KERN_MEMORY_STACK);
		if (kr != KERN_SUCCESS) {
			panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
		}

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}
	return stack;
}
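
/*
 *	Resulting layout of a freshly allocated stack region (derived from the
 *	code above):
 *
 *	    returned base - PAGE_SIZE           lower guard page
 *	    returned base                       kernel_stack_size bytes of stack
 *	    returned base + kernel_stack_size   upper guard page
 *
 *	The address returned (and later chained via stack_next()) is the
 *	lowest usable stack address, one page past the start of the allocated
 *	region.
 */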

void
stack_alloc(
	thread_t        thread)
{
	assert(thread->kernel_stack == 0);
	machine_stack_attach(thread, stack_alloc_internal());
}

void
stack_handoff(thread_t from, thread_t to)
{
	assert(from == current_thread());
	machine_stack_handoff(from, to);
}

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t        thread)
{
	vm_offset_t         stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		stack_free_stack(stack);
	}
}

void
stack_free_reserved(
	thread_t        thread)
{
	if (thread->reserved_stack != thread->kernel_stack) {
		stack_free_stack(thread->reserved_stack);
	}
}

static void
stack_free_stack(
	vm_offset_t             stack)
{
	struct stack_cache      *cache;
	spl_t                           s;

#if KASAN_DEBUG
	/* Sanity check - stack should be unpoisoned by now */
	assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

	s = splsched();
	cache = PERCPU_GET(stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat) {
			stack_free_hiwat = stack_free_count;
		}
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
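
/*
 *	stack_free_stack() above is the common free path: it first tries the
 *	current CPU's cache (no lock needed at splsched) and only takes
 *	stack_lock() when the cache already holds STACK_CACHE_SIZE entries.
 *	The stack_free_delta and stack_free_hiwat bookkeeping feeds
 *	compute_stack_target() and stack_collect() below.
 */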

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t                thread)
{
	struct stack_cache      *cache;
	vm_offset_t                     stack;

	cache = PERCPU_GET(stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	} else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return TRUE;
	}

	return FALSE;
}
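
/*
 *	Note on stack_alloc_try(): it never calls into the VM system, which is
 *	what makes it usable at splsched (see the comment at the top of this
 *	file).  stack_free_list is peeked without the lock and then re-checked
 *	under stack_lock(); as a last resort the thread's reserved_stack, if
 *	any, is attached.
 */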

static unsigned int             stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks;
 *	may block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int    target;
		vm_offset_t             stack;
		spl_t                   s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size + (2 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS) {
				panic("stack_collect: vm_map_remove");
			}
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
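
/*
 *	Reclamation math for stack_collect() above: the retained target is
 *	stack_free_target plus a STACK_CACHE_SIZE allowance per processor,
 *	padded by the magnitude of the recent alloc/free imbalance
 *	(stack_free_delta).  Each surplus stack is unmapped with
 *	vm_map_remove() over the full region, i.e. the stack plus both guard
 *	pages, with the stack lock dropped around the VM call and the target
 *	recomputed after it is reacquired.
 */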

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
	__unused void           *arg)
{
	spl_t           s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5) {
		stack_free_target = (4 * stack_free_target) / 5;
	} else if (stack_free_target > 0) {
		stack_free_target--;
	}

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
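
/*
 *	compute_stack_target() above decays the free-list target by roughly
 *	20% per period (or by one when it is five or less), then adds back the
 *	magnitude of the period's alloc/free imbalance.  Bumping
 *	stack_collect_tick is what re-arms stack_collect() for its next pass.
 */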

/* OBSOLETE */
void    stack_privilege(
	thread_t        thread);

void
stack_privilege(
	__unused thread_t       thread)
{
	/* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t pset,
	unsigned int    *totalp,
	vm_size_t       *spacep,
	vm_size_t       *residentp,
	vm_size_t       *maxusagep,
	vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total = 0;
	thread_t thread;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&tasks_threads_lock);

	queue_iterate(&threads, thread, thread_t, threads) {
		total += (thread->kernel_stack != 0);
	}

	lck_mtx_unlock(&tasks_threads_lock);

	*totalp = total;
	*residentp = *spacep = total * round_page(kernel_stack_size);
	*maxusagep = 0;
	*maxstackp = 0;
	return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}
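
/*
 *	When MACH_DEBUG is configured, the call above reports only the number
 *	of threads that currently own a kernel stack and a resident estimate
 *	of total * round_page(kernel_stack_size); maximum usage and the
 *	deepest stack address are reported as zero.
 */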

vm_offset_t
min_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_max(kernel_map);
}