xref: /xnu-8019.80.24/bsd/kern/kern_malloc.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1987, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. All advertising materials mentioning features or use of this software
42  *    must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Berkeley and its contributors.
45  * 4. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
62  */
63 /*
64  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65  * support for mandatory and extensible security protections.  This notice
66  * is included in support of clause 2.2 (b) of the Apple Public License,
67  * Version 2.0.
68  */
69 
70 #include <kern/zalloc.h>
71 #include <kern/kalloc.h>
72 
73 #include <sys/malloc.h>
74 #include <sys/sysctl.h>
75 
76 #include <libkern/libkern.h>
77 
/*
 * Zone view for "vfs.namei" pathname buffers: MAXPATHLEN-byte allocations
 * served from the data-buffers kalloc heap.
 */
ZONE_VIEW_DEFINE(ZV_NAMEI, "vfs.namei", KHEAP_ID_DATA_BUFFERS, MAXPATHLEN);
79 
80 static void *
__MALLOC_ext(size_t size,int type,int flags,vm_allocation_site_t * site,kalloc_heap_t heap)81 __MALLOC_ext(
82 	size_t          size,
83 	int             type,
84 	int             flags,
85 	vm_allocation_site_t *site,
86 	kalloc_heap_t   heap)
87 {
88 	void    *addr = NULL;
89 
90 	if (type >= M_LAST) {
91 		panic("_malloc TYPE");
92 	}
93 
94 	if (size == 0) {
95 		return NULL;
96 	}
97 
98 	static_assert(sizeof(vm_size_t) == sizeof(size_t));
99 	static_assert(M_WAITOK == Z_WAITOK);
100 	static_assert(M_NOWAIT == Z_NOWAIT);
101 	static_assert(M_ZERO == Z_ZERO);
102 
103 	addr = kalloc_ext(heap, size,
104 	    flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr;
105 	if (__probable(addr)) {
106 		return addr;
107 	}
108 
109 	if (flags & (M_NOWAIT | M_NULL)) {
110 		return NULL;
111 	}
112 
113 	/*
114 	 * We get here when the caller told us to block waiting for memory, but
115 	 * kalloc said there's no memory left to get.  Generally, this means there's a
116 	 * leak or the caller asked for an impossibly large amount of memory. If the caller
117 	 * is expecting a NULL return code then it should explicitly set the flag M_NULL.
118 	 * If the caller isn't expecting a NULL return code, we just panic. This is less
119 	 * than ideal, but returning NULL when the caller isn't expecting it doesn't help
120 	 * since the majority of callers don't check the return value and will just
121 	 * dereference the pointer and trap anyway.  We may as well get a more
122 	 * descriptive message out while we can.
123 	 */
124 	panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size);
125 }
126 
127 void *
__MALLOC(size_t size,int type,int flags,vm_allocation_site_t * site)128 __MALLOC(size_t size, int type, int flags, vm_allocation_site_t *site)
129 {
130 	return __MALLOC_ext(size, type, flags, site, KHEAP_DEFAULT);
131 }
132 
133 void *
__REALLOC(void * addr,size_t size,int type __unused,int flags,vm_allocation_site_t * site)134 __REALLOC(
135 	void            *addr,
136 	size_t          size,
137 	int             type __unused,
138 	int             flags,
139 	vm_allocation_site_t *site)
140 {
141 	addr = kheap_realloc_addr(KHEAP_DEFAULT, addr, size,
142 	    flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr;
143 
144 	if (__probable(addr)) {
145 		return addr;
146 	}
147 
148 	if (flags & (M_NOWAIT | M_NULL)) {
149 		return NULL;
150 	}
151 
152 	panic("_REALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size);
153 }
154 
155 void *
156 _MALLOC_external(size_t size, int type, int flags);
157 void *
_MALLOC_external(size_t size,int type,int flags)158 _MALLOC_external(size_t size, int type, int flags)
159 {
160 	static vm_allocation_site_t site = {
161 		.tag = VM_KERN_MEMORY_KALLOC,
162 		.flags = VM_TAG_BT,
163 	};
164 	kalloc_heap_t heap = KHEAP_KEXT;
165 
166 	if (type == M_SONAME) {
167 #if !XNU_TARGET_OS_OSX
168 		assert3u(size, <=, UINT8_MAX);
169 #endif /* XNU_TARGET_OS_OSX */
170 		heap = KHEAP_SONAME;
171 	}
172 
173 	return __MALLOC_ext(size, type, flags, &site, heap);
174 }
175 
176 void
177 _FREE_external(void *addr, int type);
178 void
_FREE_external(void * addr,int type)179 _FREE_external(void *addr, int type)
180 {
181 	/*
182 	 * hashinit and other functions allocate on behalf of kexts and do not have
183 	 * a matching hashdestroy, so we sadly have to allow this for now.
184 	 */
185 	kalloc_heap_t heap = KHEAP_ANY;
186 
187 	if (type == M_SONAME) {
188 		/*
189 		 * On macOS, some KEXT is known to use M_SONAME for M_TEMP allocation
190 		 */
191 #if !XNU_TARGET_OS_OSX
192 		kheap_free_bounded(KHEAP_SONAME, addr, 1, UINT8_MAX);
193 #else
194 		kheap_free_addr(heap, addr);
195 #endif /* XNU_TARGET_OS_OSX */
196 		return;
197 	}
198 
199 	kheap_free_addr(heap, addr);
200 }
201 
void
_FREE_ZONE_external(void *elem, size_t size, int type);
/*
 * KPI sized-free entry point used by kexts; releases elem (of the given
 * size) back to the kext heap.  The parenthesized callee suppresses any
 * function-like kheap_free() macro so the plain function is called --
 * presumably deliberate; do not "clean up" the parentheses.
 */
void
_FREE_ZONE_external(void *elem, size_t size, int type __unused)
{
	(kheap_free)(KHEAP_KEXT, elem, size);
}
209 
210 #if DEBUG || DEVELOPMENT
211 
212 extern unsigned int zone_map_jetsam_limit;
213 
214 static int
215 sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS
216 {
217 #pragma unused(oidp, arg1, arg2)
218 	int oldval = 0, val = 0, error = 0;
219 
220 	oldval = zone_map_jetsam_limit;
221 	error = sysctl_io_number(req, oldval, sizeof(int), &val, NULL);
222 	if (error || !req->newptr) {
223 		return error;
224 	}
225 
226 	if (val <= 0 || val > 100) {
227 		printf("sysctl_zone_map_jetsam_limit: new jetsam limit value is invalid.\n");
228 		return EINVAL;
229 	}
230 
231 	zone_map_jetsam_limit = val;
232 	return 0;
233 }
234 
/* Registers kern.zone_map_jetsam_limit (int, read/write). */
SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    sysctl_zone_map_jetsam_limit, "I", "Zone map jetsam limit");
237 
238 
239 extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
240 
241 static int
242 sysctl_zone_map_size_and_capacity SYSCTL_HANDLER_ARGS
243 {
244 #pragma unused(oidp, arg1, arg2)
245 	uint64_t zstats[2];
246 	get_zone_map_size(&zstats[0], &zstats[1]);
247 
248 	return SYSCTL_OUT(req, &zstats, sizeof(zstats));
249 }
250 
251 SYSCTL_PROC(_kern, OID_AUTO, zone_map_size_and_capacity,
252     CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
253     0, 0, &sysctl_zone_map_size_and_capacity, "Q", "Current size and capacity of the zone map");
254 
255 #endif /* DEBUG || DEVELOPMENT */
256 
257 #if CONFIG_ZLEAKS
258 
/* Parent node for the kern.zleak.* controls defined below. */
SYSCTL_DECL(_kern_zleak);
SYSCTL_NODE(_kern, OID_AUTO, zleak, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "zleak");
261 
262 /*
263  * kern.zleak.active
264  *
265  * Show the status of the zleak subsystem (0 = enabled, 1 = active,
266  * and -1 = failed), and if enabled, allow it to be activated immediately.
267  */
268 static int
269 sysctl_zleak_active SYSCTL_HANDLER_ARGS
270 {
271 #pragma unused(arg1, arg2)
272 	int oldval, val, error;
273 
274 	val = oldval = get_zleak_state();
275 	error = sysctl_handle_int(oidp, &val, 0, req);
276 	if (error || !req->newptr) {
277 		return error;
278 	}
279 	/*
280 	 * Can only be activated if it's off (and not failed.)
281 	 * Cannot be deactivated once it's on.
282 	 */
283 	if (val == 1 && oldval == 0) {
284 		kern_return_t kr = zleak_activate();
285 
286 		if (KERN_SUCCESS != kr) {
287 			printf("zleak_active: failed to activate "
288 			    "live zone leak debugging (%d).\n", kr);
289 		}
290 	}
291 	if (val == 0 && oldval == 1) {
292 		printf("zleak_active: active, cannot be disabled.\n");
293 		return EINVAL;
294 	}
295 	return 0;
296 }
297 
/* Registers kern.zleak.active (int, read/write); see sysctl_zleak_active. */
SYSCTL_PROC(_kern_zleak, OID_AUTO, active,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_zleak_active, "I", "zleak activity");
301 
302 /*
303  * kern.zleak.max_zonemap_size
304  *
305  * Read the value of the maximum zonemap size in bytes; useful
306  * as the maximum size that zleak.global_threshold and
307  * zleak.zone_threshold should be set to.
308  */
309 static int
310 sysctl_zleak_max_zonemap_size SYSCTL_HANDLER_ARGS
311 {
312 	uint64_t zmap_max_size = *(vm_size_t *)arg1;
313 
314 	return sysctl_handle_quad(oidp, &zmap_max_size, arg2, req);
315 }
316 
/* Registers kern.zleak.max_zonemap_size (quad, read-only); arg1 is the backing variable. */
SYSCTL_PROC(_kern_zleak, OID_AUTO, max_zonemap_size,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    &zleak_max_zonemap_size, 0,
    sysctl_zleak_max_zonemap_size, "Q", "zleak max zonemap size");
321 
322 
323 static int
324 sysctl_zleak_threshold SYSCTL_HANDLER_ARGS
325 {
326 #pragma unused(oidp, arg2)
327 	int error;
328 	uint64_t value = *(vm_size_t *)arg1;
329 
330 	error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
331 
332 	if (error || !req->newptr) {
333 		return error;
334 	}
335 
336 	if (value > (uint64_t)zleak_max_zonemap_size) {
337 		return ERANGE;
338 	}
339 
340 	*(vm_size_t *)arg1 = value;
341 	return 0;
342 }
343 
/*
 * kern.zleak.global_threshold
 *
 * Set the global zleak threshold size (in bytes).  If the zone map
 * grows larger than this value, zleaks are automatically activated.
 *
 * The default value is set in zleak_init().
 *
 * arg1 points at the variable updated by the shared threshold handler.
 */
SYSCTL_PROC(_kern_zleak, OID_AUTO, global_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_global_tracking_threshold, 0,
    sysctl_zleak_threshold, "Q", "zleak global threshold");
356 
/*
 * kern.zleak.zone_threshold
 *
 * Set the per-zone threshold size (in bytes) above which any
 * zone will automatically start zleak tracking.
 *
 * The default value is set in zleak_init().
 *
 * Setting this variable will have no effect until zleak tracking is
 * activated (See above.)
 *
 * arg1 points at the variable updated by the shared threshold handler.
 */
SYSCTL_PROC(_kern_zleak, OID_AUTO, zone_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_per_zone_tracking_threshold, 0,
    sysctl_zleak_threshold, "Q", "zleak per-zone threshold");
372 
373 #endif  /* CONFIG_ZLEAKS */
374 
375 extern uint64_t get_zones_collectable_bytes(void);
376 
377 static int
378 sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS
379 {
380 #pragma unused(oidp, arg1, arg2)
381 	uint64_t zones_free_mem = get_zones_collectable_bytes();
382 
383 	return SYSCTL_OUT(req, &zones_free_mem, sizeof(zones_free_mem));
384 }
385 
386 SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes,
387     CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
388     0, 0, &sysctl_zones_collectable_bytes, "Q", "Collectable memory in zones");
389