1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1987, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <kern/zalloc.h>
71 #include <kern/kalloc.h>
72 #include <sys/ubc.h> /* mach_to_bsd_errno */
73
74 #include <sys/malloc.h>
75 #include <sys/sysctl.h>
76 #include <sys/kauth.h>
77
78 #include <vm/vm_kern_xnu.h>
79
80 #include <libkern/libkern.h>
81
/* Zone view used for vfs.namei path buffers (MAXPATHLEN each), backed by the data-buffers heap. */
ZONE_VIEW_DEFINE(ZV_NAMEI, "vfs.namei", KHEAP_ID_DATA_BUFFERS, MAXPATHLEN);
/* Dedicated kalloc heap for legacy kern_os_malloc()/OSMalloc allocations. */
KALLOC_HEAP_DEFINE(KERN_OS_MALLOC, "kern_os_malloc", KHEAP_ID_KT_VAR);
84
85 /*
86 * macOS Only deprecated interfaces, here only for legacy reasons.
87 * There is no internal variant of any of these symbols on purpose.
88 */
89 #if XNU_PLATFORM_MacOSX
90
91 #define OSMallocDeprecatedMsg(msg)
92 #include <libkern/OSMalloc.h>
93
/*
 * Legacy _MALLOC() KPI entry point (macOS only).
 *
 * Allocates `size` bytes from KHEAP_DEFAULT, or from KHEAP_SONAME when
 * `type` is M_SONAME.  Returns NULL for a zero-sized request.  Honors
 * M_NOWAIT/M_ZERO by passing them through to kalloc.  On failure, returns
 * NULL only if the caller passed M_NOWAIT or M_NULL; otherwise panics.
 */
void *
_MALLOC_external(size_t size, int type, int flags);
void *
_MALLOC_external(size_t size, int type, int flags)
{
	kalloc_heap_t heap = KHEAP_DEFAULT;
	void *addr = NULL;

	/* Socket-name allocations are routed to their own heap. */
	if (type == M_SONAME) {
#if !XNU_TARGET_OS_OSX
		assert3u(size, <=, UINT8_MAX);
#endif /* XNU_TARGET_OS_OSX */
		heap = KHEAP_SONAME;
	}

	if (size == 0) {
		return NULL;
	}

	/*
	 * The legacy M_* flag values must coincide with their Z_* kalloc
	 * counterparts for the pass-through below to be valid.
	 */
	static_assert(sizeof(vm_size_t) == sizeof(size_t));
	static_assert(M_WAITOK == Z_WAITOK);
	static_assert(M_NOWAIT == Z_NOWAIT);
	static_assert(M_ZERO == Z_ZERO);

	/* Strip non-KPI bits and tag the allocation with a backtrace-derived VM tag. */
	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
	addr = kalloc_ext(heap, size, flags, NULL).addr;
	if (__probable(addr)) {
		return addr;
	}

	/*
	 * NOTE(review): this tests the flags value *after* the Z_VM_TAG_BT
	 * transform above; it relies on the M_NOWAIT/M_NULL bits surviving
	 * the Z_KPI_MASK filtering — confirm against zalloc.h if changed.
	 */
	if (flags & (M_NOWAIT | M_NULL)) {
		return NULL;
	}

	/*
	 * We get here when the caller told us to block waiting for memory, but
	 * kalloc said there's no memory left to get. Generally, this means there's a
	 * leak or the caller asked for an impossibly large amount of memory. If the caller
	 * is expecting a NULL return code then it should explicitly set the flag M_NULL.
	 * If the caller isn't expecting a NULL return code, we just panic. This is less
	 * than ideal, but returning NULL when the caller isn't expecting it doesn't help
	 * since the majority of callers don't check the return value and will just
	 * dereference the pointer and trap anyway. We may as well get a more
	 * descriptive message out while we can.
	 */
	panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size);
}
141
142 void
143 _FREE_external(void *addr, int type);
144 void
_FREE_external(void * addr,int type __unused)145 _FREE_external(void *addr, int type __unused)
146 {
147 kheap_free_addr(KHEAP_DEFAULT, addr);
148 }
149
150 void
151 _FREE_ZONE_external(void *elem, size_t size, int type);
152 void
_FREE_ZONE_external(void * elem,size_t size,int type __unused)153 _FREE_ZONE_external(void *elem, size_t size, int type __unused)
154 {
155 kheap_free(KHEAP_DEFAULT, elem, size);
156 }
157
158 char *
159 STRDUP_external(const char *string, int type);
160 char *
STRDUP_external(const char * string,int type __unused)161 STRDUP_external(const char *string, int type __unused)
162 {
163 size_t len;
164 char *copy;
165
166 len = strlen(string) + 1;
167 copy = kheap_alloc(KHEAP_DEFAULT, len, Z_WAITOK);
168 if (copy) {
169 memcpy(copy, string, len);
170 }
171 return copy;
172 }
173
/* List of all live OSMalloc tags; protected by OSMalloc_tag_lock. */
static queue_head_t OSMalloc_tag_list = QUEUE_HEAD_INITIALIZER(OSMalloc_tag_list);
static LCK_GRP_DECLARE(OSMalloc_tag_lck_grp, "OSMalloc_tag");
static LCK_SPIN_DECLARE(OSMalloc_tag_lock, &OSMalloc_tag_lck_grp);

/* Spinlock guarding enqueue/remque on OSMalloc_tag_list. */
#define OSMalloc_tag_spin_lock() lck_spin_lock(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock() lck_spin_unlock(&OSMalloc_tag_lock)
180
181 extern typeof(OSMalloc_Tagalloc) OSMalloc_Tagalloc_external;
182 OSMallocTag
OSMalloc_Tagalloc_external(const char * str,uint32_t flags)183 OSMalloc_Tagalloc_external(const char *str, uint32_t flags)
184 {
185 OSMallocTag OSMTag;
186
187 OSMTag = kalloc_type(struct _OSMallocTag_, Z_WAITOK | Z_ZERO);
188
189 if (flags & OSMT_PAGEABLE) {
190 OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;
191 }
192
193 OSMTag->OSMT_refcnt = 1;
194
195 strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);
196
197 OSMalloc_tag_spin_lock();
198 enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
199 OSMalloc_tag_unlock();
200 OSMTag->OSMT_state = OSMT_VALID;
201 return OSMTag;
202 }
203
204 static void
OSMalloc_Tagref(OSMallocTag tag)205 OSMalloc_Tagref(OSMallocTag tag)
206 {
207 if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) {
208 panic("OSMalloc_Tagref():'%s' has bad state 0x%08X",
209 tag->OSMT_name, tag->OSMT_state);
210 }
211
212 os_atomic_inc(&tag->OSMT_refcnt, relaxed);
213 }
214
215 static void
OSMalloc_Tagrele(OSMallocTag tag)216 OSMalloc_Tagrele(OSMallocTag tag)
217 {
218 if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) {
219 panic("OSMalloc_Tagref():'%s' has bad state 0x%08X",
220 tag->OSMT_name, tag->OSMT_state);
221 }
222
223 if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) != 0) {
224 return;
225 }
226
227 if (os_atomic_cmpxchg(&tag->OSMT_state,
228 OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) {
229 OSMalloc_tag_spin_lock();
230 (void)remque((queue_entry_t)tag);
231 OSMalloc_tag_unlock();
232 kfree_type(struct _OSMallocTag_, tag);
233 } else {
234 panic("OSMalloc_Tagrele():'%s' has refcnt 0", tag->OSMT_name);
235 }
236 }
237
238 extern typeof(OSMalloc_Tagfree) OSMalloc_Tagfree_external;
239 void
OSMalloc_Tagfree_external(OSMallocTag tag)240 OSMalloc_Tagfree_external(OSMallocTag tag)
241 {
242 if (!os_atomic_cmpxchg(&tag->OSMT_state,
243 OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) {
244 panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X",
245 tag->OSMT_name, tag->OSMT_state);
246 }
247
248 if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) {
249 OSMalloc_tag_spin_lock();
250 (void)remque((queue_entry_t)tag);
251 OSMalloc_tag_unlock();
252 kfree_type(struct _OSMallocTag_, tag);
253 }
254 }
255
256 extern typeof(OSMalloc) OSMalloc_external;
257 void *
OSMalloc_external(uint32_t size,OSMallocTag tag)258 OSMalloc_external(uint32_t size, OSMallocTag tag)
259 {
260 void *addr = NULL;
261 kern_return_t kr;
262
263 OSMalloc_Tagref(tag);
264 if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) {
265 if ((kr = kmem_alloc(kernel_map, (vm_offset_t *)&addr, size,
266 KMA_PAGEABLE | KMA_DATA, vm_tag_bt())) != KERN_SUCCESS) {
267 addr = NULL;
268 }
269 } else {
270 addr = kheap_alloc(KERN_OS_MALLOC, size,
271 Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_KALLOC));
272 }
273
274 if (!addr) {
275 OSMalloc_Tagrele(tag);
276 }
277
278 return addr;
279 }
280
281 extern typeof(OSMalloc_noblock) OSMalloc_noblock_external;
282 void *
OSMalloc_noblock_external(uint32_t size,OSMallocTag tag)283 OSMalloc_noblock_external(uint32_t size, OSMallocTag tag)
284 {
285 void *addr = NULL;
286
287 if (tag->OSMT_attr & OSMT_PAGEABLE) {
288 return NULL;
289 }
290
291 OSMalloc_Tagref(tag);
292 addr = kheap_alloc(KERN_OS_MALLOC, (vm_size_t)size,
293 Z_VM_TAG_BT(Z_NOWAIT, VM_KERN_MEMORY_KALLOC));
294 if (addr == NULL) {
295 OSMalloc_Tagrele(tag);
296 }
297
298 return addr;
299 }
300
301 extern typeof(OSFree) OSFree_external;
302 void
OSFree_external(void * addr,uint32_t size,OSMallocTag tag)303 OSFree_external(void *addr, uint32_t size, OSMallocTag tag)
304 {
305 if ((tag->OSMT_attr & OSMT_PAGEABLE)
306 && (size & ~PAGE_MASK)) {
307 kmem_free(kernel_map, (vm_offset_t)addr, size);
308 } else {
309 kheap_free(KERN_OS_MALLOC, addr, size);
310 }
311
312 OSMalloc_Tagrele(tag);
313 }
314
315 #endif /* XNU_PLATFORM_MacOSX */
316 #if DEBUG || DEVELOPMENT
317
318 static int
319 sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS
320 {
321 #pragma unused(oidp, arg1, arg2)
322 int oldval = 0, val = 0, error = 0;
323
324 oldval = zone_map_jetsam_limit;
325 error = sysctl_io_number(req, oldval, sizeof(int), &val, NULL);
326 if (error || !req->newptr) {
327 return error;
328 }
329
330 return mach_to_bsd_errno(zone_map_jetsam_set_limit(val));
331 }
/* kern.zone_map_jetsam_limit: read/write tunable handled by sysctl_zone_map_jetsam_limit(). */
SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_zone_map_jetsam_limit, "I",
    "Zone map jetsam limit");


/* Provided by the zone allocator: current zone map size and total capacity. */
extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
338
339 static int
340 sysctl_zone_map_size_and_capacity SYSCTL_HANDLER_ARGS
341 {
342 #pragma unused(oidp, arg1, arg2)
343 uint64_t zstats[2];
344 get_zone_map_size(&zstats[0], &zstats[1]);
345
346 return SYSCTL_OUT(req, &zstats, sizeof(zstats));
347 }
348
/* kern.zone_map_size_and_capacity: read-only pair [size, capacity]. */
SYSCTL_PROC(_kern, OID_AUTO, zone_map_size_and_capacity,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, 0,
    &sysctl_zone_map_size_and_capacity, "Q",
    "Current size and capacity of the zone map");

/* kern.zone_wired_pages: exported directly from the zone_pages_wired counter. */
SYSCTL_LONG(_kern, OID_AUTO, zone_wired_pages,
    CTLFLAG_RD | CTLFLAG_LOCKED, &zone_pages_wired,
    "number of wired pages in zones");

/* kern.zone_guard_pages: exported directly from the zone_guard_pages counter. */
SYSCTL_LONG(_kern, OID_AUTO, zone_guard_pages,
    CTLFLAG_RD | CTLFLAG_LOCKED, &zone_guard_pages,
    "number of guard pages in zones");
361
362 #endif /* DEBUG || DEVELOPMENT */
363 #if CONFIG_ZLEAKS
364
/* kern.zleak.* namespace for zone-leak detection tunables. */
SYSCTL_DECL(_kern_zleak);
SYSCTL_NODE(_kern, OID_AUTO, zleak, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "zleak");

/* kern.zleak.active: read-only view of the zleak_active flag. */
SYSCTL_INT(_kern_zleak, OID_AUTO, active, CTLFLAG_RD,
    &zleak_active, 0, "zleak activity");

/*
 * kern.zleak.max_zonemap_size
 *
 * Read the value of the maximum zonemap size in bytes; useful
 * as the maximum size that zleak.global_threshold and
 * zleak.zone_threshold should be set to.
 */
SYSCTL_LONG(_kern_zleak, OID_AUTO, max_zonemap_size,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, &zleak_max_zonemap_size,
    "zleak max zonemap size");
382
383 static int
384 sysctl_zleak_threshold SYSCTL_HANDLER_ARGS
385 {
386 #pragma unused(oidp, arg2)
387 int error;
388 uint64_t value = *(vm_size_t *)arg1;
389
390 error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
391
392 if (error || !req->newptr) {
393 return error;
394 }
395
396 return mach_to_bsd_errno(zleak_update_threshold(arg1, value));
397 }
398
399 /*
400 * kern.zleak.zone_threshold
401 *
402 * Set the per-zone threshold size (in bytes) above which any
403 * zone will automatically start zleak tracking.
404 *
405 * The default value is set in zleak_init().
406 *
407 * Setting this variable will have no effect until zleak tracking is
408 * activated (See above.)
409 */
/* kern.zleak.zone_threshold: backed by zleak_per_zone_tracking_threshold via the shared handler. */
SYSCTL_PROC(_kern_zleak, OID_AUTO, zone_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_per_zone_tracking_threshold, 0, sysctl_zleak_threshold, "Q",
    "zleak per-zone threshold");

#endif /* CONFIG_ZLEAKS */

/* Provided by the zone allocator: total collectable bytes across all zones. */
extern uint64_t get_zones_collectable_bytes(void);
418
419 static int
420 sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS
421 {
422 #pragma unused(oidp, arg1, arg2)
423 uint64_t zones_free_mem = get_zones_collectable_bytes();
424
425 if (!kauth_cred_issuser(kauth_cred_get())) {
426 return EPERM;
427 }
428
429 return SYSCTL_OUT(req, &zones_free_mem, sizeof(zones_free_mem));
430 }
431
/* kern.zones_collectable_bytes: read-only, handled by sysctl_zones_collectable_bytes(). */
SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, &sysctl_zones_collectable_bytes, "Q",
    "Collectable memory in zones");
436