xref: /xnu-11215.1.10/iokit/Kernel/IOLib.cpp (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * HISTORY
30  *
31  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
32  * 17-Nov-98   cpp
33  *
34  */
35 
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern_xnu.h>
40 #include <vm/vm_map_xnu.h>
41 #include <libkern/c++/OSCPPDebug.h>
42 
43 #include <IOKit/assert.h>
44 
45 #include <IOKit/IOReturn.h>
46 #include <IOKit/IOLib.h>
47 #include <IOKit/IOLocks.h>
48 #include <IOKit/IOMapper.h>
49 #include <IOKit/IOBufferMemoryDescriptor.h>
50 #include <IOKit/IOKitDebug.h>
51 
52 #include "IOKitKernelInternal.h"
53 
54 #ifdef IOALLOCDEBUG
55 #include <libkern/OSDebug.h>
56 #include <sys/sysctl.h>
57 #endif
58 
59 #include "libkern/OSAtomic.h"
60 #include <libkern/c++/OSKext.h>
61 #include <IOKit/IOStatisticsPrivate.h>
62 #include <os/log_private.h>
63 #include <sys/msgbuf.h>
64 #include <console/serial_protos.h>
65 
66 #if IOKITSTATS
67 
68 #define IOStatisticsAlloc(type, size) \
69 do { \
70 	IOStatistics::countAlloc(type, size); \
71 } while (0)
72 
73 #else
74 
75 #define IOStatisticsAlloc(type, size)
76 
77 #endif /* IOKITSTATS */
78 
79 
80 #define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
81 
82 
83 extern "C"
84 {
85 mach_timespec_t IOZeroTvalspec = { 0, 0 };
86 
87 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
88 
89 extern int
90 __doprnt(
91 	const char              *fmt,
92 	va_list                 argp,
93 	void                    (*putc)(int, void *),
94 	void                    *arg,
95 	int                     radix,
96 	int                     is_log);
97 
98 extern bool bsd_log_lock(bool);
99 extern void bsd_log_unlock(void);
100 
101 
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103 
104 lck_grp_t       *IOLockGroup;
105 
106 /*
107  * Global variables for use by iLogger
108  * These symbols are for use only by Apple diagnostic code.
109  * Binary compatibility is not guaranteed for kexts that reference these symbols.
110  */
111 
112 void *_giDebugLogInternal       = NULL;
113 void *_giDebugLogDataInternal   = NULL;
114 void *_giDebugReserved1         = NULL;
115 void *_giDebugReserved2         = NULL;
116 
117 #if defined(__x86_64__)
118 iopa_t gIOBMDPageAllocator;
119 #endif /* defined(__x86_64__) */
120 
121 /*
122  * Static variables for this module.
123  */
124 
125 static queue_head_t gIOMallocContiguousEntries;
126 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
127 
128 #if __x86_64__
129 enum { kIOMaxPageableMaps    = 8 };
130 enum { kIOMaxFixedRanges     = 4 };
131 enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
132 enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
133 #else
134 enum { kIOMaxPageableMaps    = 16 };
135 enum { kIOMaxFixedRanges     = 4 };
136 enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
137 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
138 #endif
139 
140 typedef struct {
141 	vm_map_t            map;
142 	vm_offset_t address;
143 	vm_offset_t end;
144 } IOMapData;
145 
146 static SECURITY_READ_ONLY_LATE(struct mach_vm_range)
147 gIOKitPageableFixedRanges[kIOMaxFixedRanges];
148 
149 static struct {
150 	UInt32      count;
151 	UInt32      hint;
152 	IOMapData   maps[kIOMaxPageableMaps];
153 	lck_mtx_t * lock;
154 } gIOKitPageableSpace;
155 
156 #if defined(__x86_64__)
157 static iopa_t gIOPageablePageAllocator;
158 
159 uint32_t  gIOPageAllocChunkBytes;
160 #endif /* defined(__x86_64__) */
161 
162 #if IOTRACKING
163 IOTrackingQueue * gIOMallocTracking;
164 IOTrackingQueue * gIOWireTracking;
165 IOTrackingQueue * gIOMapTracking;
166 #endif /* IOTRACKING */
167 
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169 
170 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed0,
171     &gIOKitPageableFixedRanges[0], kIOPageableMapSize);
172 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed1,
173     &gIOKitPageableFixedRanges[1], kIOPageableMapSize);
174 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed2,
175     &gIOKitPageableFixedRanges[2], kIOPageableMapSize);
176 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed3,
177     &gIOKitPageableFixedRanges[3], kIOPageableMapSize);
void
IOLibInit(void)
{
	/*
	 * One-time initialization of the IOKit support library: the IOKit
	 * lock group, optional allocation-tracking queues, the first pageable
	 * submap, the contiguous-allocation bookkeeping list and, on x86_64,
	 * the iopa page allocators.
	 *
	 * NOTE(review): the guard is a plain static bool with no locking —
	 * presumably only called from single-threaded early boot; confirm
	 * before adding new callers.
	 */
	static bool libInitialized;

	if (libInitialized) {
		return;
	}

	IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
	/* Allocation / wire / map tracking queues used by leak and debug tooling. */
	IOTrackingInit();
	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
	    kIOTrackingQueueTypeAlloc,
	    37);
	gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

	/* Smaller capture granularity (per page) only when kIOTracking is enabled. */
	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
	gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
	    kIOTrackingQueueTypeDefaultOn
	    | kIOTrackingQueueTypeMap
	    | kIOTrackingQueueTypeUser,
	    0);
#endif

	/* Carve the first pageable submap out of the statically registered
	 * fixed range; further maps are added lazily by IOIteratePageableMaps(). */
	gIOKitPageableSpace.maps[0].map = kmem_suballoc(kernel_map,
	    &gIOKitPageableFixedRanges[0].min_address,
	    kIOPageableMapSize,
	    VM_MAP_CREATE_PAGEABLE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    (kms_flags_t)(KMS_PERMANENT | KMS_DATA | KMS_NOFAIL),
	    VM_KERN_MEMORY_IOKIT).kmr_submap;

	gIOKitPageableSpace.maps[0].address = gIOKitPageableFixedRanges[0].min_address;
	gIOKitPageableSpace.maps[0].end     = gIOKitPageableFixedRanges[0].max_address;
	gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	gIOKitPageableSpace.hint            = 0;
	gIOKitPageableSpace.count           = 1;

	gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue_init( &gIOMallocContiguousEntries );

#if defined(__x86_64__)
	/* 64 chunks per page; the iopa_page_t bookkeeping must fit in one chunk. */
	gIOPageAllocChunkBytes = PAGE_SIZE / 64;

	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
	iopa_init(&gIOBMDPageAllocator);
	iopa_init(&gIOPageablePageAllocator);
#endif /* defined(__x86_64__) */


	libInitialized = true;
}
232 
233 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
234 
235 vm_size_t
log2up(vm_size_t size)236 log2up(vm_size_t size)
237 {
238 	if (size <= 1) {
239 		size = 0;
240 	} else {
241 #if __LP64__
242 		size = 64 - __builtin_clzl(size - 1);
243 #else
244 		size = 32 - __builtin_clzl(size - 1);
245 #endif
246 	}
247 	return size;
248 }
249 
250 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251 
252 IOThread
IOCreateThread(IOThreadFunc fcn,void * arg)253 IOCreateThread(IOThreadFunc fcn, void *arg)
254 {
255 	kern_return_t   result;
256 	thread_t                thread;
257 
258 	result = kernel_thread_start((thread_continue_t)(void (*)(void))fcn, arg, &thread);
259 	if (result != KERN_SUCCESS) {
260 		return NULL;
261 	}
262 
263 	thread_deallocate(thread);
264 
265 	return thread;
266 }
267 
268 
/* Terminate the calling thread; does not return on success. */
void
IOExitThread(void)
{
	(void) thread_terminate(current_thread());
}
274 
275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276 
#if IOTRACKING
/* Header prepended to IOMalloc allocations when tracking is compiled in. */
struct IOLibMallocHeader {
	IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
/* Header bytes actually reserved: zero at runtime unless TRACK_ALLOC is on. */
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
288 
289 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
290 
291 __typed_allocators_ignore_push // allocator implementation
292 
/*
 * Heap allocator behind IOMalloc()/IOMallocZero(): allocates from the given
 * kalloc heap, optionally prepending an IOLibMallocHeader for allocation
 * tracking. Returns NULL on failure or header-size overflow. The returned
 * pointer is offset past the header; free with IOFree_internal() and the
 * same size.
 */
void *
(IOMalloc_internal)(struct kalloc_heap *kheap, vm_size_t size,
zalloc_flags_t flags)
{
	void * address;
	vm_size_t allocSize;

	allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
		return NULL;                                          // overflow
	}
#endif
	address = kheap_alloc(kheap, allocSize,
	    Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));

	if (address) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			hdr = (typeof(hdr))address;
			bzero(&hdr->tracking, sizeof(hdr->tracking));
			/* Stored inverted so leak scanning does not mistake the
			 * tracking record for a live reference. */
			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
			hdr->tracking.size    = size;
			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
		}
#endif
		/* Hand the caller the address just past the (possibly empty) header. */
		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
	}

	return address;
}
330 
/*
 * Counterpart of IOMalloc_internal(): steps back over the tracking header
 * (when present), removes the tracking record, and returns the memory to
 * the given kalloc heap. A NULL inAddress is a no-op. size must match the
 * allocation size; under tracking a mismatch is reported and corrected.
 */
void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
	void * address;

	if ((address = inAddress)) {
		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			struct ptr_reference { void * ptr; };
			volatile struct ptr_reference ptr;

			// we're about to block in IOTrackingRemove(), make sure the original pointer
			// exists in memory or a register for leak scanning to find
			ptr.ptr = inAddress;

			hdr = (typeof(hdr))address;
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
			ptr.ptr = NULL;
		}
#endif

		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
	}
}
367 
void *
IOMalloc_external(
	vm_size_t size);
/* Exported IOMalloc entry point: default heap, backtrace-derived VM tag. */
void *
IOMalloc_external(
	vm_size_t size)
{
	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_VM_TAG_BT_BIT);
}
377 
/* Free memory obtained from IOMalloc(); size must match the allocation. */
void
IOFree(void * inAddress, vm_size_t size)
{
	IOFree_internal(KHEAP_DEFAULT, inAddress, size);
}
383 
384 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
385 
void *
IOMallocZero_external(
	vm_size_t size);
/* Exported zeroing variant of IOMalloc: default heap, Z_ZERO + backtrace tag. */
void *
IOMallocZero_external(
	vm_size_t size)
{
	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO_VM_TAG_BT_BIT);
}
395 
396 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
397 
398 vm_tag_t
IOMemoryTag(vm_map_t map)399 IOMemoryTag(vm_map_t map)
400 {
401 	vm_tag_t tag;
402 
403 	if (!vm_kernel_map_is_kernel(map)) {
404 		return VM_MEMORY_IOKIT;
405 	}
406 
407 	tag = vm_tag_bt();
408 	if (tag == VM_KERN_MEMORY_NONE) {
409 		tag = VM_KERN_MEMORY_IOKIT;
410 	}
411 
412 	return tag;
413 }
414 
415 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
416 
/*
 * Header stored immediately before an aligned (sub-page) allocation.
 * alignMask / allocationOffset let the free path recover and validate the
 * original allocation start; tracking is present only under IOTRACKING.
 */
struct IOLibPageMallocHeader {
	mach_vm_size_t    alignMask;
	mach_vm_offset_t  allocationOffset;
#if IOTRACKING
	IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
/* Header bytes actually reserved: tracking space only when TRACK_ALLOC is on. */
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
#endif
430 
431 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
432 
/*
 * Record, in the header preceding alignedStart, the alignment mask and the
 * offset back to the true allocation start. On ptrauth targets the offset
 * is signed with a discriminator blended from (alignedStart | alignMask),
 * so a forged or relocated header fails authentication on the free path.
 */
static __header_always_inline void
IOMallocAlignedSetHdr(
	IOLibPageMallocHeader  *hdr,
	mach_vm_size_t          alignMask,
	mach_vm_address_t       allocationStart,
	mach_vm_address_t       alignedStart)
{
	mach_vm_offset_t        offset = alignedStart - allocationStart;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_sign_unauthenticated((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	hdr->allocationOffset = offset;
	hdr->alignMask = alignMask;
}
450 
/*
 * Panic helper for IOMallocAlignedGetAddress(): re-runs the same validation
 * steps, in the same order, so the panic message names the first check that
 * failed. Never returns.
 */
__abortlike
static void
IOMallocAlignedHdrCorruptionPanic(
	mach_vm_offset_t        offset,
	mach_vm_size_t          alignMask,
	mach_vm_address_t       alignedStart,
	vm_size_t               size)
{
	mach_vm_address_t       address = 0;
	mach_vm_address_t       recalAlignedStart = 0;

	if (os_sub_overflow(alignedStart, offset, &address)) {
		panic("Invalid offset %p for aligned addr %p", (void *)offset,
		    (void *)alignedStart);
	}
	if (os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
	    &recalAlignedStart)) {
		panic("alignMask 0x%llx overflows recalAlignedStart %p for provided addr "
		    "%p", alignMask, (void *)recalAlignedStart, (void *)alignedStart);
	}
	/* Recompute the aligned address from scratch; allow for the
	 * round_page() adjustment the allocation path may have applied. */
	if (((recalAlignedStart &= ~alignMask) != alignedStart) &&
	    (round_page(recalAlignedStart) != alignedStart)) {
		panic("Recalculated aligned addr %p doesn't match provided addr %p",
		    (void *)recalAlignedStart, (void *)alignedStart);
	}
	if (offset < sizeofIOLibPageMallocHeader) {
		panic("Offset %zd doesn't accomodate IOLibPageMallocHeader for aligned "
		    "addr %p", (size_t)offset, (void *)alignedStart);
	}
	panic("alignMask 0x%llx overflows adjusted size %zd for aligned addr %p",
	    alignMask, (size_t)size, (void *)alignedStart);
}
483 
/*
 * Recover the true allocation start from the header preceding alignedStart,
 * authenticating the stored offset on ptrauth targets and cross-checking it
 * by re-deriving the aligned address. Also grows *size by alignMask to match
 * the slack added at allocation time. Any inconsistency (corrupted or forged
 * header) panics via IOMallocAlignedHdrCorruptionPanic().
 */
static __header_always_inline mach_vm_address_t
IOMallocAlignedGetAddress(
	IOLibPageMallocHeader  *hdr,
	mach_vm_address_t       alignedStart,
	vm_size_t              *size)
{
	mach_vm_address_t       address = 0;
	mach_vm_address_t       recalAlignedStart = 0;
	mach_vm_offset_t        offset = hdr->allocationOffset;
	mach_vm_size_t          alignMask = hdr->alignMask;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_auth_data((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	/* All checks in one expression; the panic helper replays them in
	 * order to identify which one failed. */
	if (os_sub_overflow(alignedStart, offset, &address) ||
	    os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
	    &recalAlignedStart) ||
	    (((recalAlignedStart &= ~alignMask) != alignedStart) &&
	    (round_page(recalAlignedStart) != alignedStart)) ||
	    (offset < sizeofIOLibPageMallocHeader) ||
	    os_add_overflow(*size, alignMask, size)) {
		IOMallocAlignedHdrCorruptionPanic(offset, alignMask, alignedStart, *size);
	}
	return address;
}
511 
/*
 * Aligned allocator behind IOMallocAligned(). Three paths:
 *  - size + header >= page: allocate pages directly (no header needed;
 *    kmem_free() can reclaim from the address + size alone);
 *  - smaller, but with alignment slack >= page: kmem_alloc() plus an
 *    in-band IOLibPageMallocHeader before the aligned address;
 *  - otherwise: sub-page kheap allocation plus the same in-band header.
 * alignment is rounded up to a power of two (must fit in 32 bits).
 * Returns NULL for size 0, oversized alignment, or allocation failure.
 * Free with IOFreeAligned_internal() using the same size.
 */
void *
(IOMallocAligned_internal)(struct kalloc_heap *kheap, vm_size_t size,
vm_size_t alignment, zalloc_flags_t flags)
{
	kern_return_t           kr;
	vm_offset_t             address;
	vm_offset_t             allocationAddress;
	vm_size_t               adjustedSize;
	uintptr_t               alignMask;
	IOLibPageMallocHeader * hdr;
	kma_flags_t kma_flags = KMA_NONE;

	if (size == 0) {
		return NULL;
	}
	/* Alignment must be representable in 32 bits. */
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}

	if (flags & Z_ZERO) {
		kma_flags = KMA_ZERO;
	}

	if (kheap == KHEAP_DATA_BUFFERS) {
		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
	}

	/* Round alignment up to a power of two. */
	alignment = (1UL << log2up((uint32_t) alignment));
	alignMask = alignment - 1;
	adjustedSize = size + sizeofIOLibPageMallocHeader;

	if (size > adjustedSize) {
		address = 0; /* overflow detected */
	} else if (adjustedSize >= page_size) {
		/* Page-backed path: no header; page allocator honors alignMask. */
		kr = kernel_memory_allocate(kernel_map, &address,
		    size, alignMask, kma_flags, IOMemoryTag(kernel_map));
		if (KERN_SUCCESS != kr) {
			address = 0;
		}
#if IOTRACKING
		else if (TRACK_ALLOC) {
			IOTrackingAlloc(gIOMallocTracking, address, size);
		}
#endif
	} else {
		/* Sub-page path: over-allocate by alignMask, then place a header
		 * immediately before the aligned address. */
		adjustedSize += alignMask;

		if (adjustedSize >= page_size) {
			kr = kmem_alloc(kernel_map, &allocationAddress,
			    adjustedSize, kma_flags, IOMemoryTag(kernel_map));
			if (KERN_SUCCESS != kr) {
				allocationAddress = 0;
			}
		} else {
			allocationAddress = (vm_address_t) kheap_alloc(kheap,
			    adjustedSize, Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
		}

		if (allocationAddress) {
			/* First aligned address that leaves room for the header. */
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				/* Stored inverted so leak scanning ignores the record. */
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	assert(0 == (address & alignMask));

	if (address) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

	return (void *) address;
}
600 
/*
 * Free memory from IOMallocAligned_internal(). Mirrors the allocation
 * paths: size + header >= page means a direct page allocation (freed by
 * address + size); otherwise the header before the address yields the true
 * allocation start and adjusted size, which went to either kmem or kheap.
 * NULL address is a no-op; size must match the allocation.
 */
void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = size + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
		/* Page-backed allocation: no header was stored. */
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header and grows adjustedSize by the alignment slack. */
		allocationAddress = IOMallocAlignedGetAddress(hdr,
		    (mach_vm_address_t)address, &adjustedSize);

#if IOTRACKING
		if (TRACK_ALLOC) {
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		/* Route back to whichever allocator the sub-page path used. */
		if (adjustedSize >= page_size) {
			kmem_free(kernel_map, allocationAddress, adjustedSize);
		} else {
			kheap_free(kheap, allocationAddress, adjustedSize);
		}
	}

#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
650 
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment);
/* Exported IOMallocAligned entry point: data-buffers heap, backtrace tag. */
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment)
{
	return IOMallocAligned_internal(KHEAP_DATA_BUFFERS, size, alignment,
	           Z_VM_TAG_BT_BIT);
}
661 
/* Free memory from IOMallocAligned(); size must match the allocation. */
void
IOFreeAligned(
	void                  * address,
	vm_size_t               size)
{
	IOFreeAligned_internal(KHEAP_DATA_BUFFERS, address, size);
}
669 
670 __typed_allocators_ignore_pop
671 
672 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
673 
/*
 * Free memory from IOKernelAllocateWithPhysicalRestrict(). The size test
 * recomputes 2*size + header to match the allocation-time sizing: large
 * requests were page allocations freed by address + size; small ones carry
 * an IOLibPageMallocHeader that yields the true kheap allocation.
 * NULL address is a no-op; size must match the allocation.
 */
void
IOKernelFreePhysical(
	kalloc_heap_t         kheap,
	mach_vm_address_t     address,
	mach_vm_size_t        size)
{
	vm_address_t       allocationAddress;
	vm_size_t          adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header and grows adjustedSize by the alignment slack. */
		allocationAddress = IOMallocAlignedGetAddress(hdr, address, &adjustedSize);
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		__typed_allocators_ignore(kheap_free(kheap, allocationAddress, adjustedSize));
	}

	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}
714 
715 #if __arm64__
716 extern unsigned long gPhysBase, gPhysSize;
717 #endif
718 
/*
 * Allocate wired kernel memory, optionally physically contiguous and/or
 * below a maximum physical address.
 *
 * kheap:      heap for small (sub-page) allocations; KHEAP_DATA_BUFFERS
 *             also selects KMA_DATA for page allocations.
 * size:       byte count (0 returns 0).
 * maxPhys:    highest acceptable physical address, or 0 for no limit.
 * alignment:  virtual alignment; 0 is treated as 1.
 * contiguous: request physically contiguous pages (only honored when the
 *             request is larger than a page or alignment exceeds a page).
 *
 * Returns the (virtual) address, or 0 on failure or overflow.
 * Free with IOKernelFreePhysical() using the same size.
 */
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
	kalloc_heap_t         kheap,
	mach_vm_size_t        size,
	mach_vm_address_t     maxPhys,
	mach_vm_size_t        alignment,
	bool                  contiguous)
{
	kern_return_t           kr;
	mach_vm_address_t       address;
	mach_vm_address_t       allocationAddress;
	mach_vm_size_t          adjustedSize;
	mach_vm_address_t       alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return 0;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	alignMask = alignment - 1;

	/* Small-path sizing (2*size + header); overflow fails the request. */
	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
		return 0;
	}

	contiguous = (contiguous && (adjustedSize > page_size))
	    || (alignment > page_size);

	if (contiguous || maxPhys) {
		kma_flags_t options = KMA_ZERO;
		vm_offset_t virt;

		if (kheap == KHEAP_DATA_BUFFERS) {
			options = (kma_flags_t) (options | KMA_DATA);
		}

		/* Page path allocates exactly size; re-evaluate contiguity. */
		adjustedSize = size;
		contiguous = (contiguous && (adjustedSize > page_size))
		    || (alignment > page_size);

		if (!contiguous) {
#if __arm64__
			/* A limit beyond the end of physical memory is no limit. */
			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
				maxPhys = 0;
			} else
#endif
			if (maxPhys <= 0xFFFFFFFF) {
				/* Below-4GB restriction maps onto KMA_LOMEM. */
				maxPhys = 0;
				options = (kma_flags_t)(options | KMA_LOMEM);
			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
				maxPhys = 0;
			}
		}
		if (contiguous || maxPhys) {
			kr = kmem_alloc_contig(kernel_map, &virt, size,
			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
			    options, IOMemoryTag(kernel_map));
		} else {
			kr = kernel_memory_allocate(kernel_map, &virt,
			    size, alignMask, options, IOMemoryTag(kernel_map));
		}
		if (KERN_SUCCESS == kr) {
			address = virt;
#if IOTRACKING
			if (TRACK_ALLOC) {
				IOTrackingAlloc(gIOMallocTracking, address, size);
			}
#endif
		} else {
			address = 0;
		}
	} else {
		/* Small path: kheap allocation with an in-band header before the
		 * aligned address (same layout as IOMallocAligned_internal). */
		adjustedSize += alignMask;
		if (adjustedSize < size) {
			return 0;
		}
		/* BEGIN IGNORE CODESTYLE */
		__typed_allocators_ignore_push // allocator implementation
		allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
		    adjustedSize, Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));
		__typed_allocators_ignore_pop
		/* END IGNORE CODESTYLE */

		if (allocationAddress) {
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			/* Keep the result within one physical page so it is
			 * physically contiguous; 2*size over-allocation makes
			 * room for the round-up. */
			if (atop_32(address) != atop_32(address + size - 1)) {
				address = round_page(address);
			}

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				/* Stored inverted so leak scanning ignores the record. */
				hdr->tracking.address = ~address;
				hdr->tracking.size    = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	if (address) {
		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
	}

	return address;
}
837 
838 
839 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
840 
/*
 * Bookkeeping record linking a virtual address handed out by
 * IOMallocContiguous() (physical-address variant) back to the
 * IOBufferMemoryDescriptor that owns the pages, kept on
 * gIOMallocContiguousEntries so IOFreeContiguous() can find it.
 */
struct _IOMallocContiguousEntry {
	mach_vm_address_t          virtualAddr;
	IOBufferMemoryDescriptor * md;
	queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
847 
/*
 * Allocate physically contiguous memory. Without a physicalAddress out
 * parameter this is a direct wired allocation; with one, the pages come
 * from an IOBufferMemoryDescriptor (below-4GB physical mask, adjusted for
 * alignment) tracked on a global list so IOFreeContiguous() can release
 * the descriptor. Returns the virtual address, or NULL on failure.
 */
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
	mach_vm_address_t   address = 0;

	if (size == 0) {
		return NULL;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	/* Do we want a physical address? */
	if (!physicalAddress) {
		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_DEFAULT,
		    size, 0 /*maxPhys*/, alignment, true);
	} else {
		do {
			IOBufferMemoryDescriptor * bmd;
			mach_vm_address_t          physicalMask;
			vm_offset_t                alignMask;

			/* Physical mask keeps the buffer below 4GB and aligned. */
			alignMask = alignment - 1;
			physicalMask = (0xFFFFFFFF ^ alignMask);

			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
			if (!bmd) {
				break;
			}

			_IOMallocContiguousEntry *
			    entry = IOMallocType(_IOMallocContiguousEntry);
			if (!entry) {
				bmd->release();
				break;
			}
			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
			entry->md          = bmd;
			/* Publish the entry so the free path can find the descriptor. */
			lck_mtx_lock(gIOMallocContiguousEntriesLock);
			queue_enter( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			lck_mtx_unlock(gIOMallocContiguousEntriesLock);

			address          = (mach_vm_address_t) entry->virtualAddr;
			*physicalAddress = bmd->getPhysicalAddress();
		}while (false);
	}

	return (void *) address;
}
900 
/*
 * Free memory from IOMallocContiguous(). If the address belongs to a
 * descriptor-backed allocation (tracked on gIOMallocContiguousEntries),
 * release the descriptor and the entry; otherwise it was a direct wired
 * allocation and goes back through IOKernelFreePhysical().
 * NULL address is a no-op; size must match the allocation.
 */
void
IOFreeContiguous(void * _address, vm_size_t size)
{
	_IOMallocContiguousEntry * entry;
	IOMemoryDescriptor *       md = NULL;

	mach_vm_address_t address = (mach_vm_address_t) _address;

	if (!address) {
		return;
	}

	assert(size);

	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_iterate( &gIOMallocContiguousEntries, entry,
	    _IOMallocContiguousEntry *, link )
	{
		if (entry->virtualAddr == address) {
			md   = entry->md;
			queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			break;
		}
	}
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	/* entry is only valid here when md was found above. */
	if (md) {
		md->release();
		IOFreeType(entry, _IOMallocContiguousEntry);
	} else {
		IOKernelFreePhysical(KHEAP_DEFAULT, (mach_vm_address_t) address, size);
	}
}
935 
936 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
937 
/*
 * Run callback(map, ref) against the pageable submaps, starting at the
 * last successful map (hint) and working backwards, until one succeeds.
 * When every map is out of space (KERN_NO_SPACE) and fewer than
 * kIOMaxPageableMaps exist, grow the pool by creating a new pageable
 * submap (using a pre-registered fixed range when one remains) and retry.
 * Returns the callback's result, kIOReturnBadArgument for oversized
 * requests, or the submap-creation error.
 */
kern_return_t
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
{
	kern_return_t       kr = kIOReturnNotReady;
	kmem_return_t       kmr;
	vm_size_t           segSize;
	UInt32              attempts;
	UInt32              index;
	mach_vm_offset_t    min;
	int                 flags;

	if (size > kIOPageableMaxMapSize) {
		return kIOReturnBadArgument;
	}

	do {
		/* Try each existing map once, newest-successful first.
		 * hint/count are read unlocked; worst case is a wasted retry. */
		index = gIOKitPageableSpace.hint;
		attempts = gIOKitPageableSpace.count;
		while (attempts--) {
			kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
			if (KERN_SUCCESS == kr) {
				gIOKitPageableSpace.hint = index;
				break;
			}
			if (index) {
				index--;
			} else {
				index = gIOKitPageableSpace.count - 1;
			}
		}
		if (KERN_NO_SPACE != kr) {
			break;
		}

		/* All maps full: grow the pool under the lock. */
		lck_mtx_lock( gIOKitPageableSpace.lock );

		index = gIOKitPageableSpace.count;
		if (index >= kIOMaxPageableMaps) {
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		if (size < kIOPageableMapSize) {
			segSize = kIOPageableMapSize;
		} else {
			segSize = size;
		}

		/*
		 * Use the predefined ranges if available, else default to data
		 */
		if (index < kIOMaxFixedRanges) {
			min = gIOKitPageableFixedRanges[index].min_address;
			flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
		} else {
			min = 0;
			flags = VM_FLAGS_ANYWHERE;
		}
		kmr = kmem_suballoc(kernel_map,
		    &min,
		    segSize,
		    VM_MAP_CREATE_PAGEABLE,
		    flags,
		    (kms_flags_t)(KMS_PERMANENT | KMS_DATA),
		    VM_KERN_MEMORY_IOKIT);
		if (kmr.kmr_return != KERN_SUCCESS) {
			kr = kmr.kmr_return;
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		gIOKitPageableSpace.maps[index].map     = kmr.kmr_submap;
		gIOKitPageableSpace.maps[index].address = min;
		gIOKitPageableSpace.maps[index].end     = min + segSize;
		gIOKitPageableSpace.hint                = index;
		gIOKitPageableSpace.count               = index + 1;

		lck_mtx_unlock( gIOKitPageableSpace.lock );
	} while (true);

	return kr;
}
1021 
/* Parameter block threaded through IOIteratePageableMaps to
 * IOMallocPageableCallback; 'address' receives the allocation. */
struct IOMallocPageableRef {
	vm_offset_t address;    // out: allocated address (0 on failure)
	vm_size_t   size;       // in:  requested byte count
	vm_size_t   tag;        // in:  VM tag for accounting -- see note below
};
1027 
/* IOIteratePageableMaps callback: attempt a pageable data allocation
 * in 'map', storing the result into the shared ref block. */
static kern_return_t
IOMallocPageableCallback(vm_map_t map, void * _ref)
{
	struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
	kma_flags_t flags = (kma_flags_t)(KMA_PAGEABLE | KMA_DATA);

	return kmem_alloc( map, &ref->address, ref->size, flags, ref->tag );
}
1036 
1037 static void *
IOMallocPageablePages(vm_size_t size,vm_size_t alignment,vm_tag_t tag)1038 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
1039 {
1040 	kern_return_t              kr = kIOReturnNotReady;
1041 	struct IOMallocPageableRef ref;
1042 
1043 	if (alignment > page_size) {
1044 		return NULL;
1045 	}
1046 	if (size > kIOPageableMaxMapSize) {
1047 		return NULL;
1048 	}
1049 
1050 	ref.size = size;
1051 	ref.tag  = tag;
1052 	kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
1053 	if (kIOReturnSuccess != kr) {
1054 		ref.address = 0;
1055 	}
1056 
1057 	return (void *) ref.address;
1058 }
1059 
1060 vm_map_t
IOPageableMapForAddress(uintptr_t address)1061 IOPageableMapForAddress( uintptr_t address )
1062 {
1063 	vm_map_t    map = NULL;
1064 	UInt32      index;
1065 
1066 	for (index = 0; index < gIOKitPageableSpace.count; index++) {
1067 		if ((address >= gIOKitPageableSpace.maps[index].address)
1068 		    && (address < gIOKitPageableSpace.maps[index].end)) {
1069 			map = gIOKitPageableSpace.maps[index].map;
1070 			break;
1071 		}
1072 	}
1073 	if (!map) {
1074 		panic("IOPageableMapForAddress: null");
1075 	}
1076 
1077 	return map;
1078 }
1079 
1080 static void
IOFreePageablePages(void * address,vm_size_t size)1081 IOFreePageablePages(void * address, vm_size_t size)
1082 {
1083 	vm_map_t map;
1084 
1085 	map = IOPageableMapForAddress((vm_address_t) address);
1086 	if (map) {
1087 		kmem_free( map, (vm_offset_t) address, size);
1088 	}
1089 }
1090 
#if defined(__x86_64__)
/* iopa_proc_t backend: obtain one whole pageable page for the chunk
 * allocator.  'kheap' is unused by this backend. */
static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
{
	return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
#endif /* defined(__x86_64__) */
1098 
/*
 * Common implementation for IOMallocPageable / IOMallocPageableZero.
 * On x86_64, sub-page requests are carved out of shared pages by the
 * iopa chunk allocator; larger requests (and all requests on other
 * architectures) are served as whole pageable pages.
 */
static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
	void * addr;

	/* Alignment must fit in 32 bits (iopa_alloc takes a uint32_t). */
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}
#if defined(__x86_64__)
	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
	    alignment > page_size) {
		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
		/* Memory allocated this way will already be zeroed. */
	} else {
		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
		    &IOMallocOnePageablePage, KHEAP_DEFAULT, size, (uint32_t) alignment));
		/* Chunk-allocator memory may be recycled; zero on request. */
		if (addr && zeroed) {
			bzero(addr, size);
		}
	}
#else /* !defined(__x86_64__) */
	/* Zero-size requests are bumped to one byte (a zero-length
	 * allocation would presumably fail downstream -- see the matching
	 * bump in IOFreePageable). */
	vm_size_t allocSize = size;
	if (allocSize == 0) {
		allocSize = 1;
	}
	addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
	/* already zeroed */
#endif /* defined(__x86_64__) */

	if (addr) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
	}

	return addr;
}
1137 
/* Allocate pageable kernel memory; contents are not guaranteed to be
 * zeroed (see IOMallocPageableZero).  Free with IOFreePageable using
 * the same size. */
void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}
1143 
/* Allocate pageable kernel memory, guaranteed zero-filled.  Free with
 * IOFreePageable using the same size. */
void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}
1149 
/*
 * Release memory obtained from IOMallocPageable/IOMallocPageableZero.
 * 'size' must match the original request: it selects between the
 * x86_64 chunk allocator and whole-page free, mirroring the split in
 * IOMallocPageableInternal.
 */
void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	IOStatisticsAlloc(kIOStatisticsFreePageable, size);

#if defined(__x86_64__)
	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
		/* iopa_free returns the page base only once the whole page
		 * is empty (NULL otherwise), so pages are released lazily. */
		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
		size = page_size;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#else /* !defined(__x86_64__) */
	/* Mirror the one-byte bump for zero-size allocations. */
	if (size == 0) {
		size = 1;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#endif /* defined(__x86_64__) */
}
1175 
1176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1177 
/* Forward declaration for the exported (unmangled) entry point. */
void *
IOMallocData_external(
	vm_size_t size);
/* Allocate 'size' bytes from the KHEAP_DATA_BUFFERS heap; contents are
 * not zeroed. */
void *
IOMallocData_external(vm_size_t size)
{
	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_VM_TAG_BT_BIT);
}
1186 
/* Forward declaration for the exported (unmangled) entry point. */
void *
IOMallocZeroData_external(
	vm_size_t size);
/* Allocate 'size' zero-filled bytes from the KHEAP_DATA_BUFFERS heap. */
void *
IOMallocZeroData_external(vm_size_t size)
{
	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_ZERO_VM_TAG_BT_BIT);
}
1195 
/* Free memory obtained from IOMallocData/IOMallocZeroData; 'size' must
 * match the original request. */
void
IOFreeData(void * address, vm_size_t size)
{
	return IOFree_internal(KHEAP_DATA_BUFFERS, address, size);
}
1201 
1202 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1203 
1204 __typed_allocators_ignore_push // allocator implementation
1205 
/*
 * Typed-allocator entry point backing IOMallocType.  Memory is zeroed;
 * views that are not VM-backed are allocated Z_NOFAIL and so cannot
 * return NULL.
 */
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
		void *mem = IOMalloc_internal(KHEAP_DEFAULT, kt_size, Z_ZERO);
		if (!IOMallocType_from_vm(kt_view)) {
			assert(mem);
		}
		return mem;
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
	if (!IOMallocType_from_vm(kt_view)) {
		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
	}
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kalloc_type_impl_external(kt_view, kt_flags);
}
1235 
/* Typed-allocator free backing IOFreeType; pairs with IOMallocTypeImpl
 * (including the IOTRACKING fallback path). */
void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address,
		           kalloc_type_get_size(kt_view->kt_size));
	}
#endif
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kfree_type_impl_external(kt_view, address);
}
1252 
/*
 * Variable-size typed allocation backing IOMallocTypeVar.  Memory is
 * zeroed; the allocation is tagged VM_KERN_MEMORY_KALLOC_TYPE.
 */
void *
IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO);
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);

	kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
	return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
}
1271 
/* Variable-size typed free backing IOFreeTypeVar; pairs with
 * IOMallocTypeVarImpl (including the IOTRACKING fallback path). */
void
IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
    vm_size_t size)
{
#if IOTRACKING
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address, size);
	}
#endif

	return kfree_type_var_impl(kt_view, address, size);
}
1284 
1285 __typed_allocators_ignore_pop
1286 
1287 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1288 
1289 #if defined(__x86_64__)
1290 
1291 
/* Initialize a chunk-allocator instance: zero all counters, allocate
 * its lock, and start with an empty page list. */
extern "C" void
iopa_init(iopa_t * a)
{
	bzero(a, sizeof(*a));
	a->lock = IOLockAlloc();
	queue_init(&a->list);
}
1299 
/*
 * Try to carve 'count' contiguous free chunks out of page 'pa'.
 * pa->avail is a 64-bit occupancy bitmap with the MSB representing
 * chunk 0; a set bit means the chunk is free.  'align' masks the
 * permissible start positions (see align_masks in iopa_alloc).
 * Returns the chunk run's address, or 0 if no suitable run exists.
 * Caller must hold the allocator lock (or own the page exclusively).
 */
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
	uint32_t n, s;
	uint64_t avail = pa->avail;

	assert(avail);

	// find strings of count 1 bits in avail
	/* Folding trick: after this loop a bit remains set only where a
	 * run of 'count' consecutive free chunks begins. */
	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail = avail & (avail << s);
	}
	// and aligned
	avail &= align;

	if (avail) {
		/* Take the lowest-numbered run; n is its chunk index. */
		n = __builtin_clzll(avail);
		/* Clear 'count' bits starting at chunk n (mark in use). */
		pa->avail &= ~((-1ULL << (64 - count)) >> n);
		/* A now-full page is unlinked from the free-page list. */
		if (!pa->avail && pa->link.next) {
			remque(&pa->link);
			pa->link.next = NULL;
		}
		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
	}

	return 0;
}
1328 
/*
 * Allocate 'bytes' (rounded up to whole gIOPageAllocChunkBytes chunks)
 * with alignment 'balign' from chunk allocator 'a', calling 'alloc' to
 * obtain a fresh page when no existing page has room.  Returns the
 * allocation address, or 0 on failure.
 */
uintptr_t
iopa_alloc(
	iopa_t          * a,
	iopa_proc_t       alloc,
	kalloc_heap_t     kheap,
	vm_size_t         bytes,
	vm_size_t         balign)
{
	/* Masks of chunk start positions for alignment of 2^idx chunks:
	 * index 0 = any chunk, 1 = every 2nd, ..., 6 = chunk 0 only. */
	static const uint64_t align_masks[] = {
		0xFFFFFFFFFFFFFFFF,
		0xAAAAAAAAAAAAAAAA,
		0x8888888888888888,
		0x8080808080808080,
		0x8000800080008000,
		0x8000000080000000,
		0x8000000000000000,
	};
	iopa_page_t * pa;
	uintptr_t     addr = 0;
	uint32_t      count;
	uint64_t      align;
	vm_size_t     align_masks_idx;

	/* Sizes must fit in 32 bits; zero is treated as one byte. */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}
	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

	/* Convert byte alignment to a chunk-position mask. */
	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
	align = align_masks[align_masks_idx];

	IOLockLock(a->lock);
	/* First-fit scan of pages that still have free chunks. */
	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
	while (!queue_end(&a->list, &pa->link)) {
		addr = iopa_allocinpage(pa, count, align);
		if (addr) {
			a->bytecount += bytes;
			break;
		}
		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
	}
	IOLockUnlock(a->lock);

	if (!addr) {
		/* No room anywhere: get a new page.  Its iopa_page_t header
		 * lives in the page's last chunk; avail = -2ULL marks every
		 * chunk free except that header chunk. */
		addr = alloc(kheap, a);
		if (addr) {
			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
			pa->signature = kIOPageAllocSignature;
			pa->avail     = -2ULL;

			/* Safe without the lock: the page is not yet on the
			 * list, so no other thread can reach it. */
			addr = iopa_allocinpage(pa, count, align);
			IOLockLock(a->lock);
			if (pa->avail) {
				enqueue_head(&a->list, &pa->link);
			}
			a->pagecount++;
			if (addr) {
				a->bytecount += bytes;
			}
			IOLockUnlock(a->lock);
		}
	}

	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
	return addr;
}
1399 
/*
 * Return 'bytes' at 'addr' to chunk allocator 'a'.  Returns the page's
 * base address once the page becomes completely free (so the caller
 * can release it), or 0 while the page still holds live allocations.
 */
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
	iopa_page_t * pa;
	uint32_t      count;
	uintptr_t     chunk;

	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	/* Mirror the zero-size bump made by iopa_alloc. */
	if (!bytes) {
		bytes = 1;
	}

	/* The in-page offset must be chunk-aligned. */
	chunk = (addr & page_mask);
	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

	/* The iopa_page_t header sits in the page's last chunk. */
	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
	assert(kIOPageAllocSignature == pa->signature);

	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
	chunk /= gIOPageAllocChunkBytes;

	IOLockLock(a->lock);
	/* Page goes from full back to partially free: relink it. */
	if (!pa->avail) {
		assert(!pa->link.next);
		enqueue_tail(&a->list, &pa->link);
	}
	/* Set 'count' bits starting at 'chunk' (MSB = chunk 0). */
	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
	if (pa->avail != -2ULL) {
		pa = NULL;
	} else {
		/* Every chunk but the header is free: retire the page. */
		remque(&pa->link);
		pa->link.next = NULL;
		pa->signature = 0;
		a->pagecount--;
		// page to free
		pa = (typeof(pa))trunc_page(pa);
	}
	a->bytecount -= bytes;
	IOLockUnlock(a->lock);

	return (uintptr_t) pa;
}
1444 
1445 #endif /* defined(__x86_64__) */
1446 
1447 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1448 
/*
 * Change the cache mode of an existing kernel mapping by remapping
 * each page in place with the requested mode.  Kernel task only;
 * 'address' and 'length' must be page-aligned.
 */
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
	IOReturn    ret = kIOReturnSuccess;
	ppnum_t     pagenum;

	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}
	/* Reject if either address or length is not page-aligned. */
	if ((address | length) & PAGE_MASK) {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
		return kIOReturnUnsupported;
	}
	length = round_page(address + length) - trunc_page( address );
	address = trunc_page( address );

	// make map mode
	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

	while ((kIOReturnSuccess == ret) && (length > 0)) {
		// Get the physical page number
		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
		if (pagenum) {
			/* NOTE(review): the IOUnmapPages result is immediately
			 * overwritten; only the IOMapPages status propagates. */
			ret = IOUnmapPages( get_task_map(task), address, page_size );
			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
		} else {
			ret = kIOReturnVMError;
		}

		address += page_size;
		length -= page_size;
	}

	return ret;
}
1485 
1486 
/* Flush the data cache for a kernel virtual range.  Kernel task only;
 * other tasks get kIOReturnUnsupported. */
IOReturn
IOFlushProcessorCache( task_t task, IOVirtualAddress address,
    IOByteCount length )
{
	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}

	flush_dcache64((addr64_t) address, (unsigned) length, false );

	return kIOReturnSuccess;
}
1499 
1500 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1501 
/* Report the number of bytes remaining on the current kernel stack. */
vm_offset_t
OSKernelStackRemaining( void )
{
	return ml_stack_remaining();
}
1507 
1508 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1509 
/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void
IOSleep(unsigned milliseconds)
{
	delay_for_interval(milliseconds, kMillisecondScale);
}
1518 
/*
 * Sleep the calling thread for the indicated number of milliseconds,
 * and potentially an additional number of milliseconds up to the
 * leeway value (lets the scheduler coalesce timer wakeups).
 */
void
IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
	delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}
1528 
/*
 * Spin for indicated number of microseconds.
 */
void
IODelay(unsigned microseconds)
{
	delay_for_interval(microseconds, kMicrosecondScale);
}
1537 
/*
 * Spin for indicated number of nanoseconds.
 */
void
IOPause(unsigned nanoseconds)
{
	delay_for_interval(nanoseconds, kNanosecondScale);
}
1546 
1547 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1548 
/* Shared backend; 'caller' is the logging call site, forwarded to
 * os_log for attribution. */
static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

/* printf-style kernel logging.  noinline/not_tail_called keep
 * __builtin_return_address(0) pointing at the real caller. */
__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
	void *caller = __builtin_return_address(0);
	va_list ap;

	va_start(ap, format);
	_IOLogv(format, ap, caller);
	va_end(ap);
}
1562 
/* va_list variant of IOLog; same caller-attribution scheme. */
__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
	void *caller = __builtin_return_address(0);
	_IOLogv(format, ap, caller);
}
1570 
/* Backend for IOLog/IOLogv: send the message to os_log and, unless
 * disabled, mirror it to the serial console. */
void
_IOLogv(const char *format, va_list ap, void *caller)
{
	va_list ap2;
	struct console_printbuf_state info_data;
	console_printbuf_state_init(&info_data, TRUE, TRUE);

	/* 'ap' is consumed twice (os_log and __doprnt); copy it first. */
	va_copy(ap2, ap);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
#pragma clang diagnostic pop

	if (!disable_iolog_serial_output) {
		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
		console_printbuf_clear(&info_data);
	}
	va_end(ap2);

	/* Logging with interrupts disabled is a caller bug; tolerated
	 * only while quiescing, in debug mode, or effectively single-CPU. */
	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
	    debug_mode_active() || !gCPUsRunning,
	    "IOLog called with interrupts disabled");
}
1595 
#if !__LP64__
/* Legacy 32-bit-only shim: panic with the given reason string. */
void
IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif
1603 
1604 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1605 
/*
 * Dump 'buffer' through 'output' as rows of 16 bytes, each row showing
 * the hex byte values followed by their printable-ASCII rendering.
 * Output is capped at the first 4096 bytes.
 */
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
	size_t idx, linestart;
	enum { bytelen = (sizeof("0xZZ, ") - 1) };      // chars per "0xNN, " cell
	char hex[(bytelen * 16) + 1];                   // one row of hex cells
	uint8_t c, chars[17];                           // ASCII column + NUL

	output("%s(0x%lx):\n", title, size);
	output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
	if (size > 4096) {
		size = 4096;
	}
	chars[16] = 0;
	for (idx = 0, linestart = 0; idx < size;) {
		c = ((char *)buffer)[idx];
		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
		/* Bytes outside 0x20..0x7f render as spaces in the ASCII column. */
		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
		idx++;
		/* Emit a row at each 16-byte boundary and at end of data. */
		if ((idx == size) || !(idx & 15)) {
			if (idx & 15) {
				chars[idx & 15] = 0;    // terminate a short final row
			}
			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
			linestart += 16;
		}
	}
}
1635 
1636 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1637 
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];        // fallback text for values not in the table

/* Look up 'value' in a NULL-name-terminated IONamedValue table.
 * NOTE(review): the fallback path writes a shared static buffer with
 * no locking; concurrent callers can race on its contents. */
const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for (; regValueArray->name; regValueArray++) {
		if (regValueArray->value == value) {
			return regValueArray->name;
		}
	}
	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
	return (const char *)noValue;
}
1654 
1655 IOReturn
IOFindValueForName(const char * string,const IONamedValue * regValueArray,int * value)1656 IOFindValueForName(const char *string,
1657     const IONamedValue *regValueArray,
1658     int *value)
1659 {
1660 	for (; regValueArray->name; regValueArray++) {
1661 		if (!strcmp(regValueArray->name, string)) {
1662 			*value = regValueArray->value;
1663 			return kIOReturnSuccess;
1664 		}
1665 	}
1666 	return kIOReturnBadArgument;
1667 }
1668 
/* Build a "pid %d, <process name>" OSString for logging.  Follows the
 * IOKit Copy rule: the caller releases the returned string. */
OSString *
IOCopyLogNameForPID(int pid)
{
	char   buf[128];
	size_t len;
	snprintf(buf, sizeof(buf), "pid %d, ", pid);
	len = strlen(buf);
	/* Append the process name directly after the pid prefix. */
	proc_name(pid, buf + len, (int) (sizeof(buf) - len));
	return OSString::withCString(buf);
}
1679 
1680 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1681 
1682 IOAlignment
IOSizeToAlignment(unsigned int size)1683 IOSizeToAlignment(unsigned int size)
1684 {
1685 	int shift;
1686 	const int intsize = sizeof(unsigned int) * 8;
1687 
1688 	for (shift = 1; shift < intsize; shift++) {
1689 		if (size & 0x80000000) {
1690 			return (IOAlignment)(intsize - shift);
1691 		}
1692 		size <<= 1;
1693 	}
1694 	return 0;
1695 }
1696 
1697 unsigned int
IOAlignmentToSize(IOAlignment align)1698 IOAlignmentToSize(IOAlignment align)
1699 {
1700 	unsigned int size;
1701 
1702 	for (size = 1; align; align--) {
1703 		size <<= 1;
1704 	}
1705 	return size;
1706 }
1707 } /* extern "C" */
1708