xref: /xnu-12377.1.9/iokit/Kernel/IOLib.cpp (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * HISTORY
30  *
31  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
32  * 17-Nov-98   cpp
33  *
34  */
35 
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern_xnu.h>
40 #include <vm/vm_map_xnu.h>
41 #include <libkern/c++/OSCPPDebug.h>
42 
43 #include <IOKit/assert.h>
44 
45 #include <IOKit/IOReturn.h>
46 #include <IOKit/IOLib.h>
47 #include <IOKit/IOLocks.h>
48 #include <IOKit/IOMapper.h>
49 #include <IOKit/IOBufferMemoryDescriptor.h>
50 #include <IOKit/IOKitDebug.h>
51 
52 #include "IOKitKernelInternal.h"
53 
54 #ifdef IOALLOCDEBUG
55 #include <libkern/OSDebug.h>
56 #include <sys/sysctl.h>
57 #endif
58 
59 #include "libkern/OSAtomic.h"
60 #include <libkern/c++/OSKext.h>
61 #include <IOKit/IOStatisticsPrivate.h>
62 #include <os/log_private.h>
63 #include <sys/msgbuf.h>
64 #include <console/serial_protos.h>
65 
#if IOKITSTATS

/*
 * Record an allocation/free event with the IOStatistics subsystem.
 * Expands to a call to IOStatistics::countAlloc() when IOKITSTATS is
 * compiled in, and to nothing otherwise.
 */
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


/*
 * True when allocation tracking is compiled in (IOTRACKING) and the
 * kIOTracking bit is set in the gIOKitDebug boot-arg at runtime.
 */
#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
81 
82 
83 extern "C"
84 {
85 mach_timespec_t IOZeroTvalspec = { 0, 0 };
86 
87 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
88 
89 extern int
90 __doprnt(
91 	const char              *fmt,
92 	va_list                 argp,
93 	void                    (*putc)(int, void *),
94 	void                    *arg,
95 	int                     radix,
96 	int                     is_log);
97 
98 extern bool bsd_log_lock(bool);
99 extern void bsd_log_unlock(void);
100 
101 
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103 
/* Lock group used for all IOKit locks; IOLockGroup aliases io_lck_grp. */
lck_grp_t        io_lck_grp;
lck_grp_t       *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;

#if defined(__x86_64__)
/* Sub-page allocator used by IOBufferMemoryDescriptor (x86_64 only). */
iopa_t gIOBMDPageAllocator;
#endif /* defined(__x86_64__) */

/*
 * Static variables for this module.
 */

/* List of IOMallocContiguous() allocations backed by a memory descriptor,
 * protected by gIOMallocContiguousEntriesLock. */
static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

/* Size limits for the pageable submap and for a single pageable allocation. */
#if __x86_64__
enum { kIOPageableMaxAllocSize = 512ULL * 1024 * 1024 };
enum { kIOPageableMapSize      = 8ULL * kIOPageableMaxAllocSize  };
#else
enum { kIOPageableMaxAllocSize = 96ULL * 1024 * 1024 };
enum { kIOPageableMapSize      = 16ULL * kIOPageableMaxAllocSize  };
#endif

/* Describes a pageable submap: the map itself and its address bounds. */
typedef struct {
	vm_map_t            map;
	vm_offset_t address;
	vm_offset_t end;
} IOMapData;

#ifndef __BUILDING_XNU_LIBRARY__
/* this makes clang emit a C and C++ symbol which confuses lldb rdar://135688747 */
static
#endif /* __BUILDING_XNU_LIBRARY__ */
SECURITY_READ_ONLY_LATE(struct mach_vm_range) gIOKitPageableFixedRange;
IOMapData gIOKitPageableMap;

#if defined(__x86_64__)
/* Sub-page allocator for small pageable allocations (x86_64 only). */
static iopa_t gIOPageablePageAllocator;

uint32_t  gIOPageAllocChunkBytes;
#endif /* defined(__x86_64__) */

#if IOTRACKING
/* Tracking queues for leak/debug accounting of mallocs, wired and mapped memory. */
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */
161 
162 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
163 
/* Reserve a fixed VA range for the IOKit pageable submap at boot. */
KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed,
    &gIOKitPageableFixedRange, kIOPageableMapSize);

/*
 * One-time initialization of the IOLib allocators and lock group.
 * Idempotent: subsequent calls return immediately. Sets up the IOKit
 * lock group, optional allocation-tracking queues, the pageable submap,
 * the contiguous-allocation bookkeeping, and (x86_64) the iopa sub-page
 * allocators.
 */
void
IOLibInit(void)
{
	static bool libInitialized;

	if (libInitialized) {
		return;
	}

	lck_grp_init(&io_lck_grp, "IOKit", LCK_GRP_ATTR_NULL);
	IOLockGroup = &io_lck_grp;

#if IOTRACKING
	IOTrackingInit();
	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
	    kIOTrackingQueueTypeAlloc,
	    37);
	gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

	/* Capture every mapping when tracking is enabled at boot; otherwise
	 * only mappings of 1MB or more. */
	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
	gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
	    kIOTrackingQueueTypeDefaultOn
	    | kIOTrackingQueueTypeMap
	    | kIOTrackingQueueTypeUser,
	    0);
#endif

	/* Carve the pageable submap out of the kernel map at the fixed range
	 * registered above. KMS_NOFAIL: boot fails if this cannot succeed. */
	gIOKitPageableMap.map = kmem_suballoc(kernel_map,
	    &gIOKitPageableFixedRange.min_address,
	    kIOPageableMapSize,
	    VM_MAP_CREATE_PAGEABLE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    (kms_flags_t)(KMS_PERMANENT | KMS_DATA | KMS_NOFAIL | KMS_NOSOFTLIMIT),
	    VM_KERN_MEMORY_IOKIT).kmr_submap;

	gIOKitPageableMap.address = gIOKitPageableFixedRange.min_address;
	gIOKitPageableMap.end     = gIOKitPageableFixedRange.max_address;

	gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue_init( &gIOMallocContiguousEntries );

#if defined(__x86_64__)
	/* 64 chunks per page; the iopa page header must fit in one chunk. */
	gIOPageAllocChunkBytes = PAGE_SIZE / 64;

	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
	iopa_init(&gIOBMDPageAllocator);
	iopa_init(&gIOPageablePageAllocator);
#endif /* defined(__x86_64__) */


	libInitialized = true;
}
218 
219 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220 
221 vm_size_t
log2up(vm_size_t size)222 log2up(vm_size_t size)
223 {
224 	if (size <= 1) {
225 		size = 0;
226 	} else {
227 #if __LP64__
228 		size = 64 - __builtin_clzl(size - 1);
229 #else
230 		size = 32 - __builtin_clzl(size - 1);
231 #endif
232 	}
233 	return size;
234 }
235 
236 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
237 
238 IOThread
IOCreateThread(IOThreadFunc fcn,void * arg)239 IOCreateThread(IOThreadFunc fcn, void *arg)
240 {
241 	kern_return_t   result;
242 	thread_t                thread;
243 
244 	result = kernel_thread_start((thread_continue_t)(void (*)(void))fcn, arg, &thread);
245 	if (result != KERN_SUCCESS) {
246 		return NULL;
247 	}
248 
249 	thread_deallocate(thread);
250 
251 	return thread;
252 }
253 
254 
255 void
IOExitThread(void)256 IOExitThread(void)
257 {
258 	(void) thread_terminate(current_thread());
259 }
260 
261 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
262 
#if IOTRACKING
/* Header prepended to IOMalloc allocations when tracking is compiled in. */
struct IOLibMallocHeader {
	IOTrackingAddress tracking;
};
#endif

/*
 * Effective header size: zero unless tracking is compiled in AND enabled
 * at boot (TRACK_ALLOC). Evaluated at runtime so a non-tracking boot pays
 * no per-allocation overhead.
 */
#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
274 
275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276 
277 __typed_allocators_ignore_push // allocator implementation
278 
/*
 * Core IOMalloc implementation: allocate size bytes from kheap, with an
 * optional tracking header prepended when TRACK_ALLOC is enabled. The
 * pointer returned to the caller points just past the header; IOFree_internal
 * must be used to free it. Returns NULL on failure or size overflow.
 */
void *
(IOMalloc_internal)(struct kalloc_heap *kheap, vm_size_t size,
zalloc_flags_t flags)
{
	void * address;
	vm_size_t allocSize;

	allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
		return NULL;                                          // overflow
	}
#endif
	address = kheap_alloc(kheap, allocSize,
	    Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));

	if (address) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			hdr = (typeof(hdr))address;
			bzero(&hdr->tracking, sizeof(hdr->tracking));
			/* Store the user pointer inverted so the leak scanner does
			 * not see it as a live reference to the allocation. */
			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
			hdr->tracking.size    = size;
			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
		}
#endif
		/* Return the address just past the (possibly zero-sized) header. */
		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
	}

	return address;
}
316 
/*
 * Free an allocation made by IOMalloc_internal. inAddress is the pointer
 * handed to the caller (past the tracking header); size must match the
 * original request. NULL is a no-op. With tracking enabled, a mismatched
 * size is reported with a backtrace and the recorded size is used instead.
 */
void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
	void * address;

	if ((address = inAddress)) {
		/* Step back over the tracking header to the true allocation start. */
		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			struct ptr_reference { void * ptr; };
			volatile struct ptr_reference ptr;

			// we're about to block in IOTrackingRemove(), make sure the original pointer
			// exists in memory or a register for leak scanning to find
			ptr.ptr = inAddress;

			hdr = (typeof(hdr))address;
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
			ptr.ptr = NULL;
		}
#endif

		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
	}
}
353 
354 void *
355 IOMalloc_external(
356 	vm_size_t size);
357 void *
IOMalloc_external(vm_size_t size)358 IOMalloc_external(
359 	vm_size_t size)
360 {
361 	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_VM_TAG_BT_BIT);
362 }
363 
364 void
IOFree(void * inAddress,vm_size_t size)365 IOFree(void * inAddress, vm_size_t size)
366 {
367 	IOFree_internal(KHEAP_DEFAULT, inAddress, size);
368 }
369 
370 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
371 
372 void *
373 IOMallocZero_external(
374 	vm_size_t size);
375 void *
IOMallocZero_external(vm_size_t size)376 IOMallocZero_external(
377 	vm_size_t size)
378 {
379 	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO_VM_TAG_BT_BIT);
380 }
381 
382 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
383 
384 vm_tag_t
IOMemoryTag(vm_map_t map)385 IOMemoryTag(vm_map_t map)
386 {
387 	vm_tag_t tag;
388 
389 	if (!vm_kernel_map_is_kernel(map)) {
390 		return VM_MEMORY_IOKIT;
391 	}
392 
393 	tag = vm_tag_bt();
394 	if (tag == VM_KERN_MEMORY_NONE) {
395 		tag = VM_KERN_MEMORY_IOKIT;
396 	}
397 
398 	return tag;
399 }
400 
401 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
402 
/*
 * Header stored immediately before the aligned address handed out by the
 * aligned/physical allocators, recording how to get back to the true
 * allocation start on free.
 */
struct IOLibPageMallocHeader {
	mach_vm_size_t    alignMask;        /* alignment - 1 used for the allocation */
	mach_vm_offset_t  allocationOffset; /* alignedStart - allocationStart (ptrauth-signed) */
#if IOTRACKING
	IOTrackingAddress tracking;
#endif
};

/*
 * Effective header size: the tracking field only counts when tracking is
 * compiled in AND enabled at boot (TRACK_ALLOC).
 */
#if IOTRACKING
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
#endif
416 
417 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
418 
/*
 * Fill in an IOLibPageMallocHeader: record the alignment mask and the
 * offset from the true allocation start to the aligned address. On
 * pointer-authentication hardware the offset is signed, with the aligned
 * address and mask blended into the discriminator, so a corrupted or
 * relocated header is detected on free.
 */
static __header_always_inline void
IOMallocAlignedSetHdr(
	IOLibPageMallocHeader  *hdr,
	mach_vm_size_t          alignMask,
	mach_vm_address_t       allocationStart,
	mach_vm_address_t       alignedStart)
{
	mach_vm_offset_t        offset = alignedStart - allocationStart;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_sign_unauthenticated((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	hdr->allocationOffset = offset;
	hdr->alignMask = alignMask;
}
436 
437 __abortlike
438 static void
IOMallocAlignedHdrCorruptionPanic(mach_vm_offset_t offset,mach_vm_size_t alignMask,mach_vm_address_t alignedStart,vm_size_t size)439 IOMallocAlignedHdrCorruptionPanic(
440 	mach_vm_offset_t        offset,
441 	mach_vm_size_t          alignMask,
442 	mach_vm_address_t       alignedStart,
443 	vm_size_t               size)
444 {
445 	mach_vm_address_t       address = 0;
446 	mach_vm_address_t       recalAlignedStart = 0;
447 
448 	if (os_sub_overflow(alignedStart, offset, &address)) {
449 		panic("Invalid offset %p for aligned addr %p", (void *)offset,
450 		    (void *)alignedStart);
451 	}
452 	if (os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
453 	    &recalAlignedStart)) {
454 		panic("alignMask 0x%llx overflows recalAlignedStart %p for provided addr "
455 		    "%p", alignMask, (void *)recalAlignedStart, (void *)alignedStart);
456 	}
457 	if (((recalAlignedStart &= ~alignMask) != alignedStart) &&
458 	    (round_page(recalAlignedStart) != alignedStart)) {
459 		panic("Recalculated aligned addr %p doesn't match provided addr %p",
460 		    (void *)recalAlignedStart, (void *)alignedStart);
461 	}
462 	if (offset < sizeofIOLibPageMallocHeader) {
463 		panic("Offset %zd doesn't accomodate IOLibPageMallocHeader for aligned "
464 		    "addr %p", (size_t)offset, (void *)alignedStart);
465 	}
466 	panic("alignMask 0x%llx overflows adjusted size %zd for aligned addr %p",
467 	    alignMask, (size_t)size, (void *)alignedStart);
468 }
469 
/*
 * Recover the true allocation start from an IOLibPageMallocHeader and
 * validate the header. On ptrauth hardware the stored offset is
 * authenticated first. All consistency checks must pass (subtraction
 * doesn't underflow, the aligned address can be re-derived from the
 * recovered start, the offset covers the header, and *size + alignMask
 * doesn't overflow); any failure panics via
 * IOMallocAlignedHdrCorruptionPanic. On success, *size has alignMask
 * added to it (the adjusted free size) and the allocation start is
 * returned.
 */
static __header_always_inline mach_vm_address_t
IOMallocAlignedGetAddress(
	IOLibPageMallocHeader  *hdr,
	mach_vm_address_t       alignedStart,
	vm_size_t              *size)
{
	mach_vm_address_t       address = 0;
	mach_vm_address_t       recalAlignedStart = 0;
	mach_vm_offset_t        offset = hdr->allocationOffset;
	mach_vm_size_t          alignMask = hdr->alignMask;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_auth_data((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	if (os_sub_overflow(alignedStart, offset, &address) ||
	    os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
	    &recalAlignedStart) ||
	    (((recalAlignedStart &= ~alignMask) != alignedStart) &&
	    (round_page(recalAlignedStart) != alignedStart)) ||
	    (offset < sizeofIOLibPageMallocHeader) ||
	    os_add_overflow(*size, alignMask, size)) {
		IOMallocAlignedHdrCorruptionPanic(offset, alignMask, alignedStart, *size);
	}
	return address;
}
497 
/*
 * Allocate size bytes aligned to the given alignment (rounded up to a
 * power of two; must fit in 32 bits). Three paths:
 *  - size + header >= page_size: allocate directly from the kernel map
 *    with the alignment mask (no header needed; free detects this case
 *    by size).
 *  - smaller, but size + header + mask >= page_size: kmem_alloc the
 *    padded size and place a header before the aligned address.
 *  - otherwise: kheap_alloc the padded size, same header scheme.
 * Returns NULL on zero size, oversized alignment, overflow, or
 * allocation failure. Must be freed with IOFreeAligned_internal using
 * the same size.
 */
void *
(IOMallocAligned_internal)(struct kalloc_heap *kheap, vm_size_t size,
vm_size_t alignment, zalloc_flags_t flags)
{
	kern_return_t           kr;
	vm_offset_t             address;
	vm_offset_t             allocationAddress;
	vm_size_t               adjustedSize;
	uintptr_t               alignMask;
	IOLibPageMallocHeader * hdr;
	kma_flags_t kma_flags = KMA_NONE;

	if (size == 0) {
		return NULL;
	}
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}

	if (flags & Z_ZERO) {
		kma_flags = KMA_ZERO;
	}

	/* Map the data kheaps onto the corresponding kmem flags so the
	 * page-level paths land in the right submap. */
	if (kheap == KHEAP_DATA_BUFFERS) {
		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
	} else if (kheap == KHEAP_DATA_SHARED) {
		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA_SHARED);
	}

	/* Round alignment up to a power of two. */
	alignment = (1UL << log2up((uint32_t) alignment));
	alignMask = alignment - 1;
	adjustedSize = size + sizeofIOLibPageMallocHeader;

	if (size > adjustedSize) {
		address = 0; /* overflow detected */
	} else if (adjustedSize >= page_size) {
		/* Page-level allocation: naturally aligned, no header stored. */
		kr = kernel_memory_allocate(kernel_map, &address,
		    size, alignMask, kma_flags, IOMemoryTag(kernel_map));
		if (KERN_SUCCESS != kr) {
			address = 0;
		}
#if IOTRACKING
		else if (TRACK_ALLOC) {
			IOTrackingAlloc(gIOMallocTracking, address, size);
		}
#endif
	} else {
		/* Sub-page allocation: pad so an aligned address preceded by a
		 * header always fits inside the allocation. */
		adjustedSize += alignMask;

		if (adjustedSize >= page_size) {
			kr = kmem_alloc(kernel_map, &allocationAddress,
			    adjustedSize, kma_flags, IOMemoryTag(kernel_map));
			if (KERN_SUCCESS != kr) {
				allocationAddress = 0;
			}
		} else {
			allocationAddress = (vm_address_t) kheap_alloc(kheap,
			    adjustedSize, Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
		}

		if (allocationAddress) {
			/* First aligned address leaving room for the header. */
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				/* Inverted so the leak scanner ignores it. */
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	assert(0 == (address & alignMask));

	if (address) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

	return (void *) address;
}
588 
/*
 * Free an allocation made by IOMallocAligned_internal. size must match
 * the original request; it selects the same path the allocation took:
 * size + header >= page_size means a direct kernel-map allocation with
 * no header, otherwise the header before address is validated and used
 * to recover the true allocation start and padded size. NULL is a no-op.
 */
void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = size + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
		/* Page-level path: address is the allocation start itself. */
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header (panics on corruption) and adds the
		 * alignment mask back into adjustedSize. */
		allocationAddress = IOMallocAlignedGetAddress(hdr,
		    (mach_vm_address_t)address, &adjustedSize);

#if IOTRACKING
		if (TRACK_ALLOC) {
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		/* Mirror the allocation's sub-page split: kmem vs kheap. */
		if (adjustedSize >= page_size) {
			kmem_free(kernel_map, allocationAddress, adjustedSize);
		} else {
			kheap_free(kheap, allocationAddress, adjustedSize);
		}
	}

#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
638 
639 void *
640 IOMallocAligned_external(
641 	vm_size_t size, vm_size_t alignment);
642 void *
IOMallocAligned_external(vm_size_t size,vm_size_t alignment)643 IOMallocAligned_external(
644 	vm_size_t size, vm_size_t alignment)
645 {
646 	return IOMallocAligned_internal(GET_KEXT_KHEAP_DATA(), size, alignment,
647 	           Z_VM_TAG_BT_BIT);
648 }
649 
650 void
IOFreeAligned(void * address,vm_size_t size)651 IOFreeAligned(
652 	void                  * address,
653 	vm_size_t               size)
654 {
655 	IOFreeAligned_internal(GET_KEXT_KHEAP_DATA(), address, size);
656 }
657 
658 __typed_allocators_ignore_pop
659 
660 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
661 
/*
 * Free an allocation made by IOKernelAllocateWithPhysicalRestrict. The
 * path is selected by recomputing its adjusted size (2 * size + header):
 * at or above page_size the address came straight from the kernel map;
 * below it, the header before address recovers the kheap allocation
 * start and padded size. NULL is a no-op.
 */
void
IOKernelFreePhysical(
	kalloc_heap_t         kheap,
	mach_vm_address_t     address,
	mach_vm_size_t        size)
{
	vm_address_t       allocationAddress;
	vm_size_t          adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header (panics on corruption) and updates
		 * adjustedSize to the padded size to free. */
		allocationAddress = IOMallocAlignedGetAddress(hdr, address, &adjustedSize);
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		__typed_allocators_ignore(kheap_free(kheap, allocationAddress, adjustedSize));
	}

	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}
702 
703 #if __arm64__
704 extern unsigned long gPhysBase, gPhysSize;
705 #endif
706 
/*
 * Allocate wired kernel memory with optional physical-address
 * constraints. maxPhys bounds the highest acceptable physical address
 * (0 = no bound); contiguous requests physically contiguous pages;
 * noSoftLimit exempts the allocation from soft memory limits. Alignment
 * need not be a power of two here; alignMask = alignment - 1 is passed
 * through to the VM. Returns the virtual address, or 0 on failure or
 * overflow. Free with IOKernelFreePhysical using the same size.
 */
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
	kalloc_heap_t         kheap,
	mach_vm_size_t        size,
	mach_vm_address_t     maxPhys,
	mach_vm_size_t        alignment,
	bool                  contiguous,
	bool                  noSoftLimit)
{
	kern_return_t           kr;
	mach_vm_address_t       address;
	mach_vm_address_t       allocationAddress;
	mach_vm_size_t          adjustedSize;
	mach_vm_address_t       alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return 0;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	alignMask = alignment - 1;

	/* adjustedSize = 2 * size + header; the same formula selects the
	 * free path in IOKernelFreePhysical. */
	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
		return 0;
	}

	/* Sub-page allocations are contiguous by construction; only honor
	 * the flag for larger requests or super-page alignment. */
	contiguous = (contiguous && (adjustedSize > page_size))
	    || (alignment > page_size);

	if (contiguous || maxPhys) {
		kma_flags_t options = KMA_ZERO;
		vm_offset_t virt;

		if (kheap == KHEAP_DATA_BUFFERS) {
			options = (kma_flags_t) (options | KMA_DATA);
		} else if (kheap == KHEAP_DATA_SHARED) {
			options = (kma_flags_t) (options | KMA_DATA_SHARED);
		}

		if (noSoftLimit) {
			options = (kma_flags_t) (options | KMA_NOSOFTLIMIT);
		}

		/* Page-level path: no header, so re-derive contiguity from the
		 * plain size. */
		adjustedSize = size;
		contiguous = (contiguous && (adjustedSize > page_size))
		    || (alignment > page_size);

		if (!contiguous) {
			/* Drop maxPhys constraints that are trivially satisfied, so
			 * the cheaper non-contig allocator can be used. */
#if __arm64__
			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
				maxPhys = 0;
			} else
#endif
			if (maxPhys <= 0xFFFFFFFF) {
				/* <=4GB is expressible as KMA_LOMEM instead. */
				maxPhys = 0;
				options = (kma_flags_t)(options | KMA_LOMEM);
			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
				maxPhys = 0;
			}
		}
		if (contiguous || maxPhys) {
			kr = kmem_alloc_contig(kernel_map, &virt, size,
			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
			    options, IOMemoryTag(kernel_map));
		} else {
			kr = kernel_memory_allocate(kernel_map, &virt,
			    size, alignMask, options, IOMemoryTag(kernel_map));
		}
		if (KERN_SUCCESS == kr) {
			address = virt;
#if IOTRACKING
			if (TRACK_ALLOC) {
				IOTrackingAlloc(gIOMallocTracking, address, size);
			}
#endif
		} else {
			address = 0;
		}
	} else {
		/* Sub-page path: kheap allocation with a hidden header before
		 * the aligned address, as in IOMallocAligned_internal. */
		zalloc_flags_t zflags = Z_WAITOK;

		if (noSoftLimit) {
			zflags = (zalloc_flags_t)(zflags | Z_NOSOFTLIMIT);
		}

		adjustedSize += alignMask;
		if (adjustedSize < size) {
			return 0;
		}

		/* BEGIN IGNORE CODESTYLE */
		__typed_allocators_ignore_push // allocator implementation
		allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
		    adjustedSize, Z_VM_TAG_BT(zflags, VM_KERN_MEMORY_IOKIT));
		__typed_allocators_ignore_pop
		/* END IGNORE CODESTYLE */

		if (allocationAddress) {
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			/* Keep the buffer from straddling a page boundary so it stays
			 * physically contiguous. */
			if (atop_32(address) != atop_32(address + size - 1)) {
				address = round_page(address);
			}

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				/* Inverted so the leak scanner ignores it. */
				hdr->tracking.address = ~address;
				hdr->tracking.size    = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	if (address) {
		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
	}

	return address;
}
839 
840 
841 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
842 
/*
 * Bookkeeping entry linking a virtual address returned by
 * IOMallocContiguous (when a physical address was requested) to the
 * IOBufferMemoryDescriptor that backs it, so IOFreeContiguous can find
 * and release the descriptor. Kept on gIOMallocContiguousEntries.
 */
struct _IOMallocContiguousEntry {
	mach_vm_address_t          virtualAddr;
	IOBufferMemoryDescriptor * md;
	queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
849 
/*
 * Allocate physically contiguous wired memory. When physicalAddress is
 * NULL the allocation goes straight through
 * IOKernelAllocateWithPhysicalRestrict. When a physical address is
 * requested, a 32-bit-constrained IOBufferMemoryDescriptor is created
 * instead, its physical address returned through *physicalAddress, and
 * the (descriptor, virtual address) pair recorded on
 * gIOMallocContiguousEntries so IOFreeContiguous can release it.
 * Returns NULL on failure or zero size.
 */
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
	mach_vm_address_t   address = 0;

	if (size == 0) {
		return NULL;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	/* Do we want a physical address? */
	if (!physicalAddress) {
		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_DEFAULT,
		    size, 0 /*maxPhys*/, alignment, true, false /* noSoftLimit */);
	} else {
		do {
			IOBufferMemoryDescriptor * bmd;
			mach_vm_address_t          physicalMask;
			vm_offset_t                alignMask;

			/* Constrain to 32-bit physical, aligned as requested. */
			alignMask = alignment - 1;
			physicalMask = (0xFFFFFFFF ^ alignMask);

			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
			if (!bmd) {
				break;
			}

			_IOMallocContiguousEntry *
			    entry = IOMallocType(_IOMallocContiguousEntry);
			if (!entry) {
				bmd->release();
				break;
			}
			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
			entry->md          = bmd;
			lck_mtx_lock(gIOMallocContiguousEntriesLock);
			queue_enter( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			lck_mtx_unlock(gIOMallocContiguousEntriesLock);

			address          = (mach_vm_address_t) entry->virtualAddr;
			*physicalAddress = bmd->getPhysicalAddress();
		}while (false);
	}

	return (void *) address;
}
902 
/*
 * Free memory from IOMallocContiguous. If the address matches a recorded
 * descriptor-backed entry, the entry is unlinked under the lock and the
 * descriptor released; otherwise the address is assumed to come from
 * IOKernelAllocateWithPhysicalRestrict and freed via
 * IOKernelFreePhysical. NULL is a no-op; size must match the original
 * request for the non-descriptor path.
 */
void
IOFreeContiguous(void * _address, vm_size_t size)
{
	_IOMallocContiguousEntry * entry;
	IOMemoryDescriptor *       md = NULL;

	mach_vm_address_t address = (mach_vm_address_t) _address;

	if (!address) {
		return;
	}

	assert(size);

	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_iterate( &gIOMallocContiguousEntries, entry,
	    _IOMallocContiguousEntry *, link )
	{
		if (entry->virtualAddr == address) {
			md   = entry->md;
			queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			break;
		}
	}
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	/* entry is only valid here when md was found (loop broke early). */
	if (md) {
		md->release();
		IOFreeType(entry, _IOMallocContiguousEntry);
	} else {
		IOKernelFreePhysical(KHEAP_DEFAULT, (mach_vm_address_t) address, size);
	}
}
937 
938 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
939 
940 kern_return_t
IOIteratePageableMaps(vm_size_t size,IOIteratePageableMapsCallback callback,void * ref)941 IOIteratePageableMaps(vm_size_t size,
942     IOIteratePageableMapsCallback callback, void * ref)
943 {
944 	if (size > kIOPageableMaxAllocSize) {
945 		return kIOReturnBadArgument;
946 	}
947 	return (*callback)(gIOKitPageableMap.map, ref);
948 }
949 
/* In/out parameter block for IOMallocPageableCallback: size and tag in,
 * allocated address out. */
struct IOMallocPageableRef {
	vm_offset_t address;
	vm_size_t   size;
	vm_tag_t    tag;
};
955 
956 static kern_return_t
IOMallocPageableCallback(vm_map_t map,void * _ref)957 IOMallocPageableCallback(vm_map_t map, void * _ref)
958 {
959 	struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
960 	kma_flags_t flags = (kma_flags_t)(KMA_PAGEABLE | KMA_DATA_SHARED);
961 
962 	return kmem_alloc( map, &ref->address, ref->size, flags, ref->tag );
963 }
964 
965 static void *
IOMallocPageablePages(vm_size_t size,vm_size_t alignment,vm_tag_t tag)966 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
967 {
968 	kern_return_t              kr = kIOReturnNotReady;
969 	struct IOMallocPageableRef ref;
970 
971 	if (alignment > page_size) {
972 		return NULL;
973 	}
974 	if (size > kIOPageableMaxAllocSize) {
975 		return NULL;
976 	}
977 
978 	ref.size = size;
979 	ref.tag  = tag;
980 	kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
981 	if (kIOReturnSuccess != kr) {
982 		ref.address = 0;
983 	}
984 
985 	return (void *) ref.address;
986 }
987 
988 vm_map_t
IOPageableMapForAddress(uintptr_t address)989 IOPageableMapForAddress(uintptr_t address)
990 {
991 	if (address < gIOKitPageableMap.address || address >= gIOKitPageableMap.end) {
992 		panic("IOPageableMapForAddress: address out of range");
993 	}
994 	return gIOKitPageableMap.map;
995 }
996 
997 static void
IOFreePageablePages(void * address,vm_size_t size)998 IOFreePageablePages(void * address, vm_size_t size)
999 {
1000 	vm_map_t map;
1001 
1002 	map = IOPageableMapForAddress((vm_address_t) address);
1003 	if (map) {
1004 		kmem_free( map, (vm_offset_t) address, size);
1005 	}
1006 }
1007 
#if defined(__x86_64__)
/*
 * iopa_proc_t backend for the pageable chunk allocator: fetch one
 * page-aligned pageable page to be carved into chunks.
 */
static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
{
	return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
#endif /* defined(__x86_64__) */
1015 
/*
 * Common implementation for IOMallocPageable/IOMallocPageableZero.
 * On x86_64, small requests are carved out of shared pages by the
 * iopa chunk allocator; large (or strongly aligned) requests, and all
 * requests on other architectures, use whole pageable pages.
 */
static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
	void * addr;

	/* alignment is narrowed to 32 bits below; reject wider values. */
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}
#if defined(__x86_64__)
	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
	    alignment > page_size) {
		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
		/* Memory allocated this way will already be zeroed. */
	} else {
		/* Chunk-allocator memory may be recycled from earlier use,
		 * so zero it explicitly when the caller asked for that. */
		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
		    &IOMallocOnePageablePage, KHEAP_DEFAULT, size, (uint32_t) alignment));
		if (addr && zeroed) {
			bzero(addr, size);
		}
	}
#else /* !defined(__x86_64__) */
	/* Round a zero-byte request up to one byte so the caller still
	 * gets a distinct, freeable allocation. */
	vm_size_t allocSize = size;
	if (allocSize == 0) {
		allocSize = 1;
	}
	addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
	/* already zeroed */
#endif /* defined(__x86_64__) */

	if (addr) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
	}

	return addr;
}
1054 
void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
	/* Non-zeroing variant; the x86_64 small-chunk path may return
	 * recycled (dirty) memory. */
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}
1060 
void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
	/* Zeroing variant: memory is guaranteed cleared on all paths. */
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}
1066 
/*
 * Free memory obtained from IOMallocPageable/IOMallocPageableZero.
 * `size` must match the original request; it selects between the
 * chunk allocator and whole-page paths on x86_64.
 */
void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	IOStatisticsAlloc(kIOStatisticsFreePageable, size);

#if defined(__x86_64__)
	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
		/* iopa_free returns the page base only when the whole page
		 * became free; otherwise NULL and nothing more to do. */
		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
		size = page_size;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#else /* !defined(__x86_64__) */
	/* Mirror the allocation path's rounding of zero-byte requests. */
	if (size == 0) {
		size = 1;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#endif /* defined(__x86_64__) */
}
1092 
1093 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1094 
1095 
1096 __typed_allocators_ignore_push
1097 
/* Exported (kext-facing) allocator: untyped bytes from the kext data heap. */
void *
IOMallocData_external(
	vm_size_t size);
void *
IOMallocData_external(vm_size_t size)
{
	return IOMalloc_internal(GET_KEXT_KHEAP_DATA(), size, Z_VM_TAG_BT_BIT);
}
1106 
/* Exported (kext-facing) allocator: zero-filled bytes from the kext data heap. */
void *
IOMallocZeroData_external(
	vm_size_t size);
void *
IOMallocZeroData_external(vm_size_t size)
{
	return IOMalloc_internal(GET_KEXT_KHEAP_DATA(), size, Z_ZERO_VM_TAG_BT_BIT);
}
1115 
/* Free memory obtained from IOMallocData/IOMallocZeroData. */
void
IOFreeData(void * address, vm_size_t size)
{
	return IOFree_internal(GET_KEXT_KHEAP_DATA(), address, size);
}
1121 
/* Free memory allocated from the shared-data heap (KHEAP_DATA_SHARED). */
void
IOFreeDataSharable(void * address, vm_size_t size)
{
	return IOFree_internal(KHEAP_DATA_SHARED, address, size);
}
1127 
1128 __typed_allocators_ignore_pop
1129 
1130 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1131 
1132 __typed_allocators_ignore_push // allocator implementation
1133 
/*
 * Typed allocation entry point (backs IOMallocType). Always returns
 * zeroed memory; non-VM-backed sizes are allocated Z_NOFAIL.
 */
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
		void *mem = IOMalloc_internal(KHEAP_DEFAULT, kt_size, Z_ZERO);
		if (!IOMallocType_from_vm(kt_view)) {
			assert(mem);
		}
		return mem;
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
	if (!IOMallocType_from_vm(kt_view)) {
		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
	}
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kalloc_type_impl_external(kt_view, kt_flags);
}
1163 
/*
 * Typed free entry point (backs IOFreeType); must pair with
 * IOMallocTypeImpl for the same view.
 */
void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address,
		           kalloc_type_get_size(kt_view->kt_size));
	}
#endif
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kfree_type_impl_external(kt_view, address);
}
1180 
/*
 * Variable-size typed allocation entry point (backs IOMallocTypeVar).
 * Always returns zeroed memory.
 */
void *
IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO);
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);

	kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
	return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
}
1199 
/*
 * Variable-size typed free; must pair with IOMallocTypeVarImpl with
 * the same view and size.
 */
void
IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
    vm_size_t size)
{
#if IOTRACKING
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address, size);
	}
#endif

	return kfree_type_var_impl(kt_view, address, size);
}
1212 
1213 __typed_allocators_ignore_pop
1214 
1215 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1216 
1217 #if defined(__x86_64__)
1218 
1219 
1220 extern "C" void
iopa_init(iopa_t * a)1221 iopa_init(iopa_t * a)
1222 {
1223 	bzero(a, sizeof(*a));
1224 	a->lock = IOLockAlloc();
1225 	queue_init(&a->list);
1226 }
1227 
/*
 * Try to carve `count` contiguous chunks out of page `pa`.
 * pa->avail is a 64-bit free bitmap, MSB = chunk 0. Returns the chunk
 * address, or 0 if no suitably aligned run of free chunks exists.
 * Caller holds the allocator lock (or owns a not-yet-published page).
 */
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
	uint32_t n, s;
	uint64_t avail = pa->avail;

	assert(avail);

	// find strings of count 1 bits in avail
	// (repeated shift-and halves the remaining run length each pass)
	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail = avail & (avail << s);
	}
	// and aligned
	avail &= align;

	if (avail) {
		// highest set bit = lowest-addressed fitting run
		n = __builtin_clzll(avail);
		// clear the `count` bits starting at chunk n
		pa->avail &= ~((-1ULL << (64 - count)) >> n);
		if (!pa->avail && pa->link.next) {
			// page is now full: drop it from the partial list
			remque(&pa->link);
			pa->link.next = NULL;
		}
		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
	}

	return 0;
}
1256 
/*
 * Sub-page allocator: hand out `bytes` (aligned to `balign`) carved
 * from pages obtained via `alloc`. Tries existing partially-used pages
 * first; otherwise fetches a fresh page whose last chunk holds the
 * iopa_page_t header. Returns 0 on failure.
 */
uintptr_t
iopa_alloc(
	iopa_t          * a,
	iopa_proc_t       alloc,
	kalloc_heap_t     kheap,
	vm_size_t         bytes,
	vm_size_t         balign)
{
	/* align_masks[i]: bitmap of start chunks aligned to 2^i chunks */
	static const uint64_t align_masks[] = {
		0xFFFFFFFFFFFFFFFF,
		0xAAAAAAAAAAAAAAAA,
		0x8888888888888888,
		0x8080808080808080,
		0x8000800080008000,
		0x8000000080000000,
		0x8000000000000000,
	};
	iopa_page_t * pa;
	uintptr_t     addr = 0;
	uint32_t      count;
	uint64_t      align;
	vm_size_t     align_masks_idx;

	/* bytes is narrowed to 32 bits below; reject wider values */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}
	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
	align = align_masks[align_masks_idx];

	/* first pass: any partially-used page with a fitting run */
	IOLockLock(a->lock);
	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
	while (!queue_end(&a->list, &pa->link)) {
		addr = iopa_allocinpage(pa, count, align);
		if (addr) {
			a->bytecount += bytes;
			break;
		}
		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
	}
	IOLockUnlock(a->lock);

	if (!addr) {
		/* no luck: fetch a fresh page (outside the lock) */
		addr = alloc(kheap, a);
		if (addr) {
			/* header lives in the page's last chunk; -2ULL marks
			 * exactly that chunk (the bitmap LSB) as in use */
			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
			pa->signature = kIOPageAllocSignature;
			pa->avail     = -2ULL;

			addr = iopa_allocinpage(pa, count, align);
			IOLockLock(a->lock);
			if (pa->avail) {
				enqueue_head(&a->list, &pa->link);
			}
			a->pagecount++;
			if (addr) {
				a->bytecount += bytes;
			}
			IOLockUnlock(a->lock);
		}
	}

	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
	return addr;
}
1327 
/*
 * Return `bytes` at `addr` to the chunk allocator. Returns the page's
 * base address when this free empties the whole page (the caller must
 * then release the page itself), or 0 if the page is still in use.
 */
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
	iopa_page_t * pa;
	uint32_t      count;
	uintptr_t     chunk;

	/* mirror iopa_alloc's 32-bit size constraint */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}

	chunk = (addr & page_mask);
	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

	/* the page header occupies the last chunk of the same page */
	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
	assert(kIOPageAllocSignature == pa->signature);

	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
	chunk /= gIOPageAllocChunkBytes;

	IOLockLock(a->lock);
	if (!pa->avail) {
		/* page was full, so it was unlinked: put it back on the list */
		assert(!pa->link.next);
		enqueue_tail(&a->list, &pa->link);
	}
	/* set the freed chunks' bits (bitmap MSB = chunk 0) */
	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
	if (pa->avail != -2ULL) {
		pa = NULL;
	} else {
		/* only the header chunk remains used: retire the page */
		remque(&pa->link);
		pa->link.next = NULL;
		pa->signature = 0;
		a->pagecount--;
		// page to free
		pa = (typeof(pa))trunc_page(pa);
	}
	a->bytecount -= bytes;
	IOLockUnlock(a->lock);

	return (uintptr_t) pa;
}
1372 
1373 #endif /* defined(__x86_64__) */
1374 
1375 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1376 
/*
 * Change the cache mode of an existing, page-aligned kernel mapping by
 * unmapping and remapping each page with the new mode. Kernel task
 * only; address and length must be page-aligned.
 */
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
	IOReturn    ret = kIOReturnSuccess;
	ppnum_t     pagenum;

	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}
	if ((address | length) & PAGE_MASK) {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
		return kIOReturnUnsupported;
	}
	length = round_page(address + length) - trunc_page( address );
	address = trunc_page( address );

	// make map mode
	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

	while ((kIOReturnSuccess == ret) && (length > 0)) {
		// Get the physical page number
		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
		if (pagenum) {
			/* remap the same physical page with the new cache mode */
			ret = IOUnmapPages( get_task_map(task), address, page_size );
			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
		} else {
			ret = kIOReturnVMError;
		}

		address += page_size;
		length -= page_size;
	}

	return ret;
}
1413 
1414 
1415 IOReturn
IOFlushProcessorCache(task_t task,IOVirtualAddress address,IOByteCount length)1416 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1417     IOByteCount length )
1418 {
1419 	if (task != kernel_task) {
1420 		return kIOReturnUnsupported;
1421 	}
1422 
1423 	flush_dcache64((addr64_t) address, (unsigned) length, false );
1424 
1425 	return kIOReturnSuccess;
1426 }
1427 
1428 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1429 
vm_offset_t
OSKernelStackRemaining( void )
{
	/* Bytes left on the current kernel stack, per the machine layer. */
	return ml_stack_remaining();
}
1435 
1436 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1437 
/*
 * Sleep (block the calling thread) for the indicated number of milliseconds.
 */
void
IOSleep(unsigned milliseconds)
{
	/* Blocking wait, not a busy spin; see IODelay for spinning. */
	delay_for_interval(milliseconds, kMillisecondScale);
}
1446 
/*
 * Sleep for the indicated number of milliseconds, and potentially an
 * additional number of milliseconds up to the leeway values.
 */
void
IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
	/* Leeway lets the scheduler coalesce the wakeup with others. */
	delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}
1456 
1457 /*
1458  * Spin for indicated number of microseconds.
1459  */
void
IODelay(unsigned microseconds)
{
	delay_for_interval(microseconds, kMicrosecondScale);
}
1465 
1466 /*
1467  * Spin for indicated number of nanoseconds.
1468  */
void
IOPause(unsigned nanoseconds)
{
	delay_for_interval(nanoseconds, kNanosecondScale);
}
1474 
1475 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1476 
1477 static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);
1478 
1479 __attribute__((noinline, not_tail_called))
1480 void
IOLog(const char * format,...)1481 IOLog(const char *format, ...)
1482 {
1483 	void *caller = __builtin_return_address(0);
1484 	va_list ap;
1485 
1486 	va_start(ap, format);
1487 	_IOLogv(format, ap, caller);
1488 	va_end(ap);
1489 }
1490 
1491 __attribute__((noinline, not_tail_called))
1492 void
IOLogv(const char * format,va_list ap)1493 IOLogv(const char *format, va_list ap)
1494 {
1495 	void *caller = __builtin_return_address(0);
1496 	_IOLogv(format, ap, caller);
1497 }
1498 
/*
 * Shared backend for IOLog/IOLogv: emits to os_log and, unless
 * disabled, to the serial console. The va_list is copied because it
 * is consumed twice (once per sink).
 */
void
_IOLogv(const char *format, va_list ap, void *caller)
{
	va_list ap2;
	struct console_printbuf_state info_data;
	console_printbuf_state_init(&info_data, TRUE, TRUE);

	va_copy(ap2, ap);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#pragma clang diagnostic ignored "-Wformat"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
#pragma clang diagnostic pop

	if (!disable_iolog_serial_output) {
		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
		console_printbuf_clear(&info_data);
	}
	va_end(ap2);

	/* IOLog may block; catch callers who invoke it from interrupt
	 * context (allowed only while quiescing/debugging). */
	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
	    debug_mode_active() || !gCPUsRunning,
	    "IOLog called with interrupts disabled");
}
1524 
#if !__LP64__
/* Legacy 32-bit-only entry point: panic with the given reason. */
void
IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif
1532 
1533 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1534 
/*
 * Hex-dump `buffer` through `output`, 16 bytes per line with an ASCII
 * column, capped at 4096 bytes. `title` labels the dump.
 */
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
	size_t idx, linestart;
	enum { bytelen = (sizeof("0xZZ, ") - 1) };   /* chars per "0xNN, " cell */
	char hex[(bytelen * 16) + 1];
	uint8_t c, chars[17];

	output("%s(0x%lx):\n", title, size);
	output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
	if (size > 4096) {
		size = 4096;
	}
	chars[16] = 0;
	for (idx = 0, linestart = 0; idx < size;) {
		c = ((char *)buffer)[idx];
		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
		/* printable ASCII passes through; everything else shows as space */
		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
		idx++;
		/* flush a line every 16 bytes, or at the (possibly short) end */
		if ((idx == size) || !(idx & 15)) {
			if (idx & 15) {
				chars[idx & 15] = 0;
			}
			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
			linestart += 16;
		}
	}
}
1564 
1565 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1566 
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];        // shared scratch for unmatched values

/*
 * Look `value` up in a NULL-name-terminated IONamedValue table.
 * NOTE(review): on a miss the formatted fallback lives in a shared
 * static buffer, so concurrent callers can race on its contents.
 */
const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for (; regValueArray->name; regValueArray++) {
		if (regValueArray->value == value) {
			return regValueArray->name;
		}
	}
	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
	return (const char *)noValue;
}
1583 
1584 IOReturn
IOFindValueForName(const char * string,const IONamedValue * regValueArray,int * value)1585 IOFindValueForName(const char *string,
1586     const IONamedValue *regValueArray,
1587     int *value)
1588 {
1589 	for (; regValueArray->name; regValueArray++) {
1590 		if (!strcmp(regValueArray->name, string)) {
1591 			*value = regValueArray->value;
1592 			return kIOReturnSuccess;
1593 		}
1594 	}
1595 	return kIOReturnBadArgument;
1596 }
1597 
1598 OSString *
IOCopyLogNameForPID(int pid)1599 IOCopyLogNameForPID(int pid)
1600 {
1601 	char   buf[128];
1602 	size_t len;
1603 	snprintf(buf, sizeof(buf), "pid %d, ", pid);
1604 	len = strlen(buf);
1605 	proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1606 	return OSString::withCString(buf);
1607 }
1608 
1609 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1610 
1611 IOAlignment
IOSizeToAlignment(unsigned int size)1612 IOSizeToAlignment(unsigned int size)
1613 {
1614 	int shift;
1615 	const int intsize = sizeof(unsigned int) * 8;
1616 
1617 	for (shift = 1; shift < intsize; shift++) {
1618 		if (size & 0x80000000) {
1619 			return (IOAlignment)(intsize - shift);
1620 		}
1621 		size <<= 1;
1622 	}
1623 	return 0;
1624 }
1625 
1626 unsigned int
IOAlignmentToSize(IOAlignment align)1627 IOAlignmentToSize(IOAlignment align)
1628 {
1629 	unsigned int size;
1630 
1631 	for (size = 1; align; align--) {
1632 		size <<= 1;
1633 	}
1634 	return size;
1635 }
1636 } /* extern "C" */
1637