xref: /xnu-8020.101.4/iokit/Kernel/IOLib.cpp (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * HISTORY
30  *
31  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
32  * 17-Nov-98   cpp
33  *
34  */
35 
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern.h>
40 #include <libkern/c++/OSCPPDebug.h>
41 
42 #include <IOKit/assert.h>
43 
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOLib.h>
46 #include <IOKit/IOLocks.h>
47 #include <IOKit/IOMapper.h>
48 #include <IOKit/IOBufferMemoryDescriptor.h>
49 #include <IOKit/IOKitDebug.h>
50 
51 #include "IOKitKernelInternal.h"
52 
53 #ifdef IOALLOCDEBUG
54 #include <libkern/OSDebug.h>
55 #include <sys/sysctl.h>
56 #endif
57 
58 #include "libkern/OSAtomic.h"
59 #include <libkern/c++/OSKext.h>
60 #include <IOKit/IOStatisticsPrivate.h>
61 #include <os/log_private.h>
62 #include <sys/msgbuf.h>
63 #include <console/serial_protos.h>
64 
65 #if IOKITSTATS
66 
67 #define IOStatisticsAlloc(type, size) \
68 do { \
69 	IOStatistics::countAlloc(type, size); \
70 } while (0)
71 
72 #else
73 
74 #define IOStatisticsAlloc(type, size)
75 
76 #endif /* IOKITSTATS */
77 
78 
79 #define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
80 
81 
82 extern "C"
83 {
84 mach_timespec_t IOZeroTvalspec = { 0, 0 };
85 
86 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
87 
88 extern int
89 __doprnt(
90 	const char              *fmt,
91 	va_list                 argp,
92 	void                    (*putc)(int, void *),
93 	void                    *arg,
94 	int                     radix,
95 	int                     is_log);
96 
97 extern bool bsd_log_lock(bool);
98 extern void bsd_log_unlock(void);
99 
100 
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102 
103 lck_grp_t       *IOLockGroup;
104 
105 /*
106  * Global variables for use by iLogger
107  * These symbols are for use only by Apple diagnostic code.
108  * Binary compatibility is not guaranteed for kexts that reference these symbols.
109  */
110 
111 void *_giDebugLogInternal       = NULL;
112 void *_giDebugLogDataInternal   = NULL;
113 void *_giDebugReserved1         = NULL;
114 void *_giDebugReserved2         = NULL;
115 
116 #if defined(__x86_64__)
117 iopa_t gIOBMDPageAllocator;
118 #endif /* defined(__x86_64__) */
119 
120 /*
121  * Static variables for this module.
122  */
123 
124 static queue_head_t gIOMallocContiguousEntries;
125 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
126 
127 #if __x86_64__
128 enum { kIOMaxPageableMaps    = 8 };
129 enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
130 enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
131 #else
132 enum { kIOMaxPageableMaps    = 16 };
133 enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
134 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
135 #endif
136 
137 typedef struct {
138 	vm_map_t            map;
139 	vm_offset_t address;
140 	vm_offset_t end;
141 } IOMapData;
142 
143 static struct {
144 	UInt32      count;
145 	UInt32      hint;
146 	IOMapData   maps[kIOMaxPageableMaps];
147 	lck_mtx_t * lock;
148 } gIOKitPageableSpace;
149 
150 #if defined(__x86_64__)
151 static iopa_t gIOPageablePageAllocator;
152 
153 uint32_t  gIOPageAllocChunkBytes;
154 #endif /* defined(__x86_64__) */
155 
156 #if IOTRACKING
157 IOTrackingQueue * gIOMallocTracking;
158 IOTrackingQueue * gIOWireTracking;
159 IOTrackingQueue * gIOMapTracking;
160 #endif /* IOTRACKING */
161 
162 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
163 
/*
 * One-time initialization of the IOLib allocation machinery: lock
 * group, tracking queues, the first pageable submap, the contiguous
 * allocation bookkeeping queue, and (x86_64) the sub-page pool
 * allocators. Safe to call repeatedly; only the first call acts.
 */
void
IOLibInit(void)
{
	kern_return_t ret;

	static bool libInitialized;

	if (libInitialized) {
		return;
	}

	IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
	/* Allocation/wire/map tracking queues used by the leak detector. */
	IOTrackingInit();
	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
	    kIOTrackingQueueTypeAlloc,
	    37);
	gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

	/* Smaller capture threshold (more detail) when kIOTracking is set. */
	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
	gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
	    kIOTrackingQueueTypeDefaultOn
	    | kIOTrackingQueueTypeMap
	    | kIOTrackingQueueTypeUser,
	    0);
#endif

	/* Carve the first pageable submap out of the kernel map; more are
	 * added lazily by IOIteratePageableMaps() as this one fills. */
	gIOKitPageableSpace.maps[0].address = 0;
	ret = kmem_suballoc(kernel_map,
	    &gIOKitPageableSpace.maps[0].address,
	    kIOPageableMapSize,
	    VM_MAP_CREATE_PAGEABLE,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_IOKIT,
	    &gIOKitPageableSpace.maps[0].map);
	if (ret != KERN_SUCCESS) {
		panic("failed to allocate iokit pageable map");
	}

	gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
	gIOKitPageableSpace.hint            = 0;
	gIOKitPageableSpace.count           = 1;

	gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue_init( &gIOMallocContiguousEntries );

#if defined(__x86_64__)
	/* Sub-page pool granularity: 64 chunks per page. */
	gIOPageAllocChunkBytes = PAGE_SIZE / 64;

	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
	iopa_init(&gIOBMDPageAllocator);
	iopa_init(&gIOPageablePageAllocator);
#endif /* defined(__x86_64__) */


	libInitialized = true;
}
224 
225 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
226 
227 vm_size_t
log2up(vm_size_t size)228 log2up(vm_size_t size)
229 {
230 	if (size <= 1) {
231 		size = 0;
232 	} else {
233 #if __LP64__
234 		size = 64 - __builtin_clzl(size - 1);
235 #else
236 		size = 32 - __builtin_clzl(size - 1);
237 #endif
238 	}
239 	return size;
240 }
241 
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243 
/*
 * Spawn a kernel thread running fcn(arg).
 * Returns the thread_t on success or NULL if creation failed. The
 * creation reference is dropped here, so the returned handle is
 * non-owning; the thread cleans itself up when it terminates.
 */
IOThread
IOCreateThread(IOThreadFunc fcn, void *arg)
{
	kern_return_t   result;
	thread_t                thread;

	result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
	if (result != KERN_SUCCESS) {
		return NULL;
	}

	// Drop our reference; the caller receives a borrowed handle.
	thread_deallocate(thread);

	return thread;
}
259 
260 
/* Terminate the calling kernel thread; does not return on success. */
void
IOExitThread(void)
{
	(void) thread_terminate(current_thread());
}
266 
/*
 * Exported IOMalloc() entry point for kexts: routes all external
 * allocations to the kext heap (KHEAP_KEXT).
 */
void *
IOMalloc_external(
	vm_size_t size);
void *
IOMalloc_external(
	vm_size_t size)
{
	return IOMalloc_internal(KHEAP_KEXT, size);
}
276 
/*
 * Exported IOMallocZero() entry point for kexts: zeroed allocation
 * from the kext heap (KHEAP_KEXT).
 */
void *
IOMallocZero_external(
	vm_size_t size);
void *
IOMallocZero_external(
	vm_size_t size)
{
	return IOMallocZero_internal(KHEAP_KEXT, size);
}
286 
287 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
288 
289 void *
IOMallocZero_internal(struct kalloc_heap * kalloc_heap_cfg,vm_size_t size)290 IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size)
291 {
292 	void * result;
293 	result = IOMalloc_internal(kalloc_heap_cfg, size);
294 	if (result) {
295 		bzero(result, size);
296 	}
297 	return result;
298 }
299 
300 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
301 
#if IOTRACKING
/* Prefix placed in front of IOMalloc allocations when tracking is
 * compiled in; records the handed-out pointer and size for the leak
 * detector. */
struct IOLibMallocHeader {
	IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
/* Header bytes actually reserved per allocation: zero unless tracking
 * is enabled at boot (TRACK_ALLOC), so untracked systems pay nothing. */
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
313 
314 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
315 
/*
 * Core IOMalloc: allocate 'size' bytes from the given kalloc heap,
 * optionally prefixed by an IOLibMallocHeader for leak tracking.
 * Returns NULL on allocation failure or if adding the header would
 * overflow the request size.
 */
void *
IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size)
{
	void * address;
	vm_size_t allocSize;

	allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
		return NULL;                                          // overflow
	}
#endif
	address = kheap_alloc(kheap, allocSize,
	    Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));

	if (address) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			hdr = (typeof(hdr))address;
			bzero(&hdr->tracking, sizeof(hdr->tracking));
			// Record the user-visible pointer bit-inverted — presumably so
			// the header's copy is not mistaken for a live reference by
			// leak scanning; verify against IOTracking.
			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
			hdr->tracking.size    = size;
			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
		}
#endif
		// Return the address just past the (possibly zero-sized) header.
		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
	}

	return address;
}
352 
/*
 * Core IOFree: return memory allocated via IOMalloc_internal() to its
 * heap. 'size' must match the allocation size; under tracking a
 * mismatch is reported with a backtrace and the recorded size is used.
 * A NULL inAddress is a no-op.
 */
void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
	void * address;

	if ((address = inAddress)) {
		// Step back over the tracking header to the real allocation base.
		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			struct ptr_reference { void * ptr; };
			volatile struct ptr_reference ptr;

			// we're about to block in IOTrackingRemove(), make sure the original pointer
			// exists in memory or a register for leak scanning to find
			ptr.ptr = inAddress;

			hdr = (typeof(hdr))address;
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
			ptr.ptr = NULL;
		}
#endif

		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
	}
}
389 
/*
 * Free memory obtained from IOMalloc(). KHEAP_ANY lets the free match
 * whichever heap the allocation actually came from.
 */
void
IOFree(void * inAddress, vm_size_t size)
{
	IOFree_internal(KHEAP_ANY, inAddress, size);
}
395 
396 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
397 
398 vm_tag_t
IOMemoryTag(vm_map_t map)399 IOMemoryTag(vm_map_t map)
400 {
401 	vm_tag_t tag;
402 
403 	if (!vm_kernel_map_is_kernel(map)) {
404 		return VM_MEMORY_IOKIT;
405 	}
406 
407 	tag = vm_tag_bt();
408 	if (tag == VM_KERN_MEMORY_NONE) {
409 		tag = VM_KERN_MEMORY_IOKIT;
410 	}
411 
412 	return tag;
413 }
414 
415 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
416 
/*
 * Header stored immediately below addresses handed out by the aligned
 * and physical-restrict allocators' sub-page paths; records the true
 * allocation base and size so the matching free routine can undo the
 * alignment padding.
 */
struct IOLibPageMallocHeader {
	mach_vm_size_t    allocationSize;
	mach_vm_address_t allocationAddress;
#if IOTRACKING
	IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
/* Effective header size: the tracking slot is only charged when
 * allocation tracking is enabled at boot (TRACK_ALLOC). */
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
#endif
430 
431 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Exported IOMallocAligned() entry point for kexts: aligned allocation
 * from the kext heap (KHEAP_KEXT).
 */
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment);
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment)
{
	return IOMallocAligned_internal(KHEAP_KEXT, size, alignment);
}
441 
/*
 * Allocate 'size' bytes aligned to 'alignment' (rounded up to the next
 * power of two). Page-or-larger requests go directly to the heap's
 * fallback map, which honors the alignment natively; smaller requests
 * over-allocate from the heap and record the real base/size in an
 * IOLibPageMallocHeader just below the returned address so
 * IOFreeAligned() can reverse it. Returns NULL on zero size, an
 * alignment wider than 32 bits, overflow, or allocation failure.
 */
void *
IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
    vm_size_t alignment)
{
	kern_return_t           kr;
	vm_offset_t             address;
	vm_offset_t             allocationAddress;
	vm_size_t               adjustedSize;
	uintptr_t               alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return NULL;
	}
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}

	alignment = (1UL << log2up((uint32_t) alignment));
	alignMask = alignment - 1;
	adjustedSize = size + sizeofIOLibPageMallocHeader;

	if (size > adjustedSize) {
		address = 0; /* overflow detected */
	} else if (adjustedSize >= page_size) {
		// Large path: direct map allocation, no header needed.
		kr = kernel_memory_allocate(kheap->kh_fallback_map, &address,
		    size, alignMask, KMA_NONE, IOMemoryTag(kernel_map));
		if (KERN_SUCCESS != kr) {
			address = 0;
		}
#if IOTRACKING
		else if (TRACK_ALLOC) {
			IOTrackingAlloc(gIOMallocTracking, address, size);
		}
#endif
	} else {
		// Small path: pad so an aligned address plus header always fits.
		adjustedSize += alignMask;

		if (adjustedSize >= page_size) {
			kr = kernel_memory_allocate(kheap->kh_fallback_map, &allocationAddress,
			    adjustedSize, 0, KMA_NONE, IOMemoryTag(kernel_map));
			if (KERN_SUCCESS != kr) {
				allocationAddress = 0;
			}
		} else {
			allocationAddress = (vm_address_t) kheap_alloc(kheap,
			    adjustedSize, Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));
		}

		if (allocationAddress) {
			// Align past the header, then stash base/size immediately
			// below the address handed back.
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			hdr->allocationSize    = adjustedSize;
			hdr->allocationAddress = allocationAddress;
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	assert(0 == (address & alignMask));

	if (address) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

	return (void *) address;
}
522 
/*
 * Free memory obtained from IOMallocAligned(). KHEAP_ANY lets the free
 * match whichever heap the allocation actually came from.
 */
void
IOFreeAligned(
	void                  * address,
	vm_size_t               size)
{
	IOFreeAligned_internal(KHEAP_ANY, address, size);
}
530 
/*
 * Free memory from IOMallocAligned_internal(). Recomputes which
 * allocation path was taken from 'size': page-or-larger requests were
 * direct map allocations; smaller ones carry an IOLibPageMallocHeader
 * below the pointer holding the real base and allocation size.
 */
void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;
	vm_map_t                kheap_map;

	if (!address) {
		return;
	}

	/*
	 * When called with KHEAP_ANY, use default fallback map as KHEAP_ANY
	 * is a construct that allows to free to a mismatched heap and is
	 * NULL.
	 */
	if (kheap == KHEAP_ANY) {
		kheap_map = KHEAP_DEFAULT->kh_fallback_map;
	} else {
		kheap_map = kheap->kh_fallback_map;
	}

	assert(size);

	adjustedSize = size + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
		}
#endif
		kmem_free(kheap_map, (vm_offset_t) address, size);
	} else {
		// Recover the original allocation from the stashed header.
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		adjustedSize = hdr->allocationSize;
		allocationAddress = hdr->allocationAddress;

#if IOTRACKING
		if (TRACK_ALLOC) {
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
		}
#endif
		// Padded allocations of a page or more came from the map, the
		// rest from the heap.
		if (adjustedSize >= page_size) {
			kmem_free(kheap_map, allocationAddress, adjustedSize);
		} else {
			kheap_free(kheap, allocationAddress, adjustedSize);
		}
	}

#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
592 
593 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
594 
/*
 * Free memory from IOKernelAllocateWithPhysicalRestrict(). Mirrors the
 * allocator's (2 * size) + header sizing rule to decide whether the
 * memory was a direct map allocation or a heap allocation with a
 * stashed IOLibPageMallocHeader below the address.
 */
void
IOKernelFreePhysical(
	kalloc_heap_t         kheap,
	mach_vm_address_t     address,
	mach_vm_size_t        size)
{
	vm_address_t       allocationAddress;
	vm_size_t          adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, address, size);
		}
#endif
		kmem_free(kheap->kh_fallback_map, (vm_offset_t) address, size);
	} else {
		// Small path: real base/size live in the header below the address.
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		adjustedSize = hdr->allocationSize;
		allocationAddress = hdr->allocationAddress;
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
		}
#endif
		kheap_free(kheap, allocationAddress, adjustedSize);
	}

	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}
636 
637 #if __arm__ || __arm64__
638 extern unsigned long gPhysBase, gPhysSize;
639 #endif
640 
/*
 * Allocate wired kernel memory honoring optional physical constraints:
 *   maxPhys    - highest acceptable physical address (0 = unrestricted)
 *   alignment  - byte alignment (0 is treated as 1)
 *   contiguous - request physically contiguous pages
 * Constrained or contiguous requests use the VM layer directly
 * (kmem_alloc_contig / kernel_memory_allocate); small unconstrained
 * requests come from the heap with an IOLibPageMallocHeader stashed
 * below the returned address. Returns 0 on failure. Pair with
 * IOKernelFreePhysical() called with the same size.
 */
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
	kalloc_heap_t         kheap,
	mach_vm_size_t        size,
	mach_vm_address_t     maxPhys,
	mach_vm_size_t        alignment,
	bool                  contiguous)
{
	kern_return_t           kr;
	mach_vm_address_t       address;
	mach_vm_address_t       allocationAddress;
	mach_vm_size_t          adjustedSize;
	mach_vm_address_t       alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return 0;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	alignMask = alignment - 1;

	// adjustedSize = 2 * size + header, with overflow detection.
	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
		return 0;
	}

	contiguous = (contiguous && (adjustedSize > page_size))
	    || (alignment > page_size);

	if (contiguous || maxPhys) {
		kma_flags_t options = KMA_NONE;
		vm_offset_t virt;

		adjustedSize = size;
		contiguous = (contiguous && (adjustedSize > page_size))
		    || (alignment > page_size);

		if (!contiguous) {
			// Drop the physical ceiling when trivially satisfied, or turn
			// a 32-bit ceiling into the cheaper KMA_LOMEM option.
#if __arm__ || __arm64__
			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
				maxPhys = 0;
			} else
#endif
			if (maxPhys <= 0xFFFFFFFF) {
				maxPhys = 0;
				options = (kma_flags_t)(options | KMA_LOMEM);
			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
				maxPhys = 0;
			}
		}
		if (contiguous || maxPhys) {
			kr = kmem_alloc_contig(kheap->kh_fallback_map, &virt, size,
			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
			    KMA_NONE, IOMemoryTag(kernel_map));
		} else {
			kr = kernel_memory_allocate(kheap->kh_fallback_map, &virt,
			    size, alignMask, options, IOMemoryTag(kernel_map));
		}
		if (KERN_SUCCESS == kr) {
			address = virt;
#if IOTRACKING
			if (TRACK_ALLOC) {
				IOTrackingAlloc(gIOMallocTracking, address, size);
			}
#endif
		} else {
			address = 0;
		}
	} else {
		adjustedSize += alignMask;
		if (adjustedSize < size) {
			return 0;
		}
		allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
		    adjustedSize, Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));

		if (allocationAddress) {
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			// If the aligned buffer would straddle a page boundary, bump
			// it to the next page — presumably to keep the sub-page
			// buffer within one physical page; the 2x over-allocation
			// above guarantees room for this.
			if (atop_32(address) != atop_32(address + size - 1)) {
				address = round_page(address);
			}

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			hdr->allocationSize    = adjustedSize;
			hdr->allocationAddress = allocationAddress;
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size    = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	if (address) {
		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
	}

	return address;
}
752 
753 
754 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
755 
/*
 * Bookkeeping node linking a virtual address handed out by
 * IOMallocContiguous() to the IOBufferMemoryDescriptor backing it;
 * queued on gIOMallocContiguousEntries so IOFreeContiguous() can find
 * and release the descriptor.
 */
struct _IOMallocContiguousEntry {
	mach_vm_address_t          virtualAddr;
	IOBufferMemoryDescriptor * md;
	queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
762 
/*
 * Allocate physically contiguous memory. When physicalAddress is NULL
 * the memory comes from IOKernelAllocateWithPhysicalRestrict();
 * otherwise it is backed by an IOBufferMemoryDescriptor constrained to
 * 32-bit physical addresses, with *physicalAddress filled in and the
 * descriptor recorded for IOFreeContiguous() to release.
 */
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
	mach_vm_address_t   address = 0;

	if (size == 0) {
		return NULL;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	/* Do we want a physical address? */
	if (!physicalAddress) {
		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_KEXT,
		    size, 0 /*maxPhys*/, alignment, true);
	} else {
		do {
			IOBufferMemoryDescriptor * bmd;
			mach_vm_address_t          physicalMask;
			vm_offset_t                alignMask;

			// Constrain below 4GB with the requested alignment.
			alignMask = alignment - 1;
			physicalMask = (0xFFFFFFFF ^ alignMask);

			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
			if (!bmd) {
				break;
			}

			_IOMallocContiguousEntry *
			    entry = IOMallocType(_IOMallocContiguousEntry);
			if (!entry) {
				bmd->release();
				break;
			}
			// Record the descriptor so the free path can release it.
			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
			entry->md          = bmd;
			lck_mtx_lock(gIOMallocContiguousEntriesLock);
			queue_enter( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			lck_mtx_unlock(gIOMallocContiguousEntriesLock);

			address          = (mach_vm_address_t) entry->virtualAddr;
			*physicalAddress = bmd->getPhysicalAddress();
		}while (false);
	}

	return (void *) address;
}
815 
/*
 * Free memory from IOMallocContiguous(). If the address matches a
 * recorded descriptor entry, release the IOBufferMemoryDescriptor;
 * otherwise the memory came from the physical-restrict allocator.
 */
void
IOFreeContiguous(void * _address, vm_size_t size)
{
	_IOMallocContiguousEntry * entry;
	IOMemoryDescriptor *       md = NULL;

	mach_vm_address_t address = (mach_vm_address_t) _address;

	if (!address) {
		return;
	}

	assert(size);

	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_iterate( &gIOMallocContiguousEntries, entry,
	    _IOMallocContiguousEntry *, link )
	{
		if (entry->virtualAddr == address) {
			md   = entry->md;
			queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			break;
		}
	}
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	if (md) {
		// entry is valid here: md is only set alongside it above.
		md->release();
		IOFreeType(entry, _IOMallocContiguousEntry);
	} else {
		IOKernelFreePhysical(KHEAP_KEXT, (mach_vm_address_t) address, size);
	}
}
850 
851 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
852 
/*
 * Run 'callback' against the existing pageable submaps, starting from
 * the last successful one (hint). If every map reports KERN_NO_SPACE,
 * grow the pool by creating a new submap (under gIOKitPageableSpace.lock)
 * and retry. Returns the callback's result, kIOReturnBadArgument for
 * oversized requests, or KERN_NO_SPACE once the map table is full.
 */
kern_return_t
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
{
	kern_return_t       kr = kIOReturnNotReady;
	vm_size_t           segSize;
	UInt32              attempts;
	UInt32              index;
	vm_offset_t         min;
	vm_map_t            map;

	if (size > kIOPageableMaxMapSize) {
		return kIOReturnBadArgument;
	}

	do {
		// Walk downwards (wrapping) from the hint through all maps.
		index = gIOKitPageableSpace.hint;
		attempts = gIOKitPageableSpace.count;
		while (attempts--) {
			kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
			if (KERN_SUCCESS == kr) {
				gIOKitPageableSpace.hint = index;
				break;
			}
			if (index) {
				index--;
			} else {
				index = gIOKitPageableSpace.count - 1;
			}
		}
		if (KERN_NO_SPACE != kr) {
			break;
		}

		lck_mtx_lock( gIOKitPageableSpace.lock );

		index = gIOKitPageableSpace.count;
		// NOTE(review): this cap stops one short of kIOMaxPageableMaps,
		// leaving the final table slot unused — confirm intended.
		if (index >= (kIOMaxPageableMaps - 1)) {
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		// Size the new submap to cover at least this request.
		if (size < kIOPageableMapSize) {
			segSize = kIOPageableMapSize;
		} else {
			segSize = size;
		}

		min = 0;
		kr = kmem_suballoc(kernel_map,
		    &min,
		    segSize,
		    VM_MAP_CREATE_PAGEABLE,
		    VM_FLAGS_ANYWHERE,
		    VM_MAP_KERNEL_FLAGS_NONE,
		    VM_KERN_MEMORY_IOKIT,
		    &map);
		if (KERN_SUCCESS != kr) {
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		// Publish the new map, then loop back and retry the callback.
		gIOKitPageableSpace.maps[index].map     = map;
		gIOKitPageableSpace.maps[index].address = min;
		gIOKitPageableSpace.maps[index].end     = min + segSize;
		gIOKitPageableSpace.hint                = index;
		gIOKitPageableSpace.count               = index + 1;

		lck_mtx_unlock( gIOKitPageableSpace.lock );
	} while (true);

	return kr;
}
926 
/* Parameter block threaded through IOIteratePageableMaps() to
 * IOMallocPageableCallback(); 'address' receives the allocation. */
struct IOMallocPageableRef {
	vm_offset_t address;
	vm_size_t   size;
	vm_tag_t    tag;
};
932 
933 static kern_return_t
IOMallocPageableCallback(vm_map_t map,void * _ref)934 IOMallocPageableCallback(vm_map_t map, void * _ref)
935 {
936 	struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
937 	kern_return_t                kr;
938 
939 	kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
940 
941 	return kr;
942 }
943 
944 static void *
IOMallocPageablePages(vm_size_t size,vm_size_t alignment,vm_tag_t tag)945 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
946 {
947 	kern_return_t              kr = kIOReturnNotReady;
948 	struct IOMallocPageableRef ref;
949 
950 	if (alignment > page_size) {
951 		return NULL;
952 	}
953 	if (size > kIOPageableMaxMapSize) {
954 		return NULL;
955 	}
956 
957 	ref.size = size;
958 	ref.tag  = tag;
959 	kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
960 	if (kIOReturnSuccess != kr) {
961 		ref.address = 0;
962 	}
963 
964 	return (void *) ref.address;
965 }
966 
967 vm_map_t
IOPageableMapForAddress(uintptr_t address)968 IOPageableMapForAddress( uintptr_t address )
969 {
970 	vm_map_t    map = NULL;
971 	UInt32      index;
972 
973 	for (index = 0; index < gIOKitPageableSpace.count; index++) {
974 		if ((address >= gIOKitPageableSpace.maps[index].address)
975 		    && (address < gIOKitPageableSpace.maps[index].end)) {
976 			map = gIOKitPageableSpace.maps[index].map;
977 			break;
978 		}
979 	}
980 	if (!map) {
981 		panic("IOPageableMapForAddress: null");
982 	}
983 
984 	return map;
985 }
986 
987 static void
IOFreePageablePages(void * address,vm_size_t size)988 IOFreePageablePages(void * address, vm_size_t size)
989 {
990 	vm_map_t map;
991 
992 	map = IOPageableMapForAddress((vm_address_t) address);
993 	if (map) {
994 		kmem_free( map, (vm_offset_t) address, size);
995 	}
996 }
997 
#if defined(__x86_64__)
/* Page supplier for the pageable iopa pool: fetch one pageable page.
 * The kheap argument is unused; the pool allocator supplies 'a'. */
static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
{
	return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
#endif /* defined(__x86_64__) */
1005 
/*
 * Shared implementation for IOMallocPageable[Zero](): returns pageable
 * kernel memory, zeroed when 'zeroed' is set. On x86_64, sub-page
 * requests are carved from pooled pages via iopa_alloc() and zeroed
 * explicitly; whole-page requests (and everything on other
 * architectures) come from the pageable map pool, whose pages arrive
 * zeroed. Returns NULL when the alignment does not fit in 32 bits or
 * allocation fails.
 */
static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
	void * addr;

	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}
#if defined(__x86_64__)
	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
	    alignment > page_size) {
		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
		/* Memory allocated this way will already be zeroed. */
	} else {
		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
		    &IOMallocOnePageablePage, KHEAP_ANY, size, (uint32_t) alignment));
		if (addr && zeroed) {
			bzero(addr, size);
		}
	}
#else /* !defined(__x86_64__) */
	// A zero-size request still returns a live allocation.
	vm_size_t allocSize = size;
	if (allocSize == 0) {
		allocSize = 1;
	}
	addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
	/* already zeroed */
#endif /* defined(__x86_64__) */

	if (addr) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
	}

	return addr;
}
1044 
/* Allocate pageable kernel memory; no explicit zeroing is requested. */
void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}
1050 
/* Allocate pageable kernel memory, guaranteed zero-filled. */
void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}
1056 
/*
 * Free memory from IOMallocPageable[Zero](). On x86_64, sub-page
 * chunks go back through the pool allocator; iopa_free() returns the
 * base of a page to release (if any), which is then freed as a whole
 * page.
 */
void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	IOStatisticsAlloc(kIOStatisticsFreePageable, size);

#if defined(__x86_64__)
	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
		size = page_size;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#else /* !defined(__x86_64__) */
	// Match the size = 1 substitution made on the allocation side.
	if (size == 0) {
		size = 1;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#endif /* defined(__x86_64__) */
}
1082 
1083 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1084 
1085 void *
IOMallocData(vm_size_t size)1086 IOMallocData(vm_size_t size)
1087 {
1088 	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size);
1089 }
1090 void *
IOMallocZeroData(vm_size_t size)1091 IOMallocZeroData(vm_size_t size)
1092 {
1093 	return IOMallocZero_internal(KHEAP_DATA_BUFFERS, size);
1094 }
1095 
1096 void
IOFreeData(void * address,vm_size_t size)1097 IOFreeData(void * address, vm_size_t size)
1098 {
1099 	return IOFree_internal(KHEAP_DATA_BUFFERS, address, size);
1100 }
1101 
1102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1103 
/*
 * Fixed-size typed allocation entry point (IOMallocType backend).
 * Returns zeroed memory sized for the type described by 'kt_view'.
 */
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
		void *mem = IOMallocZero_internal(KHEAP_DEFAULT, kt_size);
		/* Non-VM-backed typed allocations are expected to succeed. */
		if (!IOMallocType_from_vm(kt_view)) {
			assert(mem);
		}
		return mem;
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
	if (!IOMallocType_from_vm(kt_view)) {
		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
	}
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kalloc_type_impl_external(kt_view, kt_flags);
}
1133 
/*
 * Free a fixed-size typed allocation made by IOMallocTypeImpl.
 */
void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
	/* The leak-tracking path allocated via IOMalloc; free the same way. */
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address,
		           kalloc_type_get_size(kt_view->kt_size));
	}
#endif
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kfree_type_impl_external(kt_view, address);
}
1150 
/*
 * Variable-size typed allocation entry point (IOMallocTypeVar backend).
 * Returns zeroed memory of 'size' bytes for the type described by 'kt_view'.
 */
void *
IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		return IOMallocZero_internal(KHEAP_DEFAULT, size);
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);

	/* Tag the allocation with a backtrace-derived VM tag. */
	kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
	return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
}
1169 
/*
 * Free a variable-size typed allocation made by IOMallocTypeVarImpl.
 * 'size' must match the size passed at allocation time.
 */
void
IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
    vm_size_t size)
{
#if IOTRACKING
	/* The leak-tracking path allocated via IOMalloc; free the same way. */
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address, size);
	}
#endif

	return kfree_type_var_impl(kt_view, address, size);
}
1182 
1183 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1184 
1185 #if defined(__x86_64__)
1186 
1187 
1188 extern "C" void
iopa_init(iopa_t * a)1189 iopa_init(iopa_t * a)
1190 {
1191 	bzero(a, sizeof(*a));
1192 	a->lock = IOLockAlloc();
1193 	queue_init(&a->list);
1194 }
1195 
/*
 * Try to carve 'count' contiguous chunks out of one iopa page.
 * pa->avail is a bitmap of free chunks (bit 63 corresponds to the first
 * chunk of the page); 'align' masks the acceptable starting positions.
 * Returns the allocation address, or 0 when this page cannot satisfy
 * the request.  Caller holds the allocator lock.
 */
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
	uint32_t n, s;
	uint64_t avail = pa->avail;

	assert(avail);

	// find strings of count 1 bits in avail
	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail = avail & (avail << s);
	}
	// and aligned
	avail &= align;

	if (avail) {
		/* Highest set bit marks the first usable run of chunks. */
		n = __builtin_clzll(avail);
		/* Clear the 'count' bits of the run starting at bit position n. */
		pa->avail &= ~((-1ULL << (64 - count)) >> n);
		/* Page is now full: remove it from the allocator's partial list. */
		if (!pa->avail && pa->link.next) {
			remque(&pa->link);
			pa->link.next = NULL;
		}
		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
	}

	return 0;
}
1224 
/*
 * Allocate 'bytes' (in chunk granularity, aligned to 'balign') from page
 * allocator 'a'.  Pages already on the partial list are searched first;
 * when none can satisfy the request, 'alloc' is invoked to obtain a fresh
 * page from 'kheap'.  Returns the allocation address, or 0 on failure.
 */
uintptr_t
iopa_alloc(
	iopa_t          * a,
	iopa_proc_t       alloc,
	kalloc_heap_t     kheap,
	vm_size_t         bytes,
	vm_size_t         balign)
{
	/* Start-position masks indexed by log2 of the alignment in chunks
	 * (index 0 = any chunk, last = only the first chunk of a page). */
	static const uint64_t align_masks[] = {
		0xFFFFFFFFFFFFFFFF,
		0xAAAAAAAAAAAAAAAA,
		0x8888888888888888,
		0x8080808080808080,
		0x8000800080008000,
		0x8000000080000000,
		0x8000000000000000,
	};
	iopa_page_t * pa;
	uintptr_t     addr = 0;
	uint32_t      count;
	uint64_t      align;
	vm_size_t     align_masks_idx;

	/* Reject sizes that do not fit in 32 bits. */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}
	/* Number of chunks needed, rounding up. */
	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
	align = align_masks[align_masks_idx];

	/* First pass: try every page already on the partial list. */
	IOLockLock(a->lock);
	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
	while (!queue_end(&a->list, &pa->link)) {
		addr = iopa_allocinpage(pa, count, align);
		if (addr) {
			a->bytecount += bytes;
			break;
		}
		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
	}
	IOLockUnlock(a->lock);

	if (!addr) {
		/* No partial page could satisfy the request: get a new page.
		 * 'alloc' is called without the lock held. */
		addr = alloc(kheap, a);
		if (addr) {
			/* The page header lives in the last chunk of the page;
			 * avail = -2ULL marks every chunk free except that one. */
			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
			pa->signature = kIOPageAllocSignature;
			pa->avail     = -2ULL;

			addr = iopa_allocinpage(pa, count, align);
			IOLockLock(a->lock);
			/* Publish the page on the partial list if space remains. */
			if (pa->avail) {
				enqueue_head(&a->list, &pa->link);
			}
			a->pagecount++;
			if (addr) {
				a->bytecount += bytes;
			}
			IOLockUnlock(a->lock);
		}
	}

	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
	return addr;
}
1295 
/*
 * Return 'bytes' at 'addr' to page allocator 'a'.  Returns the base of
 * the containing page when this free empties the whole page (the caller
 * is then responsible for releasing the page itself), or 0 otherwise.
 */
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
	iopa_page_t * pa;
	uint32_t      count;
	uintptr_t     chunk;

	/* Reject sizes that do not fit in 32 bits. */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}

	/* Byte offset of the allocation within its page; must be chunk aligned. */
	chunk = (addr & page_mask);
	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

	/* The page header occupies the last chunk of the page. */
	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
	assert(kIOPageAllocSignature == pa->signature);

	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
	chunk /= gIOPageAllocChunkBytes;

	IOLockLock(a->lock);
	if (!pa->avail) {
		/* Page was full (off-list): put it back on the partial list. */
		assert(!pa->link.next);
		enqueue_tail(&a->list, &pa->link);
	}
	/* Mark the freed run of chunks available again. */
	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
	if (pa->avail != -2ULL) {
		/* Page still has live allocations: nothing to hand back. */
		pa = NULL;
	} else {
		/* Only the header chunk remains in use: retire the page. */
		remque(&pa->link);
		pa->link.next = NULL;
		pa->signature = 0;
		a->pagecount--;
		// page to free
		pa = (typeof(pa))trunc_page(pa);
	}
	a->bytecount -= bytes;
	IOLockUnlock(a->lock);

	return (uintptr_t) pa;
}
1340 
1341 #endif /* defined(__x86_64__) */
1342 
1343 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1344 
/*
 * Change the cache mode of already-mapped kernel memory by remapping each
 * page with the requested cache attributes.  Only the kernel task is
 * supported, and both 'address' and 'length' must be page aligned;
 * otherwise kIOReturnUnsupported is returned.
 */
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
	IOReturn    ret = kIOReturnSuccess;
	ppnum_t     pagenum;

	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}
	if ((address | length) & PAGE_MASK) {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
		return kIOReturnUnsupported;
	}
	/* Both values are already page aligned here (checked above). */
	length = round_page(address + length) - trunc_page( address );
	address = trunc_page( address );

	// make map mode
	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

	while ((kIOReturnSuccess == ret) && (length > 0)) {
		// Get the physical page number
		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
		if (pagenum) {
			/* Unmap, then remap the same physical page with the
			 * new cache mode. */
			ret = IOUnmapPages( get_task_map(task), address, page_size );
			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
		} else {
			ret = kIOReturnVMError;
		}

		address += page_size;
		length -= page_size;
	}

	return ret;
}
1381 
1382 
1383 IOReturn
IOFlushProcessorCache(task_t task,IOVirtualAddress address,IOByteCount length)1384 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1385     IOByteCount length )
1386 {
1387 	if (task != kernel_task) {
1388 		return kIOReturnUnsupported;
1389 	}
1390 
1391 	flush_dcache64((addr64_t) address, (unsigned) length, false );
1392 
1393 	return kIOReturnSuccess;
1394 }
1395 
1396 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1397 
1398 vm_offset_t
OSKernelStackRemaining(void)1399 OSKernelStackRemaining( void )
1400 {
1401 	return ml_stack_remaining();
1402 }
1403 
1404 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1405 
/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
1409 void
IOSleep(unsigned milliseconds)1410 IOSleep(unsigned milliseconds)
1411 {
1412 	delay_for_interval(milliseconds, kMillisecondScale);
1413 }
1414 
/*
 * Sleep the calling thread for the indicated number of milliseconds, and
 * potentially an additional number of milliseconds up to the leeway value.
 */
1419 void
IOSleepWithLeeway(unsigned intervalMilliseconds,unsigned leewayMilliseconds)1420 IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1421 {
1422 	delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1423 }
1424 
1425 /*
1426  * Spin for indicated number of microseconds.
1427  */
1428 void
IODelay(unsigned microseconds)1429 IODelay(unsigned microseconds)
1430 {
1431 	delay_for_interval(microseconds, kMicrosecondScale);
1432 }
1433 
1434 /*
1435  * Spin for indicated number of nanoseconds.
1436  */
1437 void
IOPause(unsigned nanoseconds)1438 IOPause(unsigned nanoseconds)
1439 {
1440 	delay_for_interval(nanoseconds, kNanosecondScale);
1441 }
1442 
1443 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1444 
/* Shared backend for IOLog/IOLogv; 'caller' is the return address used
 * for log attribution. */
static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

/*
 * printf-style kernel logging.  noinline/not_tail_called keep a real
 * stack frame so __builtin_return_address identifies the external caller.
 */
__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
	void *caller = __builtin_return_address(0);
	va_list ap;

	va_start(ap, format);
	_IOLogv(format, ap, caller);
	va_end(ap);
}
1458 
/*
 * va_list variant of IOLog.  noinline/not_tail_called keep a real stack
 * frame so __builtin_return_address identifies the external caller.
 */
__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
	void *caller = __builtin_return_address(0);
	_IOLogv(format, ap, caller);
}
1466 
/*
 * Shared backend for IOLog/IOLogv: forwards the message to os_log and,
 * unless serial output is disabled, to the console via __doprnt.
 */
void
_IOLogv(const char *format, va_list ap, void *caller)
{
	va_list ap2;
	struct console_printbuf_state info_data;
	console_printbuf_state_init(&info_data, TRUE, TRUE);

	/* os_log_with_args consumes 'ap'; keep a copy for the console path. */
	va_copy(ap2, ap);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
#pragma clang diagnostic pop

	if (!disable_iolog_serial_output) {
		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
		console_printbuf_clear(&info_data);
	}
	va_end(ap2);

	/* Calling IOLog with interrupts disabled is a bug, except while
	 * quiescing, in the debugger, or before other CPUs are running. */
	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
	    debug_mode_active() || !gCPUsRunning,
	    "IOLog called with interrupts disabled");
}
1491 
#if !__LP64__
/*
 * Legacy (32-bit only) wrapper around panic().
 */
void
IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif
1499 
1500 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1501 
/*
 * Dump up to 4096 bytes of 'buffer' through the 'output' callback as a
 * 16-bytes-per-row hex listing with a printable-ASCII column, headed by
 * 'title' and the (untruncated) size.
 */
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
	size_t idx, linestart;
	/* Width of one "0xZZ, " cell in the hex column. */
	enum { bytelen = (sizeof("0xZZ, ") - 1) };
	char hex[(bytelen * 16) + 1];
	uint8_t c, chars[17];

	output("%s(0x%lx):\n", title, size);
	output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
	/* Cap the dump to keep log volume bounded. */
	if (size > 4096) {
		size = 4096;
	}
	chars[16] = 0;
	for (idx = 0, linestart = 0; idx < size;) {
		c = ((char *)buffer)[idx];
		/* Each snprintf writes one cell plus its NUL terminator. */
		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
		/* Substitute non-printable bytes with a space in the ASCII column. */
		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
		idx++;
		/* Emit a row at each 16-byte boundary and at the end of data. */
		if ((idx == size) || !(idx & 15)) {
			if (idx & 15) {
				/* Truncate the ASCII column for a partial last row. */
				chars[idx & 15] = 0;
			}
			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
			linestart += 16;
		}
	}
}
1531 
1532 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1533 
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
/* Fallback buffer for undefined values.
 * NOTE(review): shared static buffer — concurrent callers that both hit
 * the undefined-value path can race on its contents. */
static char noValue[80];        // that's pretty

/*
 * Look up 'value' in a table terminated by an entry with a NULL name.
 * Returns the matching name, or a "0x… (UNDEFINED)" string built in the
 * shared static buffer above.
 */
const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for (; regValueArray->name; regValueArray++) {
		if (regValueArray->value == value) {
			return regValueArray->name;
		}
	}
	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
	return (const char *)noValue;
}
1550 
1551 IOReturn
IOFindValueForName(const char * string,const IONamedValue * regValueArray,int * value)1552 IOFindValueForName(const char *string,
1553     const IONamedValue *regValueArray,
1554     int *value)
1555 {
1556 	for (; regValueArray->name; regValueArray++) {
1557 		if (!strcmp(regValueArray->name, string)) {
1558 			*value = regValueArray->value;
1559 			return kIOReturnSuccess;
1560 		}
1561 	}
1562 	return kIOReturnBadArgument;
1563 }
1564 
1565 OSString *
IOCopyLogNameForPID(int pid)1566 IOCopyLogNameForPID(int pid)
1567 {
1568 	char   buf[128];
1569 	size_t len;
1570 	snprintf(buf, sizeof(buf), "pid %d, ", pid);
1571 	len = strlen(buf);
1572 	proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1573 	return OSString::withCString(buf);
1574 }
1575 
1576 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1577 
1578 IOAlignment
IOSizeToAlignment(unsigned int size)1579 IOSizeToAlignment(unsigned int size)
1580 {
1581 	int shift;
1582 	const int intsize = sizeof(unsigned int) * 8;
1583 
1584 	for (shift = 1; shift < intsize; shift++) {
1585 		if (size & 0x80000000) {
1586 			return (IOAlignment)(intsize - shift);
1587 		}
1588 		size <<= 1;
1589 	}
1590 	return 0;
1591 }
1592 
1593 unsigned int
IOAlignmentToSize(IOAlignment align)1594 IOAlignmentToSize(IOAlignment align)
1595 {
1596 	unsigned int size;
1597 
1598 	for (size = 1; align; align--) {
1599 		size <<= 1;
1600 	}
1601 	return size;
1602 }
1603 } /* extern "C" */
1604