xref: /xnu-8019.80.24/iokit/Kernel/IOLib.cpp (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * HISTORY
30  *
31  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
32  * 17-Nov-98   cpp
33  *
34  */
35 
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern.h>
40 #include <libkern/c++/OSCPPDebug.h>
41 
42 #include <IOKit/assert.h>
43 
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOLib.h>
46 #include <IOKit/IOLocks.h>
47 #include <IOKit/IOMapper.h>
48 #include <IOKit/IOBufferMemoryDescriptor.h>
49 #include <IOKit/IOKitDebug.h>
50 
51 #include "IOKitKernelInternal.h"
52 
53 #ifdef IOALLOCDEBUG
54 #include <libkern/OSDebug.h>
55 #include <sys/sysctl.h>
56 #endif
57 
58 #include "libkern/OSAtomic.h"
59 #include <libkern/c++/OSKext.h>
60 #include <IOKit/IOStatisticsPrivate.h>
61 #include <os/log_private.h>
62 #include <sys/msgbuf.h>
63 #include <console/serial_protos.h>
64 
#if IOKITSTATS

/* Record an allocation or free of 'size' bytes with the IOStatistics subsystem. */
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)

#else

/* Statistics disabled: compiles away to nothing. */
#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


/* True when allocation tracking is compiled in AND enabled via the gIOKitDebug boot-arg. */
#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
80 
81 
82 extern "C"
83 {
84 mach_timespec_t IOZeroTvalspec = { 0, 0 };
85 
86 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
87 
88 extern int
89 __doprnt(
90 	const char              *fmt,
91 	va_list                 argp,
92 	void                    (*putc)(int, void *),
93 	void                    *arg,
94 	int                     radix,
95 	int                     is_log);
96 
97 extern bool bsd_log_lock(bool);
98 extern void bsd_log_unlock(void);
99 
100 
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102 
/* Lock group used for all IOKit lock allocations in this file. */
lck_grp_t       *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;

/* Chunked page allocator used by IOBufferMemoryDescriptor (initialized in IOLibInit). */
iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

/* List of IOMallocContiguous allocations backed by an IOBufferMemoryDescriptor, plus its lock. */
static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

/* Per-architecture sizing of the pageable submap pool. */
#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

/* One pageable submap: the vm_map and the [address, end) virtual range it covers. */
typedef struct {
	vm_map_t            map;
	vm_offset_t address;
	vm_offset_t end;
} IOMapData;

/* Pool of pageable submaps, grown on demand; 'hint' is the index last used successfully. */
static struct {
	UInt32      count;
	UInt32      hint;
	IOMapData   maps[kIOMaxPageableMaps];
	lck_mtx_t * lock;
} gIOKitPageableSpace;

/* Chunked page allocator backing small IOMallocPageable requests. */
static iopa_t gIOPageablePageAllocator;

/* Chunk granularity of the iopa allocators; set to PAGE_SIZE / 64 in IOLibInit. */
uint32_t  gIOPageAllocChunkBytes;

#if IOTRACKING
/* Allocation-tracking queues for leak detection, created in IOLibInit. */
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */
157 
158 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
159 
/*
 * IOLibInit - one-time initialization of IOKit's allocator machinery.
 *
 * Creates the IOKit lock group, the optional allocation-tracking queues,
 * the first pageable submap carved out of kernel_map, the contiguous
 * allocation bookkeeping list, and the chunked page allocators.
 * Idempotent: a second call returns immediately.
 */
void
IOLibInit(void)
{
	kern_return_t ret;

	/* Guards against repeated initialization. */
	static bool libInitialized;

	if (libInitialized) {
		return;
	}

	IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
	IOTrackingInit();
	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
	    kIOTrackingQueueTypeAlloc,
	    37);
	gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

	/* Capture threshold for map tracking — presumably the minimum size recorded;
	 * much lower (one page) when the kIOTracking debug flag is set. */
	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
	gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
	    kIOTrackingQueueTypeDefaultOn
	    | kIOTrackingQueueTypeMap
	    | kIOTrackingQueueTypeUser,
	    0);
#endif

	/* First pageable submap; more are added on demand by IOIteratePageableMaps(). */
	gIOKitPageableSpace.maps[0].address = 0;
	ret = kmem_suballoc(kernel_map,
	    &gIOKitPageableSpace.maps[0].address,
	    kIOPageableMapSize,
	    TRUE,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_IOKIT,
	    &gIOKitPageableSpace.maps[0].map);
	if (ret != KERN_SUCCESS) {
		panic("failed to allocate iokit pageable map");
	}

	gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
	gIOKitPageableSpace.hint            = 0;
	gIOKitPageableSpace.count           = 1;

	gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue_init( &gIOMallocContiguousEntries );

	/* 64 chunks per page; the iopa_page_t bookkeeping must fit in one chunk. */
	gIOPageAllocChunkBytes = PAGE_SIZE / 64;
	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
	iopa_init(&gIOBMDPageAllocator);
	iopa_init(&gIOPageablePageAllocator);


	libInitialized = true;
}
217 
218 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
219 
220 vm_size_t
log2up(vm_size_t size)221 log2up(vm_size_t size)
222 {
223 	if (size <= 1) {
224 		size = 0;
225 	} else {
226 #if __LP64__
227 		size = 64 - __builtin_clzl(size - 1);
228 #else
229 		size = 32 - __builtin_clzl(size - 1);
230 #endif
231 	}
232 	return size;
233 }
234 
235 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
236 
237 IOThread
IOCreateThread(IOThreadFunc fcn,void * arg)238 IOCreateThread(IOThreadFunc fcn, void *arg)
239 {
240 	kern_return_t   result;
241 	thread_t                thread;
242 
243 	result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
244 	if (result != KERN_SUCCESS) {
245 		return NULL;
246 	}
247 
248 	thread_deallocate(thread);
249 
250 	return thread;
251 }
252 
253 
254 void
IOExitThread(void)255 IOExitThread(void)
256 {
257 	(void) thread_terminate(current_thread());
258 }
259 
/* Exported IOMalloc entry point: kext-facing allocations come from KHEAP_KEXT. */
void *
IOMalloc_external(
	vm_size_t size);
void *
IOMalloc_external(
	vm_size_t size)
{
	return IOMalloc_internal(KHEAP_KEXT, size);
}
269 
/* Exported zeroing IOMalloc entry point: kext-facing allocations come from KHEAP_KEXT. */
void *
IOMallocZero_external(
	vm_size_t size);
void *
IOMallocZero_external(
	vm_size_t size)
{
	return IOMallocZero_internal(KHEAP_KEXT, size);
}
279 
280 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
281 
282 void *
IOMallocZero_internal(struct kalloc_heap * kalloc_heap_cfg,vm_size_t size)283 IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size)
284 {
285 	void * result;
286 	result = IOMalloc_internal(kalloc_heap_cfg, size);
287 	if (result) {
288 		bzero(result, size);
289 	}
290 	return result;
291 }
292 
293 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
294 
#if IOTRACKING
/*
 * Header prepended to each IOMalloc allocation when allocation tracking is
 * compiled in, holding the leak-tracking record for the allocation.
 */
struct IOLibMallocHeader {
	IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
/* The header only costs space when tracking is actually enabled at boot (TRACK_ALLOC). */
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
306 
307 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
308 
/*
 * IOMalloc_internal - allocate 'size' bytes from the given kalloc heap.
 *
 * When allocation tracking is enabled a tracking header is prepended and the
 * returned pointer is advanced past it; IOFree_internal reverses this.
 * Returns NULL on allocation failure (or on size overflow with tracking on).
 */
void *
IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size)
{
	void * address;
	vm_size_t allocSize;

	allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
		return NULL;                                          // overflow
	}
#endif
	address = kheap_alloc_tag_bt(kheap, allocSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

	if (address) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			hdr = (typeof(hdr))address;
			bzero(&hdr->tracking, sizeof(hdr->tracking));
			/* Stored bitwise-complemented — presumably to hide the live
			 * pointer from conservative leak scanning. */
			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
			hdr->tracking.size    = size;
			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
		}
#endif
		/* Hand the caller the address just past the tracking header. */
		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
	}

	return address;
}
344 
/*
 * IOFree_internal - free memory obtained from IOMalloc_internal.
 * 'size' must match the original request; with tracking enabled a mismatch
 * is reported with a backtrace and the recorded size is used instead.
 * NULL is a no-op.
 */
void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
	void * address;

	if ((address = inAddress)) {
		/* Step back over the (possibly zero-sized) tracking header. */
		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			struct ptr_reference { void * ptr; };
			volatile struct ptr_reference ptr;

			// we're about to block in IOTrackingRemove(), make sure the original pointer
			// exists in memory or a register for leak scanning to find
			ptr.ptr = inAddress;

			hdr = (typeof(hdr))address;
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
			ptr.ptr = NULL;
		}
#endif

		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
	}
}
380 
/* Public IOFree: KHEAP_ANY permits freeing regardless of which heap allocated. */
void
IOFree(void * inAddress, vm_size_t size)
{
	IOFree_internal(KHEAP_ANY, inAddress, size);
}
386 
387 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
388 
389 vm_tag_t
IOMemoryTag(vm_map_t map)390 IOMemoryTag(vm_map_t map)
391 {
392 	vm_tag_t tag;
393 
394 	if (!vm_kernel_map_is_kernel(map)) {
395 		return VM_MEMORY_IOKIT;
396 	}
397 
398 	tag = vm_tag_bt();
399 	if (tag == VM_KERN_MEMORY_NONE) {
400 		tag = VM_KERN_MEMORY_IOKIT;
401 	}
402 
403 	return tag;
404 }
405 
406 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
407 
/*
 * Header stored immediately before the aligned address returned by the
 * sub-page paths of IOMallocAligned / IOKernelAllocateWithPhysicalRestrict,
 * recording the real underlying allocation so it can be freed later.
 */
struct IOLibPageMallocHeader {
	mach_vm_size_t    allocationSize;       /* bytes actually allocated */
	mach_vm_address_t allocationAddress;    /* start of the underlying allocation */
#if IOTRACKING
	IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
/* The tracking slot only costs space when tracking is enabled at boot (TRACK_ALLOC). */
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
#endif
421 
422 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Exported IOMallocAligned entry point: kext-facing allocations come from KHEAP_KEXT. */
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment);
void *
IOMallocAligned_external(
	vm_size_t size, vm_size_t alignment)
{
	return IOMallocAligned_internal(KHEAP_KEXT, size, alignment);
}
432 
/*
 * IOMallocAligned_internal - allocate 'size' bytes aligned to 'alignment'
 * (rounded up to a power of two) from the given heap.
 *
 * Large requests (header + size >= page_size) go straight to the VM with the
 * alignment mask and carry no header.  Smaller requests over-allocate by
 * alignMask + header, place an IOLibPageMallocHeader just below the aligned
 * address, and return the aligned interior pointer.  IOFreeAligned_internal
 * distinguishes the two cases by recomputing size + header vs page_size.
 * Returns NULL for size 0, alignment > 32 bits, overflow, or VM failure.
 */
void *
IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
    vm_size_t alignment)
{
	kern_return_t           kr;
	vm_offset_t             address;
	vm_offset_t             allocationAddress;
	vm_size_t               adjustedSize;
	uintptr_t               alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return NULL;
	}
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}

	/* Round alignment up to the next power of two. */
	alignment = (1UL << log2up((uint32_t) alignment));
	alignMask = alignment - 1;
	adjustedSize = size + sizeofIOLibPageMallocHeader;

	if (size > adjustedSize) {
		address = 0; /* overflow detected */
	} else if (adjustedSize >= page_size) {
		/* Page-multiple path: the VM honours the align mask directly, no header needed. */
		kr = kernel_memory_allocate(kheap->kh_fallback_map, &address,
		    size, alignMask, KMA_NONE, IOMemoryTag(kernel_map));
		if (KERN_SUCCESS != kr) {
			address = 0;
		}
#if IOTRACKING
		else if (TRACK_ALLOC) {
			IOTrackingAlloc(gIOMallocTracking, address, size);
		}
#endif
	} else {
		/* Sub-page path: over-allocate so an aligned address + header fits. */
		adjustedSize += alignMask;

		if (adjustedSize >= page_size) {
			kr = kernel_memory_allocate(kheap->kh_fallback_map, &allocationAddress,
			    adjustedSize, 0, KMA_NONE, IOMemoryTag(kernel_map));
			if (KERN_SUCCESS != kr) {
				allocationAddress = 0;
			}
		} else {
			allocationAddress = (vm_address_t) kheap_alloc_tag_bt(kheap,
			    adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);
		}

		if (allocationAddress) {
			/* First aligned address leaving room for the header below it. */
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			hdr->allocationSize    = adjustedSize;
			hdr->allocationAddress = allocationAddress;
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	assert(0 == (address & alignMask));

	if (address) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

	return (void *) address;
}
513 
/* Public IOFreeAligned: KHEAP_ANY permits freeing from whichever heap allocated. */
void
IOFreeAligned(
	void                  * address,
	vm_size_t               size)
{
	IOFreeAligned_internal(KHEAP_ANY, address, size);
}
521 
/*
 * IOFreeAligned_internal - free memory from IOMallocAligned_internal.
 * 'size' must be the original request size: it is used to reconstruct which
 * allocation path was taken (direct VM for size + header >= page_size,
 * header-below-pointer otherwise).  NULL address is a no-op.
 */
void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;
	vm_map_t                kheap_map;

	if (!address) {
		return;
	}

	/*
	 * When called with KHEAP_ANY, use default fallback map as KHEAP_ANY
	 * is a construct that allows to free to a mismatched heap and is
	 * NULL.
	 */
	if (kheap == KHEAP_ANY) {
		kheap_map = KHEAP_DEFAULT->kh_fallback_map;
	} else {
		kheap_map = kheap->kh_fallback_map;
	}

	assert(size);

	adjustedSize = size + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
		/* Direct VM allocation: no header was stored. */
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
		}
#endif
		kmem_free(kheap_map, (vm_offset_t) address, size);
	} else {
		/* Header path: recover the real allocation from the header below 'address'. */
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		adjustedSize = hdr->allocationSize;
		allocationAddress = hdr->allocationAddress;

#if IOTRACKING
		if (TRACK_ALLOC) {
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
		}
#endif
		/* Free via the same mechanism the allocation used. */
		if (adjustedSize >= page_size) {
			kmem_free(kheap_map, allocationAddress, adjustedSize);
		} else {
			kheap_free(kheap, allocationAddress, adjustedSize);
		}
	}

#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
582 
583 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
584 
/*
 * IOKernelFreePhysical - free memory allocated by
 * IOKernelAllocateWithPhysicalRestrict.  The 2*size + header computation
 * mirrors the allocator's sizing so the same path (direct VM vs header) is
 * selected on free.  NULL address is a no-op.
 */
void
IOKernelFreePhysical(
	kalloc_heap_t         kheap,
	mach_vm_address_t     address,
	mach_vm_size_t        size)
{
	vm_address_t       allocationAddress;
	vm_size_t          adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
		/* Direct VM allocation: no header was stored. */
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, address, size);
		}
#endif
		kmem_free(kheap->kh_fallback_map, (vm_offset_t) address, size);
	} else {
		/* Header path: recover the real allocation from the header below 'address'. */
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		adjustedSize = hdr->allocationSize;
		allocationAddress = hdr->allocationAddress;
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
		}
#endif
		kheap_free(kheap, allocationAddress, adjustedSize);
	}

	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}
626 
627 #if __arm__ || __arm64__
628 extern unsigned long gPhysBase, gPhysSize;
629 #endif
630 
/*
 * IOKernelAllocateWithPhysicalRestrict - allocate wired memory honoring an
 * optional maximum physical address, alignment, and physical contiguity.
 *
 * Large, contiguous, or physically-restricted requests go through the VM
 * (kmem_alloc_contig / kernel_memory_allocate); small unrestricted requests
 * are carved from a kheap allocation with an IOLibPageMallocHeader stored
 * below the returned address, matching IOKernelFreePhysical's free paths.
 * Returns 0 on failure.
 */
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
	kalloc_heap_t         kheap,
	mach_vm_size_t        size,
	mach_vm_address_t     maxPhys,
	mach_vm_size_t        alignment,
	bool                  contiguous)
{
	kern_return_t           kr;
	mach_vm_address_t       address;
	mach_vm_address_t       allocationAddress;
	mach_vm_size_t          adjustedSize;
	mach_vm_address_t       alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return 0;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	alignMask = alignment - 1;

	/* adjustedSize = 2*size + header; overflow-checked (mirrors IOKernelFreePhysical). */
	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
		return 0;
	}

	/* Contiguity only matters for multi-page requests or over-page alignment. */
	contiguous = (contiguous && (adjustedSize > page_size))
	    || (alignment > page_size);

	if (contiguous || maxPhys) {
		kma_flags_t options = KMA_NONE;
		vm_offset_t virt;

		adjustedSize = size;
		contiguous = (contiguous && (adjustedSize > page_size))
		    || (alignment > page_size);

		if (!contiguous) {
#if __arm__ || __arm64__
			/* A limit beyond the end of physical memory is no limit at all. */
			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
				maxPhys = 0;
			} else
#endif
			/* A 32-bit physical limit can be satisfied with the KMA_LOMEM flag instead. */
			if (maxPhys <= 0xFFFFFFFF) {
				maxPhys = 0;
				options = (kma_flags_t)(options | KMA_LOMEM);
			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
				maxPhys = 0;
			}
		}
		if (contiguous || maxPhys) {
			kr = kmem_alloc_contig(kheap->kh_fallback_map, &virt, size,
			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
			    KMA_NONE, IOMemoryTag(kernel_map));
		} else {
			kr = kernel_memory_allocate(kheap->kh_fallback_map, &virt,
			    size, alignMask, options, IOMemoryTag(kernel_map));
		}
		if (KERN_SUCCESS == kr) {
			address = virt;
#if IOTRACKING
			if (TRACK_ALLOC) {
				IOTrackingAlloc(gIOMallocTracking, address, size);
			}
#endif
		} else {
			address = 0;
		}
	} else {
		/* Sub-page path: over-allocate so an aligned address + header fits. */
		adjustedSize += alignMask;
		if (adjustedSize < size) {
			return 0;
		}
		allocationAddress = (mach_vm_address_t) kheap_alloc_tag_bt(kheap,
		    adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

		if (allocationAddress) {
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			/* Keep the range within one physical page so it stays contiguous. */
			if (atop_32(address) != atop_32(address + size - 1)) {
				address = round_page(address);
			}

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			hdr->allocationSize    = adjustedSize;
			hdr->allocationAddress = allocationAddress;
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size    = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	if (address) {
		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
	}

	return address;
}
742 
743 
744 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
745 
/*
 * Bookkeeping for IOMallocContiguous allocations that were satisfied with an
 * IOBufferMemoryDescriptor (caller asked for the physical address); linked
 * into gIOMallocContiguousEntries so IOFreeContiguous can find the descriptor.
 */
struct _IOMallocContiguousEntry {
	mach_vm_address_t          virtualAddr;    /* kernel virtual address handed to the caller */
	IOBufferMemoryDescriptor * md;             /* owning descriptor, released on free */
	queue_chain_t              link;           /* linkage in gIOMallocContiguousEntries */
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
752 
/*
 * IOMallocContiguous - allocate physically contiguous memory.
 *
 * Without a physicalAddress out-parameter, this is a thin wrapper over
 * IOKernelAllocateWithPhysicalRestrict.  With one, the allocation is backed
 * by an IOBufferMemoryDescriptor (recorded in gIOMallocContiguousEntries so
 * IOFreeContiguous can release it) and the physical address is returned
 * through the pointer.  Returns NULL for size 0 or on failure.
 */
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
	mach_vm_address_t   address = 0;

	if (size == 0) {
		return NULL;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	/* Do we want a physical address? */
	if (!physicalAddress) {
		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_KEXT,
		    size, 0 /*maxPhys*/, alignment, true);
	} else {
		do {
			IOBufferMemoryDescriptor * bmd;
			mach_vm_address_t          physicalMask;
			vm_offset_t                alignMask;

			/* Physical mask restricts to 32-bit physical space at the requested alignment. */
			alignMask = alignment - 1;
			physicalMask = (0xFFFFFFFF ^ alignMask);

			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
			if (!bmd) {
				break;
			}

			_IOMallocContiguousEntry *
			    entry = IOMallocType(_IOMallocContiguousEntry);
			if (!entry) {
				bmd->release();
				break;
			}
			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
			entry->md          = bmd;
			lck_mtx_lock(gIOMallocContiguousEntriesLock);
			queue_enter( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			lck_mtx_unlock(gIOMallocContiguousEntriesLock);

			address          = (mach_vm_address_t) entry->virtualAddr;
			*physicalAddress = bmd->getPhysicalAddress();
		}while (false);
	}

	return (void *) address;
}
805 
/*
 * IOFreeContiguous - free memory from IOMallocContiguous.
 * If the address is found in gIOMallocContiguousEntries it was descriptor
 * backed: release the descriptor and the entry.  Otherwise it came from
 * IOKernelAllocateWithPhysicalRestrict and is freed through
 * IOKernelFreePhysical.  NULL address is a no-op.
 */
void
IOFreeContiguous(void * _address, vm_size_t size)
{
	_IOMallocContiguousEntry * entry;
	IOMemoryDescriptor *       md = NULL;

	mach_vm_address_t address = (mach_vm_address_t) _address;

	if (!address) {
		return;
	}

	assert(size);

	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_iterate( &gIOMallocContiguousEntries, entry,
	    _IOMallocContiguousEntry *, link )
	{
		if (entry->virtualAddr == address) {
			md   = entry->md;
			queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			break;
		}
	}
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	if (md) {
		/* 'entry' is only valid here because md != NULL implies the loop matched. */
		md->release();
		IOFreeType(entry, _IOMallocContiguousEntry);
	} else {
		IOKernelFreePhysical(KHEAP_KEXT, (mach_vm_address_t) address, size);
	}
}
840 
841 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
842 
/*
 * IOIteratePageableMaps - try 'callback' against each pageable submap,
 * starting at the last successful one ('hint').  If every map returns
 * KERN_NO_SPACE, grow the pool by creating a new submap (sized at least
 * kIOPageableMapSize) under gIOKitPageableSpace.lock and retry.
 * Returns the callback's status, or kIOReturnBadArgument for oversized
 * requests; stops growing at kIOMaxPageableMaps - 1 maps.
 */
kern_return_t
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
{
	kern_return_t       kr = kIOReturnNotReady;
	vm_size_t           segSize;
	UInt32              attempts;
	UInt32              index;
	vm_offset_t         min;
	vm_map_t            map;

	if (size > kIOPageableMaxMapSize) {
		return kIOReturnBadArgument;
	}

	do {
		/* Walk all existing maps starting from the hint, wrapping downward. */
		index = gIOKitPageableSpace.hint;
		attempts = gIOKitPageableSpace.count;
		while (attempts--) {
			kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
			if (KERN_SUCCESS == kr) {
				gIOKitPageableSpace.hint = index;
				break;
			}
			if (index) {
				index--;
			} else {
				index = gIOKitPageableSpace.count - 1;
			}
		}
		/* Only KERN_NO_SPACE justifies creating another submap. */
		if (KERN_NO_SPACE != kr) {
			break;
		}

		lck_mtx_lock( gIOKitPageableSpace.lock );

		/* Re-read count under the lock; another thread may have grown the pool. */
		index = gIOKitPageableSpace.count;
		if (index >= (kIOMaxPageableMaps - 1)) {
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		if (size < kIOPageableMapSize) {
			segSize = kIOPageableMapSize;
		} else {
			segSize = size;
		}

		min = 0;
		kr = kmem_suballoc(kernel_map,
		    &min,
		    segSize,
		    TRUE,
		    VM_FLAGS_ANYWHERE,
		    VM_MAP_KERNEL_FLAGS_NONE,
		    VM_KERN_MEMORY_IOKIT,
		    &map);
		if (KERN_SUCCESS != kr) {
			lck_mtx_unlock( gIOKitPageableSpace.lock );
			break;
		}

		gIOKitPageableSpace.maps[index].map     = map;
		gIOKitPageableSpace.maps[index].address = min;
		gIOKitPageableSpace.maps[index].end     = min + segSize;
		gIOKitPageableSpace.hint                = index;
		gIOKitPageableSpace.count               = index + 1;

		lck_mtx_unlock( gIOKitPageableSpace.lock );
	} while (true);

	return kr;
}
916 
/* In/out parameter block for IOMallocPageableCallback. */
struct IOMallocPageableRef {
	vm_offset_t address;    /* out: allocated virtual address */
	vm_size_t   size;       /* in: bytes requested */
	vm_tag_t    tag;        /* in: VM tag for accounting */
};
922 
923 static kern_return_t
IOMallocPageableCallback(vm_map_t map,void * _ref)924 IOMallocPageableCallback(vm_map_t map, void * _ref)
925 {
926 	struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
927 	kern_return_t                kr;
928 
929 	kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
930 
931 	return kr;
932 }
933 
934 static void *
IOMallocPageablePages(vm_size_t size,vm_size_t alignment,vm_tag_t tag)935 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
936 {
937 	kern_return_t              kr = kIOReturnNotReady;
938 	struct IOMallocPageableRef ref;
939 
940 	if (alignment > page_size) {
941 		return NULL;
942 	}
943 	if (size > kIOPageableMaxMapSize) {
944 		return NULL;
945 	}
946 
947 	ref.size = size;
948 	ref.tag  = tag;
949 	kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
950 	if (kIOReturnSuccess != kr) {
951 		ref.address = 0;
952 	}
953 
954 	return (void *) ref.address;
955 }
956 
957 vm_map_t
IOPageableMapForAddress(uintptr_t address)958 IOPageableMapForAddress( uintptr_t address )
959 {
960 	vm_map_t    map = NULL;
961 	UInt32      index;
962 
963 	for (index = 0; index < gIOKitPageableSpace.count; index++) {
964 		if ((address >= gIOKitPageableSpace.maps[index].address)
965 		    && (address < gIOKitPageableSpace.maps[index].end)) {
966 			map = gIOKitPageableSpace.maps[index].map;
967 			break;
968 		}
969 	}
970 	if (!map) {
971 		panic("IOPageableMapForAddress: null");
972 	}
973 
974 	return map;
975 }
976 
977 static void
IOFreePageablePages(void * address,vm_size_t size)978 IOFreePageablePages(void * address, vm_size_t size)
979 {
980 	vm_map_t map;
981 
982 	map = IOPageableMapForAddress((vm_address_t) address);
983 	if (map) {
984 		kmem_free( map, (vm_offset_t) address, size);
985 	}
986 }
987 
/* iopa backing-page supplier: hand one fresh pageable page to the chunk allocator. */
static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
{
	return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
993 
994 static void *
IOMallocPageableInternal(vm_size_t size,vm_size_t alignment,bool zeroed)995 IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
996 {
997 	void * addr;
998 
999 	if (((uint32_t) alignment) != alignment) {
1000 		return NULL;
1001 	}
1002 	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
1003 	    alignment > page_size) {
1004 		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
1005 		/* Memory allocated this way will already be zeroed. */
1006 	} else {
1007 		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
1008 		    &IOMallocOnePageablePage, KHEAP_ANY, size, (uint32_t) alignment));
1009 		if (zeroed) {
1010 			bzero(addr, size);
1011 		}
1012 	}
1013 
1014 	if (addr) {
1015 #if IOALLOCDEBUG
1016 		OSAddAtomicLong(size, &debug_iomallocpageable_size);
1017 #endif
1018 		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
1019 	}
1020 
1021 	return addr;
1022 }
1023 
/* Allocate pageable memory without guaranteed zero fill (small chunk path may return dirty memory). */
void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}
1029 
/* Allocate pageable memory guaranteed to be zero filled. */
void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
	return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}
1035 
/*
 * IOFreePageable - free memory from IOMallocPageable/IOMallocPageableZero.
 * Small allocations are returned to the chunk allocator first; iopa_free's
 * result is presumably non-zero only when an entire backing page can be
 * released, in which case the whole page is freed below.
 */
void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	IOStatisticsAlloc(kIOStatisticsFreePageable, size);

	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
		size = page_size;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
}
1052 
1053 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1054 
/* Allocate untyped byte-buffer memory from the data-buffers heap. */
void *
IOMallocData(vm_size_t size)
{
	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size);
}
/* Allocate zero-filled byte-buffer memory from the data-buffers heap. */
void *
IOMallocZeroData(vm_size_t size)
{
	return IOMallocZero_internal(KHEAP_DATA_BUFFERS, size);
}
1065 
1066 void
IOFreeData(void * address,vm_size_t size)1067 IOFreeData(void * address, vm_size_t size)
1068 {
1069 	return IOFree_internal(KHEAP_DATA_BUFFERS, address, size);
1070 }
1071 
1072 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1073 
/*
 * Allocate one zero-filled element of the type described by kt_view.
 * Allocations not redirected to the VM (per IOMallocType_from_vm) are
 * tagged Z_NOFAIL, i.e. they are expected never to fail.
 */
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
	uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
	uint32_t kt_idx = kalloc_type_get_idx(kt_view->kt_size);
#if IOTRACKING
	/*
	 * When leak detection is on, default to using IOMalloc, as the
	 * kalloc type infrastructure isn't aware of needing additional
	 * space for the tracking header.
	 */
	if (TRACK_ALLOC) {
		void *mem = IOMallocZero_internal(KHEAP_DEFAULT, kt_size);
		if (!IOMallocType_from_vm(kt_idx, kt_size)) {
			assert(mem);
		}
		return mem;
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
	if (!IOMallocType_from_vm(kt_idx, kt_size)) {
		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
	}
	/*
	 * Use the external symbol for kalloc_type_impl, as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kalloc_type_impl_external(kt_view, kt_flags);
}
1104 
/*
 * Free an element previously obtained from IOMallocTypeImpl for the
 * same kalloc_type view.
 */
void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
	/* Under TRACK_ALLOC the allocation came from IOMallocZero_internal. */
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address,
		           kalloc_type_get_size(kt_view->kt_size));
	}
#endif
	/*
	 * Use the external symbol for kfree_type_impl, as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kfree_type_impl_external(kt_view, address);
}
1121 
1122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1123 
1124 extern "C" void
iopa_init(iopa_t * a)1125 iopa_init(iopa_t * a)
1126 {
1127 	bzero(a, sizeof(*a));
1128 	a->lock = IOLockAlloc();
1129 	queue_init(&a->list);
1130 }
1131 
/*
 * Try to carve `count` contiguous free chunks out of one allocator page.
 * pa->avail is a 64-bit free-chunk bitmap with bit 63 representing the
 * first chunk of the page; `align` is a mask of run-start positions that
 * satisfy the caller's alignment. Returns the address of the run, or 0
 * if no suitable run is available in this page.
 */
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
	uint32_t n, s;
	uint64_t avail = pa->avail;

	assert(avail);

	// find strings of count 1 bits in avail: after this logarithmic
	// reduction, a set bit marks the start of a run of >= count free chunks
	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail = avail & (avail << s);
	}
	// and aligned: keep only run starts at acceptable positions
	avail &= align;

	if (avail) {
		// highest set bit == lowest-addressed suitable run
		n = __builtin_clzll(avail);
		// mark the `count` chunks starting at chunk index n as in use
		pa->avail &= ~((-1ULL << (64 - count)) >> n);
		// page is now full: unlink it from the allocator's page list
		if (!pa->avail && pa->link.next) {
			remque(&pa->link);
			pa->link.next = NULL;
		}
		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
	}

	return 0;
}
1160 
/*
 * Sub-page allocator entry point: allocate `bytes` (rounded up to whole
 * gIOPageAllocChunkBytes chunks) with alignment `balign` from pool `a`.
 * When no page on the pool's list has a suitable free run, `alloc` is
 * called (outside the lock) to obtain a fresh page from `kheap`.
 * Returns 0 if bytes does not fit in 32 bits or the page allocation
 * failed.
 */
uintptr_t
iopa_alloc(
	iopa_t          * a,
	iopa_proc_t       alloc,
	kalloc_heap_t     kheap,
	vm_size_t         bytes,
	vm_size_t         balign)
{
	// Bitmaps of chunk positions that satisfy an alignment of
	// 1, 2, 4, ... 64 chunks respectively (indexed by log2 of the
	// alignment in chunks).
	static const uint64_t align_masks[] = {
		0xFFFFFFFFFFFFFFFF,
		0xAAAAAAAAAAAAAAAA,
		0x8888888888888888,
		0x8080808080808080,
		0x8000800080008000,
		0x8000000080000000,
		0x8000000000000000,
	};
	iopa_page_t * pa;
	uintptr_t     addr = 0;
	uint32_t      count;
	uint64_t      align;
	vm_size_t     align_masks_idx;

	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}
	// Number of chunks needed, rounding up.
	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
	align = align_masks[align_masks_idx];

	// First pass: scan existing pages under the lock.
	IOLockLock(a->lock);
	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
	while (!queue_end(&a->list, &pa->link)) {
		addr = iopa_allocinpage(pa, count, align);
		if (addr) {
			a->bytecount += bytes;
			break;
		}
		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
	}
	IOLockUnlock(a->lock);

	if (!addr) {
		// No room in any existing page: get a fresh page (lock dropped,
		// since the page allocation may block).
		addr = alloc(kheap, a);
		if (addr) {
			// The page header lives in the last chunk of the page.
			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
			pa->signature = kIOPageAllocSignature;
			// All chunks free except the header chunk (lowest bit clear).
			pa->avail     = -2ULL;

			// Safe without the lock: the page is not yet on the list.
			addr = iopa_allocinpage(pa, count, align);
			IOLockLock(a->lock);
			if (pa->avail) {
				enqueue_head(&a->list, &pa->link);
			}
			a->pagecount++;
			if (addr) {
				a->bytecount += bytes;
			}
			IOLockUnlock(a->lock);
		}
	}

	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
	return addr;
}
1231 
/*
 * Return `bytes` at `addr` to sub-page allocator `a`. Returns the base
 * address of the containing page once the whole page has become free
 * (the caller is then responsible for freeing the page itself), or 0
 * while the page is still partially in use.
 */
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
	iopa_page_t * pa;
	uint32_t      count;
	uintptr_t     chunk;

	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}

	// Byte offset of the allocation within its page; must be
	// chunk-aligned as iopa_alloc only hands out chunk boundaries.
	chunk = (addr & page_mask);
	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

	// The page header lives in the last chunk of the page.
	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
	assert(kIOPageAllocSignature == pa->signature);

	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
	chunk /= gIOPageAllocChunkBytes;

	IOLockLock(a->lock);
	if (!pa->avail) {
		// Page was full (and hence off the list); put it back.
		assert(!pa->link.next);
		enqueue_tail(&a->list, &pa->link);
	}
	// Mark the freed chunks available again.
	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
	if (pa->avail != -2ULL) {
		// Page still has live allocations: nothing for the caller to free.
		pa = NULL;
	} else {
		// Only the header chunk remains: retire the whole page.
		remque(&pa->link);
		pa->link.next = NULL;
		pa->signature = 0;
		a->pagecount--;
		// page to free
		pa = (typeof(pa))trunc_page(pa);
	}
	a->bytecount -= bytes;
	IOLockUnlock(a->lock);

	return (uintptr_t) pa;
}
1276 
1277 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1278 
/*
 * Change the cache mode of already-mapped kernel memory by remapping
 * each physical page in place with the new cache attributes. Only
 * supported for kernel_task with page-aligned address and length.
 */
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
	IOReturn    ret = kIOReturnSuccess;
	ppnum_t     pagenum;

	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}
	if ((address | length) & PAGE_MASK) {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
		return kIOReturnUnsupported;
	}
	length = round_page(address + length) - trunc_page( address );
	address = trunc_page( address );

	// make map mode
	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

	while ((kIOReturnSuccess == ret) && (length > 0)) {
		// Get the physical page number backing this virtual page
		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
		if (pagenum) {
			// NOTE(review): the IOUnmapPages result is immediately
			// overwritten by the IOMapPages result, so unmap failures
			// are effectively ignored here.
			ret = IOUnmapPages( get_task_map(task), address, page_size );
			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
		} else {
			ret = kIOReturnVMError;
		}

		address += page_size;
		length -= page_size;
	}

	return ret;
}
1315 
1316 
1317 IOReturn
IOFlushProcessorCache(task_t task,IOVirtualAddress address,IOByteCount length)1318 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1319     IOByteCount length )
1320 {
1321 	if (task != kernel_task) {
1322 		return kIOReturnUnsupported;
1323 	}
1324 
1325 	flush_dcache64((addr64_t) address, (unsigned) length, false );
1326 
1327 	return kIOReturnSuccess;
1328 }
1329 
1330 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1331 
1332 vm_offset_t
OSKernelStackRemaining(void)1333 OSKernelStackRemaining( void )
1334 {
1335 	return ml_stack_remaining();
1336 }
1337 
1338 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1339 
1340 /*
1341  * Spin for indicated number of milliseconds.
1342  */
1343 void
IOSleep(unsigned milliseconds)1344 IOSleep(unsigned milliseconds)
1345 {
1346 	delay_for_interval(milliseconds, kMillisecondScale);
1347 }
1348 
1349 /*
1350  * Spin for indicated number of milliseconds, and potentially an
1351  * additional number of milliseconds up to the leeway values.
1352  */
1353 void
IOSleepWithLeeway(unsigned intervalMilliseconds,unsigned leewayMilliseconds)1354 IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1355 {
1356 	delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1357 }
1358 
1359 /*
1360  * Spin for indicated number of microseconds.
1361  */
1362 void
IODelay(unsigned microseconds)1363 IODelay(unsigned microseconds)
1364 {
1365 	delay_for_interval(microseconds, kMicrosecondScale);
1366 }
1367 
1368 /*
1369  * Spin for indicated number of nanoseconds.
1370  */
1371 void
IOPause(unsigned nanoseconds)1372 IOPause(unsigned nanoseconds)
1373 {
1374 	delay_for_interval(nanoseconds, kNanosecondScale);
1375 }
1376 
1377 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1378 
1379 static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);
1380 
/*
 * printf-style kernel logging; forwards to _IOLogv. noinline and
 * not_tail_called keep a real stack frame so the caller's return
 * address can be captured for log attribution.
 */
__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
	void *caller = __builtin_return_address(0);
	va_list ap;

	va_start(ap, format);
	_IOLogv(format, ap, caller);
	va_end(ap);
}
1392 
/* va_list variant of IOLog(); see _IOLogv for the actual work. */
__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
	void *caller = __builtin_return_address(0);
	_IOLogv(format, ap, caller);
}
1400 
/*
 * Common backend for IOLog/IOLogv: emit the message via os_log, then to
 * the serial console unless that output is disabled. `caller` is the
 * log site's return address, passed through to os_log for attribution.
 */
void
_IOLogv(const char *format, va_list ap, void *caller)
{
	va_list ap2;
	struct console_printbuf_state info_data;
	console_printbuf_state_init(&info_data, TRUE, TRUE);

	// os_log_with_args consumes `ap`; keep a copy for __doprnt below.
	va_copy(ap2, ap);

	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

	if (!disable_iolog_serial_output) {
		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
		console_printbuf_clear(&info_data);
	}
	va_end(ap2);

	// Catch callers logging with interrupts disabled (allowed only while
	// quiescing, in debug mode, or once CPUs are shut down).
	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
	    debug_mode_active() || !gCPUsRunning,
	    "IOLog called with interrupts disabled");
}
1422 
#if !__LP64__
/* Legacy (32-bit only) wrapper around panic(). */
void
IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif
1430 
1431 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1432 
/*
 * Hex + ASCII dump of `buffer` (clamped to the first 4096 bytes) through
 * the caller-supplied printf-style `output` function, 16 bytes per line.
 */
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
	size_t idx, linestart;
	// One "0xZZ, " token per byte, 16 tokens per output line.
	enum { bytelen = (sizeof("0xZZ, ") - 1) };
	char hex[(bytelen * 16) + 1];
	uint8_t c, chars[17];

	output("%s(0x%lx):\n", title, size);
	output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
	if (size > 4096) {
		size = 4096;
	}
	chars[16] = 0;
	for (idx = 0, linestart = 0; idx < size;) {
		c = ((char *)buffer)[idx];
		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
		// Bytes in 0x20..0x7f are shown as-is; others become spaces.
		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
		idx++;
		// Flush on a full 16-byte line or at the end of the buffer.
		if ((idx == size) || !(idx & 15)) {
			if (idx & 15) {
				// Terminate the final partial ASCII column.
				chars[idx & 15] = 0;
			}
			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
			linestart += 16;
		}
	}
}
1462 
1463 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1464 
1465 /*
1466  * Convert a integer constant (typically a #define or enum) to a string.
1467  */
static char noValue[80];        // NOTE(review): shared scratch buffer for IOFindNameForValue; concurrent unknown-value lookups race on it
1469 
1470 const char *
IOFindNameForValue(int value,const IONamedValue * regValueArray)1471 IOFindNameForValue(int value, const IONamedValue *regValueArray)
1472 {
1473 	for (; regValueArray->name; regValueArray++) {
1474 		if (regValueArray->value == value) {
1475 			return regValueArray->name;
1476 		}
1477 	}
1478 	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
1479 	return (const char *)noValue;
1480 }
1481 
1482 IOReturn
IOFindValueForName(const char * string,const IONamedValue * regValueArray,int * value)1483 IOFindValueForName(const char *string,
1484     const IONamedValue *regValueArray,
1485     int *value)
1486 {
1487 	for (; regValueArray->name; regValueArray++) {
1488 		if (!strcmp(regValueArray->name, string)) {
1489 			*value = regValueArray->value;
1490 			return kIOReturnSuccess;
1491 		}
1492 	}
1493 	return kIOReturnBadArgument;
1494 }
1495 
1496 OSString *
IOCopyLogNameForPID(int pid)1497 IOCopyLogNameForPID(int pid)
1498 {
1499 	char   buf[128];
1500 	size_t len;
1501 	snprintf(buf, sizeof(buf), "pid %d, ", pid);
1502 	len = strlen(buf);
1503 	proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1504 	return OSString::withCString(buf);
1505 }
1506 
1507 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1508 
1509 IOAlignment
IOSizeToAlignment(unsigned int size)1510 IOSizeToAlignment(unsigned int size)
1511 {
1512 	int shift;
1513 	const int intsize = sizeof(unsigned int) * 8;
1514 
1515 	for (shift = 1; shift < intsize; shift++) {
1516 		if (size & 0x80000000) {
1517 			return (IOAlignment)(intsize - shift);
1518 		}
1519 		size <<= 1;
1520 	}
1521 	return 0;
1522 }
1523 
1524 unsigned int
IOAlignmentToSize(IOAlignment align)1525 IOAlignmentToSize(IOAlignment align)
1526 {
1527 	unsigned int size;
1528 
1529 	for (size = 1; align; align--) {
1530 		size <<= 1;
1531 	}
1532 	return size;
1533 }
1534 } /* extern "C" */
1535