/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91  Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98  cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

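/*
 * TRACK_ALLOC is true only when allocation tracking support is compiled
 * in (IOTRACKING) and the kIOTracking flag is set in gIOKitDebug,
 * typically via the io debug boot-arg.
 */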
#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix,
    int is_log);

extern bool bsd_log_lock(bool);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

#if defined(__x86_64__)
iopa_t gIOBMDPageAllocator;
#endif /* defined(__x86_64__) */

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOMaxFixedRanges     = 4 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOMaxFixedRanges     = 4 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static SECURITY_READ_ONLY_LATE(struct kmem_range)
gIOKitPageableFixedRanges[kIOMaxFixedRanges];

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[kIOMaxPageableMaps];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

#if defined(__x86_64__)
static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;
#endif /* defined(__x86_64__) */

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed0,
    &gIOKitPageableFixedRanges[0], kIOPageableMapSize);
KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed1,
    &gIOKitPageableFixedRanges[1], kIOPageableMapSize);
KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed2,
    &gIOKitPageableFixedRanges[2], kIOPageableMapSize);
KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed3,
    &gIOKitPageableFixedRanges[3], kIOPageableMapSize);

void
IOLibInit(void)
{
    static bool libInitialized;

    if (libInitialized) {
        return;
    }

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
        kIOTrackingQueueTypeAlloc,
        37);
    gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
        kIOTrackingQueueTypeDefaultOn
        | kIOTrackingQueueTypeMap
        | kIOTrackingQueueTypeUser,
        0);
#endif

    gIOKitPageableSpace.maps[0].map = kmem_suballoc(kernel_map,
        &gIOKitPageableFixedRanges[0].min_address,
        kIOPageableMapSize,
        VM_MAP_CREATE_PAGEABLE,
        VM_FLAGS_FIXED_RANGE_SUBALLOC,
        (kms_flags_t)(KMS_PERMANENT | KMS_DATA | KMS_NOFAIL),
        VM_KERN_MEMORY_IOKIT).kmr_submap;

    gIOKitPageableSpace.maps[0].address = gIOKitPageableFixedRanges[0].min_address;
    gIOKitPageableSpace.maps[0].end     = gIOKitPageableFixedRanges[0].min_address +
        kIOPageableMapSize;
    gIOKitPageableSpace.lock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.hint  = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

#if defined(__x86_64__)
    gIOPageAllocChunkBytes = PAGE_SIZE / 64;

    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);
#endif /* defined(__x86_64__) */


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

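/*
 * log2up() returns ceil(log2(size)), i.e. the exponent of the smallest
 * power of two >= size; sizes of 0 and 1 both yield 0.
 */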
vm_size_t
log2up(vm_size_t size)
{
    if (size <= 1) {
        size = 0;
    } else {
#if __LP64__
        size = 64 - __builtin_clzl(size - 1);
#else
        size = 32 - __builtin_clzl(size - 1);
#endif
    }
    return size;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread
IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS) {
        return NULL;
    }

    thread_deallocate(thread);

    return thread;
}


void
IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader {
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

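/*
 * IOMalloc_internal() prepends an IOLibMallocHeader to each allocation
 * when allocation tracking is active, records the one's-complemented
 * user address and the size for the leak detector, and returns the
 * address just past the header. IOFree_internal() reverses the
 * adjustment, validates the caller-supplied size against the tracked
 * size, and frees the whole block back to the given kalloc heap.
 */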
void *
IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size,
    zalloc_flags_t flags)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) {
        return NULL; // overflow
    }
#endif
    address = kheap_alloc(kheap, allocSize,
        Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));

    if (address) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr))address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress)) {
        address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            struct ptr_reference { void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr))address;
            if (size != hdr->tracking.size) {
                OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
                    (size_t)size, (size_t)hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

void *
IOMalloc_external(
    vm_size_t size);
void *
IOMalloc_external(
    vm_size_t size)
{
    return IOMalloc_internal(KHEAP_DEFAULT, size, Z_VM_TAG_BT_BIT);
}

void
IOFree(void * inAddress, vm_size_t size)
{
    IOFree_internal(KHEAP_ANY, inAddress, size);
}
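
/*
 * Typical pairing (an illustrative sketch only): callers must pass the
 * same size to IOFree that they passed to IOMalloc, since the size is
 * not recorded on the caller's behalf outside of tracking builds.
 *
 *     MyState * state = (MyState *) IOMalloc(sizeof(MyState));
 *     if (state != NULL) {
 *         // ... use state ...
 *         IOFree(state, sizeof(MyState));
 *     }
 *
 * (MyState is a hypothetical driver-defined type.)
 */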

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void *
IOMallocZero_external(
    vm_size_t size);
void *
IOMallocZero_external(
    vm_size_t size)
{
    return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO_VM_TAG_BT_BIT);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

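/*
 * IOMemoryTag() picks the VM tag used to attribute an allocation: for
 * non-kernel maps it is always VM_MEMORY_IOKIT; for kernel maps it tries
 * to derive a tag from the caller's backtrace and falls back to
 * VM_KERN_MEMORY_IOKIT.
 */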
vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) {
        return VM_MEMORY_IOKIT;
    }

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) {
        tag = VM_KERN_MEMORY_IOKIT;
    }

    return tag;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader {
    mach_vm_size_t   alignMask;
    mach_vm_offset_t allocationOffset;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

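/*
 * The aligned-allocation header lives immediately below the aligned
 * address handed to the caller. IOMallocAlignedSetHdr() stores the
 * distance back to the true allocation start (signed with pointer
 * authentication when available, using the aligned address and mask as
 * a diversifier); IOMallocAlignedGetAddress() authenticates and
 * cross-checks that offset before the block is freed, panicking on any
 * sign of header corruption.
 */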
static __header_always_inline void
IOMallocAlignedSetHdr(
    IOLibPageMallocHeader *hdr,
    mach_vm_size_t         alignMask,
    mach_vm_address_t      allocationStart,
    mach_vm_address_t      alignedStart)
{
    mach_vm_offset_t offset = alignedStart - allocationStart;
#if __has_feature(ptrauth_calls)
    offset = (mach_vm_offset_t) ptrauth_sign_unauthenticated((void *)offset,
        ptrauth_key_process_independent_data,
        ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
        OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
    hdr->allocationOffset = offset;
    hdr->alignMask        = alignMask;
}

__abortlike
static void
IOMallocAlignedHdrCorruptionPanic(
    mach_vm_offset_t  offset,
    mach_vm_size_t    alignMask,
    mach_vm_address_t alignedStart,
    vm_size_t         size)
{
    mach_vm_address_t address           = 0;
    mach_vm_address_t recalAlignedStart = 0;

    if (os_sub_overflow(alignedStart, offset, &address)) {
        panic("Invalid offset %p for aligned addr %p", (void *)offset,
            (void *)alignedStart);
    }
    if (os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
        &recalAlignedStart)) {
        panic("alignMask 0x%llx overflows recalAlignedStart %p for provided addr "
            "%p", alignMask, (void *)recalAlignedStart, (void *)alignedStart);
    }
    if (((recalAlignedStart &= ~alignMask) != alignedStart) &&
        (round_page(recalAlignedStart) != alignedStart)) {
        panic("Recalculated aligned addr %p doesn't match provided addr %p",
            (void *)recalAlignedStart, (void *)alignedStart);
    }
    if (offset < sizeofIOLibPageMallocHeader) {
        panic("Offset %zd doesn't accommodate IOLibPageMallocHeader for aligned "
            "addr %p", (size_t)offset, (void *)alignedStart);
    }
    panic("alignMask 0x%llx overflows adjusted size %zd for aligned addr %p",
        alignMask, (size_t)size, (void *)alignedStart);
}

static __header_always_inline mach_vm_address_t
IOMallocAlignedGetAddress(
    IOLibPageMallocHeader *hdr,
    mach_vm_address_t      alignedStart,
    vm_size_t             *size)
{
    mach_vm_address_t address           = 0;
    mach_vm_address_t recalAlignedStart = 0;
    mach_vm_offset_t  offset            = hdr->allocationOffset;
    mach_vm_size_t    alignMask         = hdr->alignMask;
#if __has_feature(ptrauth_calls)
    offset = (mach_vm_offset_t) ptrauth_auth_data((void *)offset,
        ptrauth_key_process_independent_data,
        ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
        OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
    if (os_sub_overflow(alignedStart, offset, &address) ||
        os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
        &recalAlignedStart) ||
        (((recalAlignedStart &= ~alignMask) != alignedStart) &&
        (round_page(recalAlignedStart) != alignedStart)) ||
        (offset < sizeofIOLibPageMallocHeader) ||
        os_add_overflow(*size, alignMask, size)) {
        IOMallocAlignedHdrCorruptionPanic(offset, alignMask, alignedStart, *size);
    }
    return address;
}

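/*
 * IOMallocAligned_internal() takes one of two paths: requests whose
 * adjusted size reaches a page are satisfied directly by
 * kernel_memory_allocate() with the requested alignment, while smaller
 * requests over-allocate from the kalloc heap by alignMask plus header
 * space, round the result up to the alignment, and stash an
 * IOLibPageMallocHeader just below the returned address so the free
 * path can recover the original allocation.
 */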
void *
IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
    vm_size_t alignment, zalloc_flags_t flags)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;
    kma_flags_t             kma_flags = KMA_NONE;

    if (size == 0) {
        return NULL;
    }
    if (((uint32_t) alignment) != alignment) {
        return NULL;
    }

    if (flags & Z_ZERO) {
        kma_flags = KMA_ZERO;
    }

    if (kheap == KHEAP_DATA_BUFFERS) {
        kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
    }

    alignment    = (1UL << log2up((uint32_t) alignment));
    alignMask    = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0; /* overflow detected */
    } else if (adjustedSize >= page_size) {
        kr = kernel_memory_allocate(kernel_map, &address,
            size, alignMask, kma_flags, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) {
            address = 0;
        }
#if IOTRACKING
        else if (TRACK_ALLOC) {
            IOTrackingAlloc(gIOMallocTracking, address, size);
        }
#endif
    } else {
        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {
            kr = kmem_alloc(kernel_map, &allocationAddress,
                adjustedSize, kma_flags, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) {
                allocationAddress = 0;
            }
        } else {
            allocationAddress = (vm_address_t) kheap_alloc(kheap,
                adjustedSize, Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
        }

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else {
            address = 0;
        }
    }

    assert(0 == (address & alignMask));

    if (address) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address) {
        return;
    }

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
        }
#endif
        kmem_free(kernel_map, (vm_offset_t) address, size);
    } else {
        hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        allocationAddress = IOMallocAlignedGetAddress(hdr,
            (mach_vm_address_t)address, &adjustedSize);

#if IOTRACKING
        if (TRACK_ALLOC) {
            if (size != hdr->tracking.size) {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
                    (size_t)size, (size_t)hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free(kernel_map, allocationAddress, adjustedSize);
        } else {
            kheap_free(kheap, allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

void *
IOMallocAligned_external(
    vm_size_t size, vm_size_t alignment);
void *
IOMallocAligned_external(
    vm_size_t size, vm_size_t alignment)
{
    return IOMallocAligned_internal(KHEAP_DEFAULT, size, alignment,
        Z_VM_TAG_BT_BIT);
}

void
IOFreeAligned(
    void *    address,
    vm_size_t size)
{
    IOFreeAligned_internal(KHEAP_DEFAULT, address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(
    kalloc_heap_t     kheap,
    mach_vm_address_t address,
    mach_vm_size_t    size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address) {
        return;
    }

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingFree(gIOMallocTracking, address, size);
        }
#endif
        kmem_free(kernel_map, (vm_offset_t) address, size);
    } else {
        hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        allocationAddress = IOMallocAlignedGetAddress(hdr, address, &adjustedSize);
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        kheap_free(kheap, allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif

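/*
 * IOKernelAllocateWithPhysicalRestrict() picks a strategy from the
 * constraints: physically contiguous requests use kmem_alloc_contig();
 * requests with only a maximum-physical-address bound use
 * kernel_memory_allocate() (substituting KMA_LOMEM for bounds at or
 * below 4GB); small unconstrained requests fall back to an over-aligned
 * kalloc heap allocation using the same header scheme as IOMallocAligned.
 */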
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
    kalloc_heap_t     kheap,
    mach_vm_size_t    size,
    mach_vm_address_t maxPhys,
    mach_vm_size_t    alignment,
    bool              contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0) {
        return 0;
    }
    if (alignment == 0) {
        alignment = 1;
    }

    alignMask = alignment - 1;

    if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
        return 0;
    }

    contiguous = (contiguous && (adjustedSize > page_size))
        || (alignment > page_size);

    if (contiguous || maxPhys) {
        kma_flags_t options = KMA_ZERO;
        vm_offset_t virt;

        if (kheap == KHEAP_DATA_BUFFERS) {
            options = (kma_flags_t) (options | KMA_DATA);
        }

        adjustedSize = size;
        contiguous   = (contiguous && (adjustedSize > page_size))
            || (alignment > page_size);

        if (!contiguous) {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
                maxPhys = 0;
            } else
#endif
            if (maxPhys <= 0xFFFFFFFF) {
                maxPhys = 0;
                options = (kma_flags_t)(options | KMA_LOMEM);
            } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys) {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
                options, IOMemoryTag(kernel_map));
        } else {
            kr = kernel_memory_allocate(kernel_map, &virt,
                size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr) {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) {
                IOTrackingAlloc(gIOMallocTracking, address, size);
            }
#endif
        } else {
            address = 0;
        }
    } else {
        adjustedSize += alignMask;
        if (adjustedSize < size) {
            return 0;
        }
        allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
            adjustedSize, Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1)) {
                address = round_page(address);
            }

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else {
            address = 0;
        }
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
    }

    return address;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry {
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

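/*
 * When the caller also wants the physical address, IOMallocContiguous()
 * is backed by an IOBufferMemoryDescriptor; the descriptor is remembered
 * on gIOMallocContiguousEntries so IOFreeContiguous() can later match
 * the virtual address back to its descriptor and release it. Without a
 * physical-address request it reduces to
 * IOKernelAllocateWithPhysicalRestrict().
 */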
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0) {
        return NULL;
    }
    if (alignment == 0) {
        alignment = 1;
    }

    /* Do we want a physical address? */
    if (!physicalAddress) {
        address = IOKernelAllocateWithPhysicalRestrict(KHEAP_DEFAULT,
            size, 0 /*maxPhys*/, alignment, true);
    } else {
        do {
            IOBufferMemoryDescriptor * bmd;
            mach_vm_address_t          physicalMask;
            vm_offset_t                alignMask;

            alignMask    = alignment - 1;
            physicalMask = (0xFFFFFFFF ^ alignMask);

            bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
            if (!bmd) {
                break;
            }

            _IOMallocContiguousEntry *
                entry = IOMallocType(_IOMallocContiguousEntry);
            if (!entry) {
                bmd->release();
                break;
            }
            entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
            entry->md          = bmd;
            lck_mtx_lock(gIOMallocContiguousEntriesLock);
            queue_enter( &gIOMallocContiguousEntries, entry,
                _IOMallocContiguousEntry *, link );
            lck_mtx_unlock(gIOMallocContiguousEntriesLock);

            address          = (mach_vm_address_t) entry->virtualAddr;
            *physicalAddress = bmd->getPhysicalAddress();
        } while (false);
    }

    return (void *) address;
}

void
IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address) {
        return;
    }

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
        _IOMallocContiguousEntry *, link )
    {
        if (entry->virtualAddr == address) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md) {
        md->release();
        IOFreeType(entry, _IOMallocContiguousEntry);
    } else {
        IOKernelFreePhysical(KHEAP_DEFAULT, (mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

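/*
 * IOIteratePageableMaps() offers each existing pageable submap to the
 * callback, starting from the last map that satisfied a request (the
 * hint). If every map reports KERN_NO_SPACE, it grows the pool with a
 * new kmem_suballoc() submap, up to kIOMaxPageableMaps, preferring the
 * statically registered fixed ranges while any remain.
 */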
kern_return_t
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    kmem_return_t kmr;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    int           flags;

    if (size > kIOPageableMaxMapSize) {
        return kIOReturnBadArgument;
    }

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index) {
                index--;
            } else {
                index = gIOKitPageableSpace.count - 1;
            }
        }
        if (KERN_NO_SPACE != kr) {
            break;
        }

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize) {
            segSize = kIOPageableMapSize;
        } else {
            segSize = size;
        }

        /*
         * Use the predefined ranges if available, else default to data
         */
        if (index < kIOMaxFixedRanges) {
            min   = gIOKitPageableFixedRanges[index].min_address;
            flags = VM_FLAGS_FIXED_RANGE_SUBALLOC;
        } else {
            min   = 0;
            flags = VM_FLAGS_ANYWHERE;
        }
        kmr = kmem_suballoc(kernel_map,
            &min,
            segSize,
            VM_MAP_CREATE_PAGEABLE,
            flags,
            (kms_flags_t)(KMS_PERMANENT | KMS_DATA),
            VM_KERN_MEMORY_IOKIT);
        if (kmr.kmr_return != KERN_SUCCESS) {
            kr = kmr.kmr_return;
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = kmr.kmr_submap;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );
    } while (true);

    return kr;
}

struct IOMallocPageableRef {
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t
IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kma_flags_t flags = (kma_flags_t)(KMA_PAGEABLE | KMA_DATA);

    return kmem_alloc( map, &ref->address, ref->size, flags, ref->tag );
}

static void *
IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size) {
        return NULL;
    }
    if (size > kIOPageableMaxMapSize) {
        return NULL;
    }

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if (kIOReturnSuccess != kr) {
        ref.address = 0;
    }

    return (void *) ref.address;
}

vm_map_t
IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = NULL;
    UInt32   index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
            && (address < gIOKitPageableSpace.maps[index].end)) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map) {
        panic("IOPageableMapForAddress: null");
    }

    return map;
}

static void
IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress((vm_address_t) address);
    if (map) {
        kmem_free( map, (vm_offset_t) address, size);
    }
}

#if defined(__x86_64__)
static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
{
    return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
#endif /* defined(__x86_64__) */

static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
    void * addr;

    if (((uint32_t) alignment) != alignment) {
        return NULL;
    }
#if defined(__x86_64__)
    if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
        alignment > page_size) {
        addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
        /* Memory allocated this way will already be zeroed. */
    } else {
        addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
            &IOMallocOnePageablePage, KHEAP_DEFAULT, size, (uint32_t) alignment));
        if (addr && zeroed) {
            bzero(addr, size);
        }
    }
#else /* !defined(__x86_64__) */
    vm_size_t allocSize = size;
    if (allocSize == 0) {
        allocSize = 1;
    }
    addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
    /* already zeroed */
#endif /* defined(__x86_64__) */

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return addr;
}

void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}

void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
    return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}

void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

#if defined(__x86_64__)
    if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) {
        IOFreePageablePages(address, size);
    }
#else /* !defined(__x86_64__) */
    if (size == 0) {
        size = 1;
    }
    if (address) {
        IOFreePageablePages(address, size);
    }
#endif /* defined(__x86_64__) */
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void *
IOMallocData_external(
    vm_size_t size);
void *
IOMallocData_external(vm_size_t size)
{
    return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_VM_TAG_BT_BIT);
}

void *
IOMallocZeroData_external(
    vm_size_t size);
void *
IOMallocZeroData_external(vm_size_t size)
{
    return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_ZERO_VM_TAG_BT_BIT);
}

void
IOFreeData(void * address, vm_size_t size)
{
    return IOFree_internal(KHEAP_DATA_BUFFERS, address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

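/*
 * IOMallocType()/IOFreeType() route fixed-size typed allocations through
 * kalloc_type, so each type gets zone-based isolation. The external
 * kalloc_type entry points are used because views generated at some
 * external callsites may not have been processed during boot.
 */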
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
#if IOTRACKING
    /*
     * When leak detection is on, default to using IOMalloc, since the
     * kalloc type infrastructure isn't aware of the additional space
     * needed for the tracking header.
     */
    if (TRACK_ALLOC) {
        uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
        void *mem = IOMalloc_internal(KHEAP_DEFAULT, kt_size, Z_ZERO);
        if (!IOMallocType_from_vm(kt_view)) {
            assert(mem);
        }
        return mem;
    }
#endif
    zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
    if (!IOMallocType_from_vm(kt_view)) {
        kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
    }
    /*
     * Use the external symbol for kalloc_type_impl, as kalloc_type_views
     * generated at some external callsites may not have been processed
     * during boot.
     */
    return kalloc_type_impl_external(kt_view, kt_flags);
}

void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
    if (TRACK_ALLOC) {
        return IOFree_internal(KHEAP_DEFAULT, address,
            kalloc_type_get_size(kt_view->kt_size));
    }
#endif
    /*
     * Use the external symbol for kalloc_type_impl, as kalloc_type_views
     * generated at some external callsites may not have been processed
     * during boot.
     */
    return kfree_type_impl_external(kt_view, address);
}

void *
IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
{
#if IOTRACKING
    /*
     * When leak detection is on, default to using IOMalloc, since the
     * kalloc type infrastructure isn't aware of the additional space
     * needed for the tracking header.
     */
    if (TRACK_ALLOC) {
        return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO);
    }
#endif
    zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);

    kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
    return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
}

void
IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
    vm_size_t size)
{
#if IOTRACKING
    if (TRACK_ALLOC) {
        return IOFree_internal(KHEAP_DEFAULT, address, size);
    }
#endif

    return kfree_type_var_impl(kt_view, address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if defined(__x86_64__)


extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

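/*
 * The iopa allocator carves each page into 64 chunks of
 * gIOPageAllocChunkBytes and tracks free chunks with a 64-bit mask held
 * in an iopa_page_t header occupying the page's last chunk (hence avail
 * is initialized to -2ULL: all bits set except the header's own chunk,
 * which is the least significant bit). iopa_allocinpage() finds a run of
 * `count` free chunks meeting the alignment mask by repeatedly ANDing
 * shifted copies of the availability mask.
 */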
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find runs of `count` consecutive 1 bits in avail
    for (n = count; n > 1; n -= s) {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail) {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next) {
            remque(&pa->link);
            pa->link.next = NULL;
        }
        return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
    }

    return 0;
}

uintptr_t
iopa_alloc(
    iopa_t *      a,
    iopa_proc_t   alloc,
    kalloc_heap_t kheap,
    vm_size_t     bytes,
    vm_size_t     balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;
    vm_size_t     align_masks_idx;

    if (((uint32_t) bytes) != bytes) {
        return 0;
    }
    if (!bytes) {
        bytes = 1;
    }
    count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

    align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
    assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
    align = align_masks[align_masks_idx];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link)) {
        addr = iopa_allocinpage(pa, count, align);
        if (addr) {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr) {
        addr = alloc(kheap, a);
        if (addr) {
            pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) {
                enqueue_head(&a->list, &pa->link);
            }
            a->pagecount++;
            if (addr) {
                a->bytecount += bytes;
            }
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return addr;
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (((uint32_t) bytes) != bytes) {
        return 0;
    }
    if (!bytes) {
        bytes = 1;
    }

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail) {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) {
        pa = NULL;
    } else {
        remque(&pa->link);
        pa->link.next = NULL;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa))trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return (uintptr_t) pa;
}

#endif /* defined(__x86_64__) */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

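/*
 * IOSetProcessorCacheMode() changes the cache mode of already-mapped
 * kernel pages by unmapping and remapping each page with the new mode;
 * it only accepts page-aligned ranges in the kernel task.
 */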
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if (task != kernel_task) {
        return kIOReturnUnsupported;
    }
    if ((address | length) & PAGE_MASK) {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return kIOReturnUnsupported;
    }
    length  = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {
        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if (pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else {
            ret = kIOReturnVMError;
        }

        address += page_size;
        length  -= page_size;
    }

    return ret;
}


IOReturn
IOFlushProcessorCache( task_t task, IOVirtualAddress address,
    IOByteCount length )
{
    if (task != kernel_task) {
        return kIOReturnUnsupported;
    }

    flush_dcache64((addr64_t) address, (unsigned) length, false );

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t
OSKernelStackRemaining( void )
{
    return ml_stack_remaining();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void
IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep the calling thread for the indicated number of milliseconds,
 * plus potentially an additional number of milliseconds up to the
 * leeway value.
 */
void
IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void
IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void
IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
    void *  caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void
_IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;
    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
#pragma clang diagnostic pop

    if (!disable_iolog_serial_output) {
        __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
        console_printbuf_clear(&info_data);
    }
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
        debug_mode_active() || !gCPUsRunning,
        "IOLog called with interrupts disabled");
}

#if !__LP64__
void
IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

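/*
 * IOKitKernelLogBuffer() hex-dumps up to 4KB of a buffer through the
 * supplied printf-style output function, 16 bytes per line with a
 * printable-ASCII column, e.g.:
 *
 *     IOKitKernelLogBuffer("mybuf", buf, len, &kprintf);
 */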
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
    size_t  idx, linestart;
    enum { bytelen = (sizeof("0xZZ, ") - 1) };
    char    hex[(bytelen * 16) + 1];
    uint8_t c, chars[17];

    output("%s(0x%lx):\n", title, size);
    output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
    if (size > 4096) {
        size = 4096;
    }
    chars[16] = 0;
    for (idx = 0, linestart = 0; idx < size;) {
        c = ((char *)buffer)[idx];
        snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
        chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
        idx++;
        if ((idx == size) || !(idx & 15)) {
            if (idx & 15) {
                chars[idx & 15] = 0;
            }
            output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
            linestart += 16;
        }
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // shared scratch buffer; not thread-safe, diagnostic output only

const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for (; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value) {
            return regValueArray->name;
        }
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return (const char *)noValue;
}

IOReturn
IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for (; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString *
IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, (int) (sizeof(buf) - len));
    return OSString::withCString(buf);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

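/*
 * IOSizeToAlignment() returns floor(log2(size)), the exponent of the
 * largest power of two wholly contained in size; IOAlignmentToSize() is
 * its inverse, returning 1 << align. For example, IOSizeToAlignment(4608)
 * is 12 and IOAlignmentToSize(12) is 4096.
 */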
IOAlignment
IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000) {
            return (IOAlignment)(intsize - shift);
        }
        size <<= 1;
    }
    return 0;
}

unsigned int
IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
} /* extern "C" */
