1 /*
2 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * HISTORY
30 *
31 * 17-Apr-91 Portions from libIO.m, Doug Mitchell at NeXT.
32 * 17-Nov-98 cpp
33 *
34 */
35
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern_xnu.h>
40 #include <vm/vm_map_xnu.h>
41 #include <libkern/c++/OSCPPDebug.h>
42
43 #include <IOKit/assert.h>
44
45 #include <IOKit/IOReturn.h>
46 #include <IOKit/IOLib.h>
47 #include <IOKit/IOLocks.h>
48 #include <IOKit/IOMapper.h>
49 #include <IOKit/IOBufferMemoryDescriptor.h>
50 #include <IOKit/IOKitDebug.h>
51
52 #include "IOKitKernelInternal.h"
53
54 #ifdef IOALLOCDEBUG
55 #include <libkern/OSDebug.h>
56 #include <sys/sysctl.h>
57 #endif
58
59 #include "libkern/OSAtomic.h"
60 #include <libkern/c++/OSKext.h>
61 #include <IOKit/IOStatisticsPrivate.h>
62 #include <os/log_private.h>
63 #include <sys/msgbuf.h>
64 #include <console/serial_protos.h>
65
66 #if IOKITSTATS
67
68 #define IOStatisticsAlloc(type, size) \
69 do { \
70 IOStatistics::countAlloc(type, size); \
71 } while (0)
72
73 #else
74
75 #define IOStatisticsAlloc(type, size)
76
77 #endif /* IOKITSTATS */
78
79
80 #define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
81
82
83 extern "C"
84 {
85 mach_timespec_t IOZeroTvalspec = { 0, 0 };
86
87 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
88
89 extern int
90 __doprnt(
91 const char *fmt,
92 va_list argp,
93 void (*putc)(int, void *),
94 void *arg,
95 int radix,
96 int is_log);
97
98 extern bool bsd_log_lock(bool);
99 extern void bsd_log_unlock(void);
100
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 lck_grp_t io_lck_grp;
105 lck_grp_t *IOLockGroup;
106
107 /*
108 * Global variables for use by iLogger
109 * These symbols are for use only by Apple diagnostic code.
110 * Binary compatibility is not guaranteed for kexts that reference these symbols.
111 */
112
113 void *_giDebugLogInternal = NULL;
114 void *_giDebugLogDataInternal = NULL;
115 void *_giDebugReserved1 = NULL;
116 void *_giDebugReserved2 = NULL;
117
118 #if defined(__x86_64__)
119 iopa_t gIOBMDPageAllocator;
120 #endif /* defined(__x86_64__) */
121
122 /*
123 * Static variables for this module.
124 */
125
126 static queue_head_t gIOMallocContiguousEntries;
127 static lck_mtx_t * gIOMallocContiguousEntriesLock;
128
129 #if __x86_64__
130 enum { kIOPageableMaxAllocSize = 512ULL * 1024 * 1024 };
131 enum { kIOPageableMapSize = 8ULL * kIOPageableMaxAllocSize };
132 #else
133 enum { kIOPageableMaxAllocSize = 96ULL * 1024 * 1024 };
134 enum { kIOPageableMapSize = 16ULL * kIOPageableMaxAllocSize };
135 #endif
136
137 typedef struct {
138 vm_map_t map;
139 vm_offset_t address;
140 vm_offset_t end;
141 } IOMapData;
142
143 #ifndef __BUILDING_XNU_LIBRARY__
144 /* this makes clang emit a C and C++ symbol which confuses lldb rdar://135688747 */
145 static
146 #endif /* __BUILDING_XNU_LIBRARY__ */
147 SECURITY_READ_ONLY_LATE(struct mach_vm_range) gIOKitPageableFixedRange;
148 IOMapData gIOKitPageableMap;
149
150 #if defined(__x86_64__)
151 static iopa_t gIOPageablePageAllocator;
152
153 uint32_t gIOPageAllocChunkBytes;
154 #endif /* defined(__x86_64__) */
155
156 #if IOTRACKING
157 IOTrackingQueue * gIOMallocTracking;
158 IOTrackingQueue * gIOWireTracking;
159 IOTrackingQueue * gIOMapTracking;
160 #endif /* IOTRACKING */
161
162 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
163
164 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed,
165 &gIOKitPageableFixedRange, kIOPageableMapSize);
void
IOLibInit(void)
{
	/*
	 * One-time initialization of the IOKit support library: the shared
	 * lock group, optional allocation-tracking queues, the fixed-range
	 * pageable submap, the contiguous-allocation list, and (x86_64 only)
	 * the sub-page allocators.  Safe to call more than once; subsequent
	 * calls return immediately.
	 */
	static bool libInitialized;

	if (libInitialized) {
		return;
	}

	lck_grp_init(&io_lck_grp, "IOKit", LCK_GRP_ATTR_NULL);
	IOLockGroup = &io_lck_grp;

#if IOTRACKING
	IOTrackingInit();
	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
	    kIOTrackingQueueTypeAlloc,
	    37);
	gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

	/* Capture all mappings when kIOTracking debugging is enabled,
	 * otherwise only mappings of at least 1 MiB. */
	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
	gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
	    kIOTrackingQueueTypeDefaultOn
	    | kIOTrackingQueueTypeMap
	    | kIOTrackingQueueTypeUser,
	    0);
#endif

	/* Carve the pageable submap out of kernel_map at the statically
	 * registered fixed range; KMS_NOFAIL means this cannot fail at boot. */
	gIOKitPageableMap.map = kmem_suballoc(kernel_map,
	    &gIOKitPageableFixedRange.min_address,
	    kIOPageableMapSize,
	    VM_MAP_CREATE_PAGEABLE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    (kms_flags_t)(KMS_PERMANENT | KMS_DATA | KMS_NOFAIL | KMS_NOSOFTLIMIT),
	    VM_KERN_MEMORY_IOKIT).kmr_submap;

	gIOKitPageableMap.address = gIOKitPageableFixedRange.min_address;
	gIOKitPageableMap.end = gIOKitPageableFixedRange.max_address;

	gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue_init( &gIOMallocContiguousEntries );

#if defined(__x86_64__)
	/* iopa sub-page allocators manage 64 chunks per page. */
	gIOPageAllocChunkBytes = PAGE_SIZE / 64;

	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
	iopa_init(&gIOBMDPageAllocator);
	iopa_init(&gIOPageablePageAllocator);
#endif /* defined(__x86_64__) */


	libInitialized = true;
}
218
219 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
221 vm_size_t
log2up(vm_size_t size)222 log2up(vm_size_t size)
223 {
224 if (size <= 1) {
225 size = 0;
226 } else {
227 #if __LP64__
228 size = 64 - __builtin_clzl(size - 1);
229 #else
230 size = 32 - __builtin_clzl(size - 1);
231 #endif
232 }
233 return size;
234 }
235
236 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
237
238 IOThread
IOCreateThread(IOThreadFunc fcn,void * arg)239 IOCreateThread(IOThreadFunc fcn, void *arg)
240 {
241 kern_return_t result;
242 thread_t thread;
243
244 result = kernel_thread_start((thread_continue_t)(void (*)(void))fcn, arg, &thread);
245 if (result != KERN_SUCCESS) {
246 return NULL;
247 }
248
249 thread_deallocate(thread);
250
251 return thread;
252 }
253
254
/* Terminate the calling thread.  Does not return on success. */
void
IOExitThread(void)
{
	(void) thread_terminate(current_thread());
}
260
261 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
262
#if IOTRACKING
/* Header placed before every IOMalloc allocation when allocation
 * tracking is compiled in; records the allocation for leak detection. */
struct IOLibMallocHeader {
	IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
/* Effective header size: zero when tracking is compiled in but not
 * enabled at boot (TRACK_ALLOC false), so untracked allocations pay
 * no space cost. */
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
274
275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276
277 __typed_allocators_ignore_push // allocator implementation
278
/*
 * Core IOMalloc implementation: allocates size bytes from kheap, with an
 * optional tracking header prepended.  Returns the usable region (past
 * the header) or NULL on failure/overflow.
 */
void *
(IOMalloc_internal)(struct kalloc_heap *kheap, vm_size_t size,
    zalloc_flags_t flags)
{
	void * address;
	vm_size_t allocSize;

	/* Reserve room for the tracking header (zero bytes unless tracking
	 * is compiled in and enabled). */
	allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
		return NULL; // overflow
	}
#endif
	address = kheap_alloc(kheap, allocSize,
	    Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));

	if (address) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			hdr = (typeof(hdr))address;
			bzero(&hdr->tracking, sizeof(hdr->tracking));
			/* Store the complement of the user pointer so leak
			 * scanning never mistakes it for a live reference. */
			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
			hdr->tracking.size = size;
			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
		}
#endif
		/* Hand back the region past the (possibly empty) header. */
		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
	}

	return address;
}
316
/*
 * Core IOFree implementation: frees a region previously returned by
 * IOMalloc_internal from the same kheap.  size must match the original
 * request (tracking mode repairs and reports mismatches).
 */
void
IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
{
	void * address;

	if ((address = inAddress)) {
		/* Step back over the (possibly empty) tracking header to the
		 * true allocation start. */
		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
		if (TRACK_ALLOC) {
			IOLibMallocHeader * hdr;
			struct ptr_reference { void * ptr; };
			volatile struct ptr_reference ptr;

			// we're about to block in IOTrackingRemove(), make sure the original pointer
			// exists in memory or a register for leak scanning to find
			ptr.ptr = inAddress;

			hdr = (typeof(hdr))address;
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				/* Trust the recorded size so the free is correct. */
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
			ptr.ptr = NULL;
		}
#endif

		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
	}
}
353
354 void *
355 IOMalloc_external(
356 vm_size_t size);
357 void *
IOMalloc_external(vm_size_t size)358 IOMalloc_external(
359 vm_size_t size)
360 {
361 return IOMalloc_internal(KHEAP_DEFAULT, size, Z_VM_TAG_BT_BIT);
362 }
363
364 void
IOFree(void * inAddress,vm_size_t size)365 IOFree(void * inAddress, vm_size_t size)
366 {
367 IOFree_internal(KHEAP_DEFAULT, inAddress, size);
368 }
369
370 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
371
372 void *
373 IOMallocZero_external(
374 vm_size_t size);
375 void *
IOMallocZero_external(vm_size_t size)376 IOMallocZero_external(
377 vm_size_t size)
378 {
379 return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO_VM_TAG_BT_BIT);
380 }
381
382 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
383
384 vm_tag_t
IOMemoryTag(vm_map_t map)385 IOMemoryTag(vm_map_t map)
386 {
387 vm_tag_t tag;
388
389 if (!vm_kernel_map_is_kernel(map)) {
390 return VM_MEMORY_IOKIT;
391 }
392
393 tag = vm_tag_bt();
394 if (tag == VM_KERN_MEMORY_NONE) {
395 tag = VM_KERN_MEMORY_IOKIT;
396 }
397
398 return tag;
399 }
400
401 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
402
/* Header placed immediately before a sub-page aligned allocation.
 * alignMask and allocationOffset together let the free path recover the
 * true allocation start from the aligned user pointer; allocationOffset
 * is ptrauth-signed on arm64e (see IOMallocAlignedSetHdr). */
struct IOLibPageMallocHeader {
	mach_vm_size_t alignMask;
	mach_vm_offset_t allocationOffset;
#if IOTRACKING
	IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
/* Tracking field only occupies space when tracking is enabled at boot. */
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif
416
417 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
418
/*
 * Record, in the header preceding an aligned allocation, the offset from
 * the true allocation start to the aligned user address plus the align
 * mask.  With pointer authentication the offset is signed, blended with
 * the aligned address and mask, so a corrupted or relocated header is
 * detected at free time.
 */
static __header_always_inline void
IOMallocAlignedSetHdr(
	IOLibPageMallocHeader *hdr,
	mach_vm_size_t        alignMask,
	mach_vm_address_t     allocationStart,
	mach_vm_address_t     alignedStart)
{
	mach_vm_offset_t offset = alignedStart - allocationStart;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_sign_unauthenticated((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	hdr->allocationOffset = offset;
	hdr->alignMask = alignMask;
}
436
/*
 * Panic path for IOMallocAlignedGetAddress: re-runs the same validation
 * steps in order so the panic message identifies which check failed
 * (bad offset, mask overflow, mismatched recomputed address, offset too
 * small for the header, or size+mask overflow).  Never returns.
 */
__abortlike
static void
IOMallocAlignedHdrCorruptionPanic(
	mach_vm_offset_t  offset,
	mach_vm_size_t    alignMask,
	mach_vm_address_t alignedStart,
	vm_size_t         size)
{
	mach_vm_address_t address = 0;
	mach_vm_address_t recalAlignedStart = 0;

	if (os_sub_overflow(alignedStart, offset, &address)) {
		panic("Invalid offset %p for aligned addr %p", (void *)offset,
		    (void *)alignedStart);
	}
	if (os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
	    &recalAlignedStart)) {
		panic("alignMask 0x%llx overflows recalAlignedStart %p for provided addr "
		    "%p", alignMask, (void *)recalAlignedStart, (void *)alignedStart);
	}
	if (((recalAlignedStart &= ~alignMask) != alignedStart) &&
	    (round_page(recalAlignedStart) != alignedStart)) {
		panic("Recalculated aligned addr %p doesn't match provided addr %p",
		    (void *)recalAlignedStart, (void *)alignedStart);
	}
	if (offset < sizeofIOLibPageMallocHeader) {
		panic("Offset %zd doesn't accomodate IOLibPageMallocHeader for aligned "
		    "addr %p", (size_t)offset, (void *)alignedStart);
	}
	/* All individual checks passed, so the failing condition must have
	 * been the size + alignMask overflow. */
	panic("alignMask 0x%llx overflows adjusted size %zd for aligned addr %p",
	    alignMask, (size_t)size, (void *)alignedStart);
}
469
/*
 * Recover the true allocation start from the header preceding an aligned
 * allocation, validating the header on the way (authenticating the
 * signed offset under ptrauth, recomputing the aligned address, and
 * checking for overflow).  On success also grows *size by alignMask to
 * the adjusted size used at allocation time.  Panics on any
 * inconsistency via IOMallocAlignedHdrCorruptionPanic.
 */
static __header_always_inline mach_vm_address_t
IOMallocAlignedGetAddress(
	IOLibPageMallocHeader *hdr,
	mach_vm_address_t      alignedStart,
	vm_size_t             *size)
{
	mach_vm_address_t address = 0;
	mach_vm_address_t recalAlignedStart = 0;
	mach_vm_offset_t  offset = hdr->allocationOffset;
	mach_vm_size_t    alignMask = hdr->alignMask;
#if __has_feature(ptrauth_calls)
	offset = (mach_vm_offset_t) ptrauth_auth_data((void *)offset,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
#endif /* __has_feature(ptrauth_calls) */
	if (os_sub_overflow(alignedStart, offset, &address) ||
	    os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
	    &recalAlignedStart) ||
	    (((recalAlignedStart &= ~alignMask) != alignedStart) &&
	    (round_page(recalAlignedStart) != alignedStart)) ||
	    (offset < sizeofIOLibPageMallocHeader) ||
	    os_add_overflow(*size, alignMask, size)) {
		IOMallocAlignedHdrCorruptionPanic(offset, alignMask, alignedStart, *size);
	}
	return address;
}
497
/*
 * Core aligned allocator.  Alignment is rounded up to a power of two.
 * Three strategies, chosen by size:
 *  - size + header >= page_size: allocate directly from kernel_map with
 *    the requested alignment (no header needed; the free path kmem_frees
 *    by size alone);
 *  - otherwise over-allocate by alignMask + header, from kernel_map if
 *    the padded size reaches a page, else from kheap, and place an
 *    IOLibPageMallocHeader just before the aligned address.
 * Returns NULL for size 0, alignment > 32 bits, overflow, or allocation
 * failure.
 */
void *
(IOMallocAligned_internal)(struct kalloc_heap *kheap, vm_size_t size,
    vm_size_t alignment, zalloc_flags_t flags)
{
	kern_return_t           kr;
	vm_offset_t             address;
	vm_offset_t             allocationAddress;
	vm_size_t               adjustedSize;
	uintptr_t               alignMask;
	IOLibPageMallocHeader * hdr;
	kma_flags_t             kma_flags = KMA_NONE;

	if (size == 0) {
		return NULL;
	}
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}

	if (flags & Z_ZERO) {
		kma_flags = KMA_ZERO;
	}

	/* Data heaps map onto the corresponding kmem data flags. */
	if (kheap == KHEAP_DATA_BUFFERS) {
		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
	} else if (kheap == KHEAP_DATA_SHARED) {
		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA_SHARED);
	}

	/* Round alignment up to a power of two. */
	alignment = (1UL << log2up((uint32_t) alignment));
	alignMask = alignment - 1;
	adjustedSize = size + sizeofIOLibPageMallocHeader;

	if (size > adjustedSize) {
		address = 0;    /* overflow detected */
	} else if (adjustedSize >= page_size) {
		/* Large allocation: straight from the kernel map, naturally
		 * aligned; no header required. */
		kr = kernel_memory_allocate(kernel_map, &address,
		    size, alignMask, kma_flags, IOMemoryTag(kernel_map));
		if (KERN_SUCCESS != kr) {
			address = 0;
		}
#if IOTRACKING
		else if (TRACK_ALLOC) {
			IOTrackingAlloc(gIOMallocTracking, address, size);
		}
#endif
	} else {
		/* Small allocation: over-allocate so an aligned address plus
		 * header always fits. */
		adjustedSize += alignMask;

		if (adjustedSize >= page_size) {
			kr = kmem_alloc(kernel_map, &allocationAddress,
			    adjustedSize, kma_flags, IOMemoryTag(kernel_map));
			if (KERN_SUCCESS != kr) {
				allocationAddress = 0;
			}
		} else {
			allocationAddress = (vm_address_t) kheap_alloc(kheap,
			    adjustedSize, Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
		}

		if (allocationAddress) {
			/* First aligned address that leaves room for the header. */
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	assert(0 == (address & alignMask));

	if (address) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

	return (void *) address;
}
588
/*
 * Free an allocation made by IOMallocAligned_internal from the same
 * kheap.  Recomputes size + header to pick the same strategy the
 * allocator used: large allocations are kmem_freed by size; small ones
 * recover the true start (and padded size) from the header and free
 * via kmem or kheap depending on the padded size.
 */
void
IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = size + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header and grows adjustedSize by alignMask. */
		allocationAddress = IOMallocAlignedGetAddress(hdr,
		    (mach_vm_address_t)address, &adjustedSize);

#if IOTRACKING
		if (TRACK_ALLOC) {
			if (size != hdr->tracking.size) {
				OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
				    (size_t)size, (size_t)hdr->tracking.size);
				size = hdr->tracking.size;
			}
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		if (adjustedSize >= page_size) {
			kmem_free(kernel_map, allocationAddress, adjustedSize);
		} else {
			kheap_free(kheap, allocationAddress, adjustedSize);
		}
	}

#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif

	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
638
639 void *
640 IOMallocAligned_external(
641 vm_size_t size, vm_size_t alignment);
642 void *
IOMallocAligned_external(vm_size_t size,vm_size_t alignment)643 IOMallocAligned_external(
644 vm_size_t size, vm_size_t alignment)
645 {
646 return IOMallocAligned_internal(GET_KEXT_KHEAP_DATA(), size, alignment,
647 Z_VM_TAG_BT_BIT);
648 }
649
650 void
IOFreeAligned(void * address,vm_size_t size)651 IOFreeAligned(
652 void * address,
653 vm_size_t size)
654 {
655 IOFreeAligned_internal(GET_KEXT_KHEAP_DATA(), address, size);
656 }
657
658 __typed_allocators_ignore_pop
659
660 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
661
/*
 * Free memory obtained from IOKernelAllocateWithPhysicalRestrict with
 * the same kheap and size.  Mirrors the allocator's sizing rule
 * (2*size + header) to decide whether the memory came from kernel_map
 * (kmem_freed by size) or from kheap (true start and padded size
 * recovered from the header).
 */
void
IOKernelFreePhysical(
	kalloc_heap_t     kheap,
	mach_vm_address_t address,
	mach_vm_size_t    size)
{
	vm_address_t            allocationAddress;
	vm_size_t               adjustedSize;
	IOLibPageMallocHeader * hdr;

	if (!address) {
		return;
	}

	assert(size);

	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
	if (adjustedSize >= page_size) {
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingFree(gIOMallocTracking, address, size);
		}
#endif
		kmem_free(kernel_map, (vm_offset_t) address, size);
	} else {
		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
		/* Validates the header and grows adjustedSize by alignMask. */
		allocationAddress = IOMallocAlignedGetAddress(hdr, address, &adjustedSize);
#if IOTRACKING
		if (TRACK_ALLOC) {
			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
		}
#endif
		__typed_allocators_ignore(kheap_free(kheap, allocationAddress, adjustedSize));
	}

	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
}
702
703 #if __arm64__
704 extern unsigned long gPhysBase, gPhysSize;
705 #endif
706
/*
 * Allocate wired memory subject to physical-address restrictions.
 * maxPhys bounds the highest acceptable physical address (0 = no bound);
 * contiguous requests physically contiguous pages.  Large, contiguous,
 * or physically restricted requests come from kernel_map (zeroed);
 * small unrestricted ones come from kheap with an alignment header,
 * the same layout IOKernelFreePhysical expects.  Returns 0 on failure.
 */
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(
	kalloc_heap_t     kheap,
	mach_vm_size_t    size,
	mach_vm_address_t maxPhys,
	mach_vm_size_t    alignment,
	bool              contiguous,
	bool              noSoftLimit)
{
	kern_return_t           kr;
	mach_vm_address_t       address;
	mach_vm_address_t       allocationAddress;
	mach_vm_size_t          adjustedSize;
	mach_vm_address_t       alignMask;
	IOLibPageMallocHeader * hdr;

	if (size == 0) {
		return 0;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	alignMask = alignment - 1;

	/* adjustedSize = 2*size + header, with overflow check; this is the
	 * same sizing rule IOKernelFreePhysical re-derives at free time. */
	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
		return 0;
	}

	/* Sub-page allocations are trivially contiguous; only honor the
	 * contiguous request when it actually matters. */
	contiguous = (contiguous && (adjustedSize > page_size))
	    || (alignment > page_size);

	if (contiguous || maxPhys) {
		kma_flags_t options = KMA_ZERO;
		vm_offset_t virt;

		if (kheap == KHEAP_DATA_BUFFERS) {
			options = (kma_flags_t) (options | KMA_DATA);
		} else if (kheap == KHEAP_DATA_SHARED) {
			options = (kma_flags_t) (options | KMA_DATA_SHARED);
		}

		if (noSoftLimit) {
			options = (kma_flags_t) (options | KMA_NOSOFTLIMIT);
		}

		/* kernel_map path needs no header; re-evaluate contiguity
		 * against the plain size. */
		adjustedSize = size;
		contiguous = (contiguous && (adjustedSize > page_size))
		    || (alignment > page_size);

		if (!contiguous) {
			/* Translate the physical bound into kmem flags where a
			 * cheaper non-contig allocation can satisfy it. */
#if __arm64__
			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
				/* Bound is above all physical memory: no restriction. */
				maxPhys = 0;
			} else
#endif
			if (maxPhys <= 0xFFFFFFFF) {
				maxPhys = 0;
				options = (kma_flags_t)(options | KMA_LOMEM);
			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
				maxPhys = 0;
			}
		}
		if (contiguous || maxPhys) {
			kr = kmem_alloc_contig(kernel_map, &virt, size,
			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
			    options, IOMemoryTag(kernel_map));
		} else {
			kr = kernel_memory_allocate(kernel_map, &virt,
			    size, alignMask, options, IOMemoryTag(kernel_map));
		}
		if (KERN_SUCCESS == kr) {
			address = virt;
#if IOTRACKING
			if (TRACK_ALLOC) {
				IOTrackingAlloc(gIOMallocTracking, address, size);
			}
#endif
		} else {
			address = 0;
		}
	} else {
		/* Small, unrestricted request: over-allocate from kheap and
		 * place an alignment header before the aligned address. */
		zalloc_flags_t zflags = Z_WAITOK;

		if (noSoftLimit) {
			zflags = (zalloc_flags_t)(zflags | Z_NOSOFTLIMIT);
		}

		adjustedSize += alignMask;
		if (adjustedSize < size) {
			return 0;
		}

		/* BEGIN IGNORE CODESTYLE */
		__typed_allocators_ignore_push // allocator implementation
		allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
		    adjustedSize, Z_VM_TAG_BT(zflags, VM_KERN_MEMORY_IOKIT));
		__typed_allocators_ignore_pop
		/* END IGNORE CODESTYLE */

		if (allocationAddress) {
			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
			    & (~alignMask);

			/* Keep the result within a single physical page so it is
			 * physically contiguous. */
			if (atop_32(address) != atop_32(address + size - 1)) {
				address = round_page(address);
			}

			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
#if IOTRACKING
			if (TRACK_ALLOC) {
				bzero(&hdr->tracking, sizeof(hdr->tracking));
				hdr->tracking.address = ~address;
				hdr->tracking.size = size;
				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
			}
#endif
		} else {
			address = 0;
		}
	}

	if (address) {
		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomalloc_size);
#endif
	}

	return address;
}
839
840
841 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
842
/* Bookkeeping node linking a virtual address handed out by
 * IOMallocContiguous (physical-address variant) to the backing
 * IOBufferMemoryDescriptor, so IOFreeContiguous can release it. */
struct _IOMallocContiguousEntry {
	mach_vm_address_t          virtualAddr;
	IOBufferMemoryDescriptor * md;
	queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
849
/*
 * Allocate physically contiguous memory.  When the caller does not need
 * the physical address, delegate to IOKernelAllocateWithPhysicalRestrict.
 * When it does, build an IOBufferMemoryDescriptor (physical mask derived
 * from the alignment, below 4GB) and remember the mapping on
 * gIOMallocContiguousEntries so IOFreeContiguous can find and release
 * the descriptor.  Returns NULL on failure or size 0.
 */
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
	mach_vm_address_t       address = 0;

	if (size == 0) {
		return NULL;
	}
	if (alignment == 0) {
		alignment = 1;
	}

	/* Do we want a physical address? */
	if (!physicalAddress) {
		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_DEFAULT,
		    size, 0 /*maxPhys*/, alignment, true, false /* noSoftLimit */);
	} else {
		do {
			IOBufferMemoryDescriptor * bmd;
			mach_vm_address_t          physicalMask;
			vm_offset_t                alignMask;

			/* Physical mask: 32-bit space, aligned per the request. */
			alignMask = alignment - 1;
			physicalMask = (0xFFFFFFFF ^ alignMask);

			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
			if (!bmd) {
				break;
			}

			_IOMallocContiguousEntry *
			    entry = IOMallocType(_IOMallocContiguousEntry);
			if (!entry) {
				bmd->release();
				break;
			}
			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
			entry->md          = bmd;
			lck_mtx_lock(gIOMallocContiguousEntriesLock);
			queue_enter( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			lck_mtx_unlock(gIOMallocContiguousEntriesLock);

			address          = (mach_vm_address_t) entry->virtualAddr;
			*physicalAddress = bmd->getPhysicalAddress();
		}while (false);
	}

	return (void *) address;
}
902
/*
 * Free memory from IOMallocContiguous.  If the address is found on the
 * contiguous-entry list it was descriptor-backed: release the descriptor
 * and the bookkeeping node.  Otherwise it came from
 * IOKernelAllocateWithPhysicalRestrict and is freed through
 * IOKernelFreePhysical.
 */
void
IOFreeContiguous(void * _address, vm_size_t size)
{
	_IOMallocContiguousEntry * entry;
	IOMemoryDescriptor       * md = NULL;

	mach_vm_address_t address = (mach_vm_address_t) _address;

	if (!address) {
		return;
	}

	assert(size);

	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_iterate( &gIOMallocContiguousEntries, entry,
	    _IOMallocContiguousEntry *, link )
	{
		if (entry->virtualAddr == address) {
			md = entry->md;
			queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
			break;
		}
	}
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	if (md) {
		/* entry is valid here only because md was found above. */
		md->release();
		IOFreeType(entry, _IOMallocContiguousEntry);
	} else {
		IOKernelFreePhysical(KHEAP_DEFAULT, (mach_vm_address_t) address, size);
	}
}
937
938 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
939
940 kern_return_t
IOIteratePageableMaps(vm_size_t size,IOIteratePageableMapsCallback callback,void * ref)941 IOIteratePageableMaps(vm_size_t size,
942 IOIteratePageableMapsCallback callback, void * ref)
943 {
944 if (size > kIOPageableMaxAllocSize) {
945 return kIOReturnBadArgument;
946 }
947 return (*callback)(gIOKitPageableMap.map, ref);
948 }
949
/* In/out parameter bundle for IOMallocPageableCallback: size and tag in,
 * allocated address out. */
struct IOMallocPageableRef {
	vm_offset_t address;
	vm_size_t   size;
	vm_tag_t    tag;
};
955
956 static kern_return_t
IOMallocPageableCallback(vm_map_t map,void * _ref)957 IOMallocPageableCallback(vm_map_t map, void * _ref)
958 {
959 struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
960 kma_flags_t flags = (kma_flags_t)(KMA_PAGEABLE | KMA_DATA_SHARED);
961
962 return kmem_alloc( map, &ref->address, ref->size, flags, ref->tag );
963 }
964
965 static void *
IOMallocPageablePages(vm_size_t size,vm_size_t alignment,vm_tag_t tag)966 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
967 {
968 kern_return_t kr = kIOReturnNotReady;
969 struct IOMallocPageableRef ref;
970
971 if (alignment > page_size) {
972 return NULL;
973 }
974 if (size > kIOPageableMaxAllocSize) {
975 return NULL;
976 }
977
978 ref.size = size;
979 ref.tag = tag;
980 kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
981 if (kIOReturnSuccess != kr) {
982 ref.address = 0;
983 }
984
985 return (void *) ref.address;
986 }
987
988 vm_map_t
IOPageableMapForAddress(uintptr_t address)989 IOPageableMapForAddress(uintptr_t address)
990 {
991 if (address < gIOKitPageableMap.address || address >= gIOKitPageableMap.end) {
992 panic("IOPageableMapForAddress: address out of range");
993 }
994 return gIOKitPageableMap.map;
995 }
996
997 static void
IOFreePageablePages(void * address,vm_size_t size)998 IOFreePageablePages(void * address, vm_size_t size)
999 {
1000 vm_map_t map;
1001
1002 map = IOPageableMapForAddress((vm_address_t) address);
1003 if (map) {
1004 kmem_free( map, (vm_offset_t) address, size);
1005 }
1006 }
1007
1008 #if defined(__x86_64__)
1009 static uintptr_t
IOMallocOnePageablePage(kalloc_heap_t kheap __unused,iopa_t * a)1010 IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
1011 {
1012 return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
1013 }
1014 #endif /* defined(__x86_64__) */
1015
/*
 * Common pageable allocator.  On x86_64 small requests are served from
 * the iopa sub-page allocator (and zeroed here on demand); large or
 * highly aligned ones take whole pages, which arrive already zeroed.
 * Other architectures always take whole pages.  Returns NULL for
 * over-wide alignment or allocation failure.
 */
static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
	void * addr;

	/* Alignment must fit in 32 bits. */
	if (((uint32_t) alignment) != alignment) {
		return NULL;
	}
#if defined(__x86_64__)
	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
	    alignment > page_size) {
		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
		/* Memory allocated this way will already be zeroed. */
	} else {
		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
		    &IOMallocOnePageablePage, KHEAP_DEFAULT, size, (uint32_t) alignment));
		if (addr && zeroed) {
			bzero(addr, size);
		}
	}
#else /* !defined(__x86_64__) */
	/* Whole-page path only; promote zero-byte requests to one byte so
	 * the allocation (and later free) is well-defined. */
	vm_size_t allocSize = size;
	if (allocSize == 0) {
		allocSize = 1;
	}
	addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
	/* already zeroed */
#endif /* defined(__x86_64__) */

	if (addr) {
#if IOALLOCDEBUG
		OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
	}

	return addr;
}
1054
1055 void *
IOMallocPageable(vm_size_t size,vm_size_t alignment)1056 IOMallocPageable(vm_size_t size, vm_size_t alignment)
1057 {
1058 return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
1059 }
1060
1061 void *
IOMallocPageableZero(vm_size_t size,vm_size_t alignment)1062 IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
1063 {
1064 return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
1065 }
1066
/*
 * Free memory from IOMallocPageable/IOMallocPageableZero.  On x86_64 a
 * sub-page chunk goes back to the iopa allocator, which returns a page
 * address (or 0) once the whole page is free; other architectures free
 * whole pages directly, matching the allocator's zero-size promotion.
 */
void
IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	IOStatisticsAlloc(kIOStatisticsFreePageable, size);

#if defined(__x86_64__)
	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
		/* iopa_free yields the page to release, or 0 if it is still
		 * partially in use. */
		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
		size = page_size;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#else /* !defined(__x86_64__) */
	/* Match the allocator's promotion of zero-byte requests. */
	if (size == 0) {
		size = 1;
	}
	if (address) {
		IOFreePageablePages(address, size);
	}
#endif /* defined(__x86_64__) */
}
1092
1093 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1094
1095
1096 __typed_allocators_ignore_push
1097
1098 void *
1099 IOMallocData_external(
1100 vm_size_t size);
1101 void *
IOMallocData_external(vm_size_t size)1102 IOMallocData_external(vm_size_t size)
1103 {
1104 return IOMalloc_internal(GET_KEXT_KHEAP_DATA(), size, Z_VM_TAG_BT_BIT);
1105 }
1106
1107 void *
1108 IOMallocZeroData_external(
1109 vm_size_t size);
1110 void *
IOMallocZeroData_external(vm_size_t size)1111 IOMallocZeroData_external(vm_size_t size)
1112 {
1113 return IOMalloc_internal(GET_KEXT_KHEAP_DATA(), size, Z_ZERO_VM_TAG_BT_BIT);
1114 }
1115
1116 void *
1117 IOMallocDataSharable_external(
1118 vm_size_t size);
1119 void *
IOMallocDataSharable_external(vm_size_t size)1120 IOMallocDataSharable_external(vm_size_t size)
1121 {
1122 return IOMalloc_internal(KHEAP_DATA_SHARED, size, Z_VM_TAG_BT_BIT);
1123 }
1124
1125 void *
1126 IOMallocZeroDataSharable_external(
1127 vm_size_t size);
1128 void *
IOMallocZeroDataSharable_external(vm_size_t size)1129 IOMallocZeroDataSharable_external(vm_size_t size)
1130 {
1131 return IOMalloc_internal(KHEAP_DATA_SHARED, size, Z_ZERO_VM_TAG_BT_BIT);
1132 }
1133
1134 void
IOFreeData(void * address,vm_size_t size)1135 IOFreeData(void * address, vm_size_t size)
1136 {
1137 return IOFree_internal(GET_KEXT_KHEAP_DATA(), address, size);
1138 }
1139
1140 void
IOFreeDataSharable(void * address,vm_size_t size)1141 IOFreeDataSharable(void * address, vm_size_t size)
1142 {
1143 return IOFree_internal(KHEAP_DATA_SHARED, address, size);
1144 }
1145
1146 __typed_allocators_ignore_pop
1147
1148 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1149
1150 __typed_allocators_ignore_push // allocator implementation
1151
void *
IOMallocTypeImpl(kalloc_type_view_t kt_view)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
		void *mem = IOMalloc_internal(KHEAP_DEFAULT, kt_size, Z_ZERO);
		/* Non-VM-backed type allocations are expected to succeed. */
		if (!IOMallocType_from_vm(kt_view)) {
			assert(mem);
		}
		return mem;
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
	/* Zone-backed (non-VM) type allocations may not fail. */
	if (!IOMallocType_from_vm(kt_view)) {
		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
	}
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kalloc_type_impl_external(kt_view, kt_flags);
}
1181
void
IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
{
#if IOTRACKING
	/* Leak detection allocates via IOMalloc; free symmetrically. */
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address,
		           kalloc_type_get_size(kt_view->kt_size));
	}
#endif
	/*
	 * Use external symbol for kalloc_type_impl as
	 * kalloc_type_views generated at some external callsites
	 * may not have been processed during boot.
	 */
	return kfree_type_impl_external(kt_view, address);
}
1198
void *
IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
{
#if IOTRACKING
	/*
	 * When leak detection is on default to using IOMalloc as kalloc
	 * type infrastructure isn't aware of needing additional space for
	 * the header.
	 */
	if (TRACK_ALLOC) {
		return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO);
	}
#endif
	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);

	/* Attribute the allocation to the kalloc-type VM tag. */
	kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
	return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
}
1217
void
IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
    vm_size_t size)
{
#if IOTRACKING
	/* Leak detection allocates via IOMalloc; free symmetrically. */
	if (TRACK_ALLOC) {
		return IOFree_internal(KHEAP_DEFAULT, address, size);
	}
#endif

	return kfree_type_var_impl(kt_view, address, size);
}
1230
1231 __typed_allocators_ignore_pop
1232
1233 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1234
1235 #if defined(__x86_64__)
1236
1237
1238 extern "C" void
iopa_init(iopa_t * a)1239 iopa_init(iopa_t * a)
1240 {
1241 bzero(a, sizeof(*a));
1242 a->lock = IOLockAlloc();
1243 queue_init(&a->list);
1244 }
1245
/*
 * Try to allocate 'count' contiguous chunks from page 'pa', whose free
 * chunks are tracked as set bits in the 64-bit 'avail' bitmap (one bit
 * per gIOPageAllocChunkBytes chunk, MSB first). 'align' is a mask that
 * keeps only acceptably aligned starting positions. Returns the chunk
 * address, or 0 if no suitable run exists.
 */
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
	uint32_t n, s;
	uint64_t avail = pa->avail;

	assert(avail);

	// find strings of count 1 bits in avail
	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail = avail & (avail << s);
	}
	// and aligned
	avail &= align;

	if (avail) {
		// take the leftmost qualifying run and mark its chunks in use
		n = __builtin_clzll(avail);
		pa->avail &= ~((-1ULL << (64 - count)) >> n);
		// page now full: remove it from the "has free chunks" list
		if (!pa->avail && pa->link.next) {
			remque(&pa->link);
			pa->link.next = NULL;
		}
		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
	}

	return 0;
}
1274
/*
 * Allocate 'bytes' (aligned to 'balign') from the sub-page allocator 'a',
 * calling 'alloc' to obtain a fresh page when no tracked page has room.
 * Returns the allocation address, or 0 on failure.
 */
uintptr_t
iopa_alloc(
	iopa_t * a,
	iopa_proc_t alloc,
	kalloc_heap_t kheap,
	vm_size_t bytes,
	vm_size_t balign)
{
	/* Bitmap masks selecting chunk positions aligned to 2^idx chunks. */
	static const uint64_t align_masks[] = {
		0xFFFFFFFFFFFFFFFF,
		0xAAAAAAAAAAAAAAAA,
		0x8888888888888888,
		0x8080808080808080,
		0x8000800080008000,
		0x8000000080000000,
		0x8000000000000000,
	};
	iopa_page_t * pa;
	uintptr_t addr = 0;
	uint32_t count;
	uint64_t align;
	vm_size_t align_masks_idx;

	/* Sizes beyond 32 bits are rejected; zero-size requests round to 1. */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}
	/* Number of fixed-size chunks needed to cover the request. */
	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
	align = align_masks[align_masks_idx];

	/* First pass: try every page already known to have free chunks. */
	IOLockLock(a->lock);
	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
	while (!queue_end(&a->list, &pa->link)) {
		addr = iopa_allocinpage(pa, count, align);
		if (addr) {
			a->bytecount += bytes;
			break;
		}
		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
	}
	IOLockUnlock(a->lock);

	if (!addr) {
		/* No room: get a fresh page (outside the lock) and carve it up. */
		addr = alloc(kheap, a);
		if (addr) {
			/* Page header lives in the last chunk of the page. */
			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
			pa->signature = kIOPageAllocSignature;
			/* All chunks free except the header chunk (lowest bit). */
			pa->avail = -2ULL;

			addr = iopa_allocinpage(pa, count, align);
			IOLockLock(a->lock);
			if (pa->avail) {
				enqueue_head(&a->list, &pa->link);
			}
			a->pagecount++;
			if (addr) {
				a->bytecount += bytes;
			}
			IOLockUnlock(a->lock);
		}
	}

	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
	return addr;
}
1345
/*
 * Return 'bytes' at 'addr' to the sub-page allocator 'a'. If the whole
 * page becomes free its (truncated) page address is returned so the
 * caller can release it to the underlying allocator; otherwise returns 0.
 */
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
	iopa_page_t * pa;
	uint32_t count;
	uintptr_t chunk;

	/* Match the size normalization performed by iopa_alloc(). */
	if (((uint32_t) bytes) != bytes) {
		return 0;
	}
	if (!bytes) {
		bytes = 1;
	}

	/* Byte offset of the allocation within its page; must be chunk aligned. */
	chunk = (addr & page_mask);
	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

	/* Page header sits in the last chunk of the page. */
	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
	assert(kIOPageAllocSignature == pa->signature);

	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
	chunk /= gIOPageAllocChunkBytes;

	IOLockLock(a->lock);
	if (!pa->avail) {
		/* Page was full and off-list; it has free chunks again. */
		assert(!pa->link.next);
		enqueue_tail(&a->list, &pa->link);
	}
	/* Mark the freed chunks available again. */
	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
	if (pa->avail != -2ULL) {
		pa = NULL;
	} else {
		/* All chunks (except the header) free: retire the page. */
		remque(&pa->link);
		pa->link.next = NULL;
		pa->signature = 0;
		a->pagecount--;
		// page to free
		pa = (typeof(pa))trunc_page(pa);
	}
	a->bytecount -= bytes;
	IOLockUnlock(a->lock);

	return (uintptr_t) pa;
}
1390
1391 #endif /* defined(__x86_64__) */
1392
1393 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1394
/*
 * Change the cache mode of a page-aligned kernel virtual range by
 * remapping each resident page with the requested mode. Only supports
 * kernel_task and page-aligned address/length.
 */
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
	IOReturn ret = kIOReturnSuccess;
	ppnum_t pagenum;

	if (task != kernel_task) {
		return kIOReturnUnsupported;
	}
	/* Both address and length must be page aligned. */
	if ((address | length) & PAGE_MASK) {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
		return kIOReturnUnsupported;
	}
	length = round_page(address + length) - trunc_page( address );
	address = trunc_page( address );

	// make map mode
	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

	while ((kIOReturnSuccess == ret) && (length > 0)) {
		// Get the physical page number
		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
		if (pagenum) {
			/* Unmap, then remap the same physical page with the new mode. */
			ret = IOUnmapPages( get_task_map(task), address, page_size );
			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
		} else {
			/* Page not resident: cannot change its mapping. */
			ret = kIOReturnVMError;
		}

		address += page_size;
		length -= page_size;
	}

	return ret;
}
1431
1432
1433 IOReturn
IOFlushProcessorCache(task_t task,IOVirtualAddress address,IOByteCount length)1434 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1435 IOByteCount length )
1436 {
1437 if (task != kernel_task) {
1438 return kIOReturnUnsupported;
1439 }
1440
1441 flush_dcache64((addr64_t) address, (unsigned) length, false );
1442
1443 return kIOReturnSuccess;
1444 }
1445
1446 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1447
1448 vm_offset_t
OSKernelStackRemaining(void)1449 OSKernelStackRemaining( void )
1450 {
1451 return ml_stack_remaining();
1452 }
1453
1454 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1455
1456 /*
1457 * Spin for indicated number of milliseconds.
1458 */
1459 void
IOSleep(unsigned milliseconds)1460 IOSleep(unsigned milliseconds)
1461 {
1462 delay_for_interval(milliseconds, kMillisecondScale);
1463 }
1464
1465 /*
1466 * Spin for indicated number of milliseconds, and potentially an
1467 * additional number of milliseconds up to the leeway values.
1468 */
1469 void
IOSleepWithLeeway(unsigned intervalMilliseconds,unsigned leewayMilliseconds)1470 IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1471 {
1472 delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1473 }
1474
1475 /*
1476 * Spin for indicated number of microseconds.
1477 */
1478 void
IODelay(unsigned microseconds)1479 IODelay(unsigned microseconds)
1480 {
1481 delay_for_interval(microseconds, kMicrosecondScale);
1482 }
1483
1484 /*
1485 * Spin for indicated number of nanoseconds.
1486 */
1487 void
IOPause(unsigned nanoseconds)1488 IOPause(unsigned nanoseconds)
1489 {
1490 delay_for_interval(nanoseconds, kNanosecondScale);
1491 }
1492
1493 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1494
/* Shared backend; __printflike lets the compiler check callers' formats. */
static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
	/*
	 * printf-style kernel logging. noinline/not_tail_called preserve a
	 * real stack frame so __builtin_return_address(0) identifies the
	 * actual caller for log attribution.
	 */
	void *caller = __builtin_return_address(0);
	va_list ap;

	va_start(ap, format);
	_IOLogv(format, ap, caller);
	va_end(ap);
}
1508
__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
	/* va_list variant of IOLog(); see IOLog() for the attribute rationale. */
	void *caller = __builtin_return_address(0);
	_IOLogv(format, ap, caller);
}
1516
/*
 * Common backend for IOLog()/IOLogv(): emit the message to os_log,
 * attributed to 'caller', and (unless disabled) to the serial console.
 */
void
_IOLogv(const char *format, va_list ap, void *caller)
{
	va_list ap2;
	struct console_printbuf_state info_data;
	console_printbuf_state_init(&info_data, TRUE, TRUE);

	/* 'ap' is consumed by os_log below; keep a copy for the console path. */
	va_copy(ap2, ap);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#pragma clang diagnostic ignored "-Wformat"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
#pragma clang diagnostic pop

	if (!disable_iolog_serial_output) {
		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
		console_printbuf_clear(&info_data);
	}
	va_end(ap2);

	/* Calling IOLog with interrupts disabled is a bug, except while
	 * quiescing, in the debugger, or during early/late CPU bringup. */
	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
	    debug_mode_active() || !gCPUsRunning,
	    "IOLog called with interrupts disabled");
}
1542
#if !__LP64__
/* Legacy 32-bit-only entry point: halt the system with 'reason'. */
void
IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif
1550
1551 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1552
/*
 * Hex-dump up to 4096 bytes of 'buffer' through the caller-supplied
 * printf-style 'output' function, 16 bytes per line with an ASCII column
 * (non-printable bytes shown as spaces).
 */
void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
	size_t idx, linestart;
	/* Width of one formatted byte: "0xZZ, " without the terminator. */
	enum { bytelen = (sizeof("0xZZ, ") - 1) };
	char hex[(bytelen * 16) + 1];
	uint8_t c, chars[17];

	output("%s(0x%lx):\n", title, size);
	output("          0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
	if (size > 4096) {
		size = 4096;
	}
	chars[16] = 0;
	for (idx = 0, linestart = 0; idx < size;) {
		c = ((char *)buffer)[idx];
		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
		/* Substitute a space for bytes outside printable ASCII. */
		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
		idx++;
		/* Flush a line every 16 bytes, or at the (possibly short) end. */
		if ((idx == size) || !(idx & 15)) {
			if (idx & 15) {
				chars[idx & 15] = 0;
			}
			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
			linestart += 16;
		}
	}
}
1582
1583 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1584
1585 /*
1586 * Convert a integer constant (typically a #define or enum) to a string.
1587 */
/* Scratch buffer for unknown values; shared static, so not thread safe. */
static char noValue[80];

/*
 * Look up 'value' in a table terminated by a NULL 'name' entry. Returns
 * the matching name, or a formatted "0x%x (UNDEFINED)" string held in a
 * shared static buffer.
 */
const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for (; regValueArray->name; regValueArray++) {
		if (regValueArray->value == value) {
			return regValueArray->name;
		}
	}
	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
	return (const char *)noValue;
}
1601
1602 IOReturn
IOFindValueForName(const char * string,const IONamedValue * regValueArray,int * value)1603 IOFindValueForName(const char *string,
1604 const IONamedValue *regValueArray,
1605 int *value)
1606 {
1607 for (; regValueArray->name; regValueArray++) {
1608 if (!strcmp(regValueArray->name, string)) {
1609 *value = regValueArray->value;
1610 return kIOReturnSuccess;
1611 }
1612 }
1613 return kIOReturnBadArgument;
1614 }
1615
1616 OSString *
IOCopyLogNameForPID(int pid)1617 IOCopyLogNameForPID(int pid)
1618 {
1619 char buf[128];
1620 size_t len;
1621 snprintf(buf, sizeof(buf), "pid %d, ", pid);
1622 len = strlen(buf);
1623 proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1624 return OSString::withCString(buf);
1625 }
1626
1627 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1628
1629 IOAlignment
IOSizeToAlignment(unsigned int size)1630 IOSizeToAlignment(unsigned int size)
1631 {
1632 int shift;
1633 const int intsize = sizeof(unsigned int) * 8;
1634
1635 for (shift = 1; shift < intsize; shift++) {
1636 if (size & 0x80000000) {
1637 return (IOAlignment)(intsize - shift);
1638 }
1639 size <<= 1;
1640 }
1641 return 0;
1642 }
1643
1644 unsigned int
IOAlignmentToSize(IOAlignment align)1645 IOAlignmentToSize(IOAlignment align)
1646 {
1647 unsigned int size;
1648
1649 for (size = 1; align; align--) {
1650 size <<= 1;
1651 }
1652 return size;
1653 }
1654 } /* extern "C" */
1655