1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #define IOKIT_ENABLE_SHARED_PTR
29
30 #define _IOMEMORYDESCRIPTOR_INTERNAL_
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34
35 #include <IOKit/IOLib.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IOBufferMemoryDescriptor.h>
38 #include <libkern/OSDebug.h>
39 #include <mach/mach_vm.h>
40
41 #include <vm/vm_kern_xnu.h>
42
43 #include "IOKitKernelInternal.h"
44
45 #ifdef IOALLOCDEBUG
46 #include <libkern/c++/OSCPPDebug.h>
47 #endif
48 #include <IOKit/IOStatisticsPrivate.h>
49
50 #if IOKITSTATS
51 #define IOStatisticsAlloc(type, size) \
52 do { \
53 IOStatistics::countAlloc(type, size); \
54 } while (0)
55 #else
56 #define IOStatisticsAlloc(type, size)
57 #endif /* IOKITSTATS */
58
59
60 __BEGIN_DECLS
61 void ipc_port_release_send(ipc_port_t port);
62 #include <vm/pmap.h>
63
64 KALLOC_HEAP_DEFINE(KHEAP_IOBMD_CONTROL, "IOBMD_control", KHEAP_ID_KT_VAR);
65 __END_DECLS
66
67 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
68
// Private state bits kept in _internalFlags; they record how the backing
// buffer was allocated so ::free() can release it the same way.
enum{
	kInternalFlagPhysical      = 0x00000001, // buffer from IOKernelAllocateWithPhysicalRestrict
	kInternalFlagPageSized     = 0x00000002, // capacity was rounded up to a page multiple
	kInternalFlagPageAllocated = 0x00000004, // buffer from the iopa page-pool allocator (x86_64)
	kInternalFlagInit          = 0x00000008, // initWithPhysicalMask reached super::initWithOptions
	kInternalFlagHasPointers   = 0x00000010, // control buffer: allocate from KHEAP_IOBMD_CONTROL
	kInternalFlagGuardPages    = 0x00000020, // buffer bracketed by guard pages (kernel_memory_allocate)
	/**
	 * Should the IOBMD behave as if it has no kernel mapping for the
	 * underlying buffer? Note that this does not necessarily imply the
	 * existence (or non-existence) of a kernel mapping.
	 */
	kInternalFlagAsIfUnmapped  = 0x00000040,
};
83
84 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
85
#define super IOGeneralMemoryDescriptor
// Register with the OSMetaClass runtime; ZC_ZFREE_CLEARMEM asks the zone
// allocator to scrub instance memory when an object is freed.
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);
89
90 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91
92 #if defined(__x86_64__)
93 static uintptr_t
IOBMDPageProc(kalloc_heap_t kheap,iopa_t * a)94 IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a)
95 {
96 kern_return_t kr;
97 vm_address_t vmaddr = 0;
98 kma_flags_t kma_flags = KMA_ZERO;
99
100 if (kheap == KHEAP_DATA_SHARED) {
101 kma_flags = (kma_flags_t) (kma_flags | KMA_DATA_SHARED);
102 }
103 kr = kmem_alloc(kernel_map, &vmaddr, page_size,
104 kma_flags, VM_KERN_MEMORY_IOKIT);
105
106 if (KERN_SUCCESS != kr) {
107 vmaddr = 0;
108 }
109
110 return (uintptr_t) vmaddr;
111 }
112 #endif /* defined(__x86_64__) */
113
114 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
115
#ifndef __LP64__
/*
 * Legacy 32-bit entry point: identical to initWithPhysicalMask with no
 * physical-address constraint.
 */
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	return initWithPhysicalMask(inTask, options, capacity, alignment,
	           (mach_vm_address_t) 0);
}
#endif /* !__LP64__ */
128
/*
 * withCopy:
 *
 * Create an IOBMD whose backing range is a copy of [source, source+size)
 * taken from sourceMap and copied out into inTask's map. On any failure
 * the partially-built object and any undelivered vm_map copy object are
 * released and NULL is returned.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
	task_t            inTask,
	IOOptionBits      options,
	vm_map_t          sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	OSSharedPtr<IOBufferMemoryDescriptor> inst;
	kern_return_t err;
	vm_map_copy_t copy;
	vm_map_address_t address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = OSMakeShared<IOBufferMemoryDescriptor>();
		if (!inst) {
			break;
		}
		// Pre-populating _ranges.v64 routes initWithPhysicalMask down
		// its "withCopy" path (it sees a non-NULL range).
		inst->_ranges.v64 = IOMallocType(IOAddressRange);

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		// copyout consumed the copy object; don't discard it below.
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	// Failure: release the copy object if copyout never took ownership.
	if (copy) {
		vm_map_copy_discard(copy);
	}

	return nullptr;
}
181
182
/*
 * initWithPhysicalMask:
 *
 * Core initializer for all IOBMD creation paths. Chooses an allocation
 * strategy from the options/alignment/physicalMask combination:
 *   - pageable memory mapped into a task,
 *   - physically restricted / contiguous wired memory,
 *   - guard-page bracketed wired memory,
 *   - (x86_64) sub-page allocations from the iopa pool,
 *   - plain kalloc-heap wired memory.
 * Returns false on any parameter or allocation failure; ::free() is
 * responsible for unwinding whatever was set up before the failure.
 */
bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t mapTask = NULL;
	kalloc_heap_t kheap = KHEAP_DATA_SHARED;
	mach_vm_address_t highestMask = 0;
	IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool mapped = false;
	bool withCopy = false;
	bool mappedOrShared = false;
	bool noSoftLimit = false;

	if (!capacity) {
		return false;
	}

	/*
	 * The IOKit constructor requests the allocator for zeroed memory
	 * so the members of the class do not need to be explicitly zeroed.
	 */
	_options = options;
	_capacity = capacity;

	if (!_ranges.v64) {
		_ranges.v64 = IOMallocType(IOAddressRange);
		_ranges.v64->address = 0;
		_ranges.v64->length = 0;
	} else {
		// A pre-populated range means ::withCopy() built us; that path
		// is only valid for pageable memory bound to a task.
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer = (void *) _ranges.v64->address;
		withCopy = true;
	}

	/*
	 * Set kalloc_heap to KHEAP_IOBMD_CONTROL if allocation contains pointers
	 */
	if (kInternalFlagHasPointers & _internalFlags) {
		kheap = KHEAP_IOBMD_CONTROL;
	}

	// make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}

	if (physicalMask && (alignment <= 1)) {
		// Derive a natural alignment from the low zero bits of the
		// mask, and the highest acceptable address from its high bits.
		alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
		highestMask = (physicalMask | alignment);
		alignment++;
		if (alignment < page_size) {
			alignment = page_size;
		}
	}

	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
		alignment = page_size;
	}

	if (alignment >= page_size) {
		if (round_page_overflow(capacity, &capacity)) {
			return false;
		}
	}

	if (alignment > page_size) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	_alignment = alignment;

	// Reject capacity + alignment overflow (capacity may have grown above).
	if ((capacity + alignment) < _capacity) {
		return false;
	}

	if (inTask) {
		if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
			// Cannot create non-pageable memory in user tasks
			return false;
		}
	} else {
		// Not passing a task implies the memory should not be mapped (or, at
		// least, should behave as if it were not mapped)
		_internalFlags |= kInternalFlagAsIfUnmapped;

		// Disable the soft-limit since the mapping, if any, will not escape the
		// IOBMD.
		noSoftLimit = true;
	}

	bzero(&mapSpec, sizeof(mapSpec));
	mapSpec.alignment = _alignment;
	mapSpec.numAddressBits = 64;
	if (highestMask && mapped) {
		// A system mapper can honor the address limit itself; record the
		// number of usable address bits and drop the mask.
		if (highestMask <= 0xFFFFFFFF) {
			mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
		} else {
			mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
		}
		highestMask = 0;
	}

	// set memory entry cache mode, pageable, purgeable
	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
	if (options & kIOMemoryPageable) {
		if (_internalFlags & kInternalFlagGuardPages) {
			printf("IOBMD: Unsupported use of guard pages with pageable memory.\n");
			return false;
		}
		iomdOptions |= kIOMemoryBufferPageable;
		if (options & kIOMemoryPurgeable) {
			iomdOptions |= kIOMemoryBufferPurgeable;
		}
	} else {
		// Buffers shouldn't auto-prepare; they should be prepared
		// explicitly. But it never was enforced, so what are you going
		// to do?
		iomdOptions |= kIOMemoryAutoPrepare;

		/* Allocate a wired-down buffer inside kernel space. */

		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
			// Only need host-contiguity when there is no mapper to
			// present a contiguous IO view.
			contig |= (!mapped);
			contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
			contig |= true;
#endif
		}

		mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
		if (contig || highestMask || (alignment > page_size)) {
			// Physically restricted / contiguous path.
			if (_internalFlags & kInternalFlagGuardPages) {
				printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n");
				return false;
			}
			_internalFlags |= kInternalFlagPhysical;
			if (highestMask) {
				_internalFlags |= kInternalFlagPageSized;
				if (round_page_overflow(capacity, &capacity)) {
					return false;
				}
			}
			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap,
			    capacity, highestMask, alignment, contig, noSoftLimit);
		} else if (_internalFlags & kInternalFlagGuardPages) {
			// Guard-page path: allocate capacity plus one guard page on
			// each side; _buffer points just past the leading guard.
			vm_offset_t address = 0;
			kern_return_t kr;
			uintptr_t alignMask;
			kma_flags_t kma_flags = (kma_flags_t) (KMA_GUARD_FIRST |
			    KMA_GUARD_LAST | KMA_ZERO);

			if (((uint32_t) alignment) != alignment) {
				return false;
			}
			if (kheap == KHEAP_DATA_SHARED) {
				kma_flags = (kma_flags_t) (kma_flags | KMA_DATA_SHARED);
			}

			if (noSoftLimit) {
				kma_flags = (kma_flags_t)(kma_flags | KMA_NOSOFTLIMIT);
			}

			alignMask = (1UL << log2up((uint32_t) alignment)) - 1;
			kr = kernel_memory_allocate(kernel_map, &address,
			    capacity + page_size * 2, alignMask, kma_flags,
			    IOMemoryTag(kernel_map));
			if (kr != KERN_SUCCESS || address == 0) {
				return false;
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
			_buffer = (void *)(address + page_size);
#if defined(__x86_64__)
		} else if (mappedOrShared
		    && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
			// Sub-page allocations come from the shared iopa pool.
			_internalFlags |= kInternalFlagPageAllocated;
			_buffer = (void *) iopa_alloc(&gIOBMDPageAllocator,
			    &IOBMDPageProc, kheap, capacity, alignment);
			if (_buffer) {
				bzero(_buffer, capacity);
				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
				OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
			}
#endif /* defined(__x86_64__) */
		} else {
			// Plain kalloc-heap path.
			zalloc_flags_t zflags = Z_ZERO_VM_TAG_BT_BIT;
			if (noSoftLimit) {
				zflags = (zalloc_flags_t)(zflags | Z_NOSOFTLIMIT);
			}

			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			if (alignment > 1) {
				_buffer = IOMallocAligned_internal(kheap, capacity, alignment,
				    zflags);
			} else {
				_buffer = IOMalloc_internal(kheap, capacity, zflags);
			}
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
		}
		if (!_buffer) {
			return false;
		}
	}

	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
		vm_size_t size = round_page(capacity);

		// initWithOptions will create memory entry
		if (!withCopy) {
			iomdOptions |= kIOMemoryPersistent;
		}

		if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
			OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
			if (!withCopy) {
				mapTask = inTask;
			}
		} else if (options & kIOMapCacheMask) {
			// Prefetch each page to put entries into the pmap
			volatile UInt8 * startAddr = (UInt8 *)_buffer;
			volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;

			while (startAddr < endAddr) {
				UInt8 dummyVar = *startAddr;
				(void) dummyVar;
				startAddr += page_size;
			}
		}
	}

	_ranges.v64->address = (mach_vm_address_t) _buffer;
	_ranges.v64->length = _capacity;

	if (!super::initWithOptions(
		    /* buffers */ _ranges.v64, /* count */ 1, /* offset */ 0,
		    // Since we handle all "unmapped" behavior internally and our superclass
		    // requires a task, default all unbound IOBMDs to the kernel task.
		    /* task */ inTask ?: kernel_task,
		    /* options */ iomdOptions,
		    /* System mapper */ NULL)) {
		return false;
	}

	// From here on ::free() knows init progressed far enough to track.
	_internalFlags |= kInternalFlagInit;
#if IOTRACKING
	if (!(options & kIOMemoryPageable)) {
		trackingAccumSize(capacity);
	}
#endif /* IOTRACKING */

	// give any system mapper the allocation params
	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
	    &mapSpec, sizeof(mapSpec))) {
		return false;
	}

	if (mapTask) {
		// Pageable path: build the task mapping now and keep it in
		// reserved->map rather than in the mapping list.
		if (!reserved) {
			reserved = IOMallocType(ExpansionData);
			if (!reserved) {
				return false;
			}
		}
		reserved->map = createMappingInTask(mapTask, 0,
		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
		if (!reserved->map) {
			_buffer = NULL;
			return false;
		}
		release();      // map took a retain on this
		reserved->map->retain();
		removeMapping(reserved->map);
		mach_vm_address_t buffer = reserved->map->getAddress();
		_buffer = (void *) buffer;
		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
			_ranges.v64->address = buffer;
		}
	}

	setLength(_capacity);

	return true;
}
495
496 bool
initControlWithPhysicalMask(task_t inTask,IOOptionBits options,mach_vm_size_t capacity,mach_vm_address_t alignment,mach_vm_address_t physicalMask)497 IOBufferMemoryDescriptor::initControlWithPhysicalMask(
498 task_t inTask,
499 IOOptionBits options,
500 mach_vm_size_t capacity,
501 mach_vm_address_t alignment,
502 mach_vm_address_t physicalMask)
503 {
504 _internalFlags = kInternalFlagHasPointers;
505 return initWithPhysicalMask(inTask, options, capacity, alignment,
506 physicalMask);
507 }
508
509 bool
initWithGuardPages(task_t inTask,IOOptionBits options,mach_vm_size_t capacity)510 IOBufferMemoryDescriptor::initWithGuardPages(
511 task_t inTask,
512 IOOptionBits options,
513 mach_vm_size_t capacity)
514 {
515 mach_vm_size_t roundedCapacity;
516
517 _internalFlags = kInternalFlagGuardPages;
518
519 if (round_page_overflow(capacity, &roundedCapacity)) {
520 return false;
521 }
522
523 return initWithPhysicalMask(inTask, options, roundedCapacity, page_size,
524 (mach_vm_address_t)0);
525 }
526
527 OSSharedPtr<IOBufferMemoryDescriptor>
inTaskWithOptions(task_t inTask,IOOptionBits options,vm_size_t capacity,vm_offset_t alignment)528 IOBufferMemoryDescriptor::inTaskWithOptions(
529 task_t inTask,
530 IOOptionBits options,
531 vm_size_t capacity,
532 vm_offset_t alignment)
533 {
534 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
535
536 if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
537 me.reset();
538 }
539 return me;
540 }
541
542 OSSharedPtr<IOBufferMemoryDescriptor>
inTaskWithOptions(task_t inTask,IOOptionBits options,vm_size_t capacity,vm_offset_t alignment,uint32_t kernTag,uint32_t userTag)543 IOBufferMemoryDescriptor::inTaskWithOptions(
544 task_t inTask,
545 IOOptionBits options,
546 vm_size_t capacity,
547 vm_offset_t alignment,
548 uint32_t kernTag,
549 uint32_t userTag)
550 {
551 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
552
553 if (me) {
554 me->setVMTags(kernTag, userTag);
555
556 if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
557 me.reset();
558 }
559 }
560 return me;
561 }
562
563 OSSharedPtr<IOBufferMemoryDescriptor>
inTaskWithPhysicalMask(task_t inTask,IOOptionBits options,mach_vm_size_t capacity,mach_vm_address_t physicalMask)564 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
565 task_t inTask,
566 IOOptionBits options,
567 mach_vm_size_t capacity,
568 mach_vm_address_t physicalMask)
569 {
570 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
571
572 if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
573 me.reset();
574 }
575 return me;
576 }
577
578 OSSharedPtr<IOBufferMemoryDescriptor>
inTaskWithGuardPages(task_t inTask,IOOptionBits options,mach_vm_size_t capacity)579 IOBufferMemoryDescriptor::inTaskWithGuardPages(
580 task_t inTask,
581 IOOptionBits options,
582 mach_vm_size_t capacity)
583 {
584 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
585
586 if (me && !me->initWithGuardPages(inTask, options, capacity)) {
587 me.reset();
588 }
589 return me;
590 }
591
#ifndef __LP64__
/*
 * Legacy 32-bit entry point: kernel-task init with no physical mask.
 */
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment,
	           (mach_vm_address_t)0);
}
#endif /* !__LP64__ */
602
603 OSSharedPtr<IOBufferMemoryDescriptor>
withOptions(IOOptionBits options,vm_size_t capacity,vm_offset_t alignment)604 IOBufferMemoryDescriptor::withOptions(
605 IOOptionBits options,
606 vm_size_t capacity,
607 vm_offset_t alignment)
608 {
609 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
610
611 if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
612 me.reset();
613 }
614 return me;
615 }
616
617
618 /*
619 * withCapacity:
620 *
621 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
622 * hold capacity bytes. The descriptor's length is initially set to the capacity.
623 */
624 OSSharedPtr<IOBufferMemoryDescriptor>
withCapacity(vm_size_t inCapacity,IODirection inDirection,bool inContiguous)625 IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
626 IODirection inDirection,
627 bool inContiguous)
628 {
629 return IOBufferMemoryDescriptor::withOptions(
630 inDirection | kIOMemoryUnshared
631 | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
632 inCapacity, inContiguous ? inCapacity : 1 );
633 }
634
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t inLength,
    IODirection inDirection,
    bool inContiguous)
{
	IOOptionBits opts = inDirection | kIOMemoryUnshared;

	if (inContiguous) {
		opts |= kIOMemoryPhysicallyContiguous;
	}
	if (!initWithPhysicalMask(kernel_task, opts, inLength, inLength,
	    (mach_vm_address_t)0)) {
		return false;
	}

	// Begin with an empty buffer, then copy the caller's bytes in.
	setLength(0);
	return appendBytes(inBytes, inLength);
}
#endif /* !__LP64__ */
664
665 /*
666 * withBytes:
667 *
668 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
669 * The descriptor's length and capacity are set to the input buffer's size.
670 */
671 OSSharedPtr<IOBufferMemoryDescriptor>
withBytes(const void * inBytes,vm_size_t inLength,IODirection inDirection,bool inContiguous)672 IOBufferMemoryDescriptor::withBytes(const void * inBytes,
673 vm_size_t inLength,
674 IODirection inDirection,
675 bool inContiguous)
676 {
677 OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
678 mach_vm_address_t alignment;
679
680 alignment = (inLength <= page_size) ? inLength : page_size;
681 if (me && !me->initWithPhysicalMask(
682 kernel_task, inDirection | kIOMemoryUnshared
683 | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
684 inLength, alignment, 0 )) {
685 me.reset();
686 }
687
688 if (me) {
689 // start out with no data
690 me->setLength(0);
691
692 if (!me->appendBytes(inBytes, inLength)) {
693 me.reset();
694 }
695 }
696 return me;
697 }
698
699 /*
700 * free:
701 *
702 * Free resources
703 */
/*
 * free:
 *
 * Release the backing buffer, the task mapping (if any) and the range
 * record. The deallocation path must mirror the allocation strategy
 * chosen in initWithPhysicalMask, which is reconstructed here from the
 * cached flags.
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits flags = _flags;
	IOOptionBits internalFlags = _internalFlags;
	IOOptionBits options = _options;
	vm_size_t size = _capacity;
	void * buffer = _buffer;
	IOMemoryMap * map = NULL;
	IOAddressRange * range = _ranges.v64;
	vm_offset_t alignment = _alignment;
	kalloc_heap_t kheap = KHEAP_DATA_SHARED;
	vm_size_t rsize;

	// Page-aligned allocations were rounded up at init time; round the
	// free size the same way so it matches the allocated size.
	if (alignment >= page_size) {
		if (!round_page_overflow(size, &rsize)) {
			size = rsize;
		}
	}

	if (reserved) {
		map = reserved->map;
		IOFreeType(reserved, ExpansionData);
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		if (!round_page_overflow(size, &rsize)) {
			size = rsize;
		}
	}

	// Control (pointer-bearing) buffers came from the dedicated heap.
	if (internalFlags & kInternalFlagHasPointers) {
		kheap = KHEAP_IOBMD_CONTROL;
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
#if defined(__x86_64__)
			// Return the chunk to the iopa pool; a non-zero result
			// means the whole underlying page is now empty and must
			// be handed back to kmem.
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kernel_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
#else /* !defined(__x86_64__) */
			/* should be unreachable */
			panic("Attempting to free IOBMD with page allocated flag");
#endif /* defined(__x86_64__) */
		} else if (kInternalFlagGuardPages & internalFlags) {
			// _buffer pointed one page past the allocation start;
			// free the full span including both guard pages.
			vm_offset_t allocation = (vm_offset_t)buffer - page_size;
			kmem_free(kernel_map, allocation, size + page_size * 2,
			    (kmf_flags_t)(KMF_GUARD_FIRST | KMF_GUARD_LAST));
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			IOFreeAligned_internal(kheap, buffer, size);
		} else {
			IOFree_internal(kheap, buffer, size);
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IOFreeType(range, IOAddressRange);
	}
}
800
801 /*
802 * getCapacity:
803 *
804 * Get the buffer capacity
805 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	// Capacity is fixed at init time; the current length (setLength)
	// may be smaller but never exceeds it.
	return _capacity;
}
811
812 /*
813 * setLength:
814 *
815 * Change the buffer length of the memory descriptor. When a new buffer
816 * is created, the initial length of the buffer is set to be the same as
817 * the capacity. The length can be adjusted via setLength for a shorter
818 * transfer (there is no need to create more buffer descriptors when you
819 * can reuse an existing one, even for different transfer sizes). Note
820 * that the specified length must not exceed the capacity of the buffer.
821 */
822 void
setLength(vm_size_t length)823 IOBufferMemoryDescriptor::setLength(vm_size_t length)
824 {
825 assert(length <= _capacity);
826 if (length > _capacity) {
827 return;
828 }
829
830 _length = length;
831 _ranges.v64->length = length;
832 }
833
834 /*
835 * setDirection:
836 *
837 * Change the direction of the transfer. This method allows one to redirect
838 * the descriptor's transfer direction. This eliminates the need to destroy
839 * and create new buffers when different transfer directions are needed.
840 */
841 void
setDirection(IODirection direction)842 IOBufferMemoryDescriptor::setDirection(IODirection direction)
843 {
844 _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
845 #ifndef __LP64__
846 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
847 #endif /* !__LP64__ */
848 }
849
850 /*
851 * appendBytes:
852 *
853 * Add some data to the end of the buffer. This method automatically
854 * maintains the memory descriptor buffer length. Note that appendBytes
855 * will not copy past the end of the memory descriptor's current capacity.
856 */
857 bool
appendBytes(const void * bytes,vm_size_t withLength)858 IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
859 {
860 vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
861 IOByteCount offset;
862
863 assert(_length <= _capacity);
864
865 offset = _length;
866 _length += actualBytesToCopy;
867 _ranges.v64->length += actualBytesToCopy;
868
869 if (_task == kernel_task) {
870 bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
871 actualBytesToCopy);
872 } else {
873 writeBytes(offset, bytes, actualBytesToCopy);
874 }
875
876 return true;
877 }
878
879 /*
880 * getBytesNoCopy:
881 *
882 * Return the virtual address of the beginning of the buffer
883 */
884 void *
getBytesNoCopy()885 IOBufferMemoryDescriptor::getBytesNoCopy()
886 {
887 if (__improbable(_internalFlags & kInternalFlagAsIfUnmapped)) {
888 return NULL;
889 }
890
891 if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
892 return _buffer;
893 } else {
894 return (void *)_ranges.v64->address;
895 }
896 }
897
898
899 /*
900 * getBytesNoCopy:
901 *
902 * Return the virtual address of an offset from the beginning of the buffer
903 */
904 void *
getBytesNoCopy(vm_size_t start,vm_size_t withLength)905 IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
906 {
907 IOVirtualAddress address;
908
909 if (__improbable(_internalFlags & kInternalFlagAsIfUnmapped)) {
910 return NULL;
911 }
912
913 if ((start + withLength) < start) {
914 return NULL;
915 }
916
917 if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
918 address = (IOVirtualAddress) _buffer;
919 } else {
920 address = _ranges.v64->address;
921 }
922
923 if (start < _length && (start + withLength) <= _length) {
924 return (void *)(address + start);
925 }
926 return NULL;
927 }
928
#ifndef __LP64__
/*
 * Legacy accessor: virtual address at `offset`, with the remaining
 * byte count (to the current length) returned through lengthOfSegment.
 */
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * segment = getBytesNoCopy(offset, 0);

	if ((segment != NULL) && (lengthOfSegment != NULL)) {
		*lengthOfSegment = _length - offset;
	}

	return segment;
}
#endif /* !__LP64__ */
943
944 #ifdef __LP64__
945 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
946 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
947 #else /* !__LP64__ */
948 OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
949 OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
950 #endif /* !__LP64__ */
951 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
952 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
953 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
954 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
955 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
956 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
957 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
958 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
959 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
960 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
961 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
962 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
963 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
964 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);
965