/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004,
    kInternalFlagInit          = 0x00000008,
    kInternalFlagHasPointers   = 0x00000010,
    kInternalFlagGuardPages    = 0x00000020,
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Page-supplier callback handed to the IOKit page allocator (iopa):
// grabs one zero-filled kernel page from the heap's fallback map.
static uintptr_t
IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr = 0;

    kr = kernel_memory_allocate(kheap->kh_fallback_map, &vmaddr,
        page_size, 0, (kma_flags_t) (KMA_NONE | KMA_ZERO), VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) {
        vmaddr = 0;
    }

    return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
    task_t            inTask,
    IOOptionBits      options,
    vm_map_t          sourceMap,
    mach_vm_address_t source,
    mach_vm_size_t    size)
{
    OSSharedPtr<IOBufferMemoryDescriptor> inst;
    kern_return_t    err;
    vm_map_copy_t    copy;
    vm_map_address_t address;

    copy = NULL;
    do {
        err = kIOReturnNoMemory;
        inst = OSMakeShared<IOBufferMemoryDescriptor>();
        if (!inst) {
            break;
        }
        inst->_ranges.v64 = IOMallocType(IOAddressRange);

        err = vm_map_copyin(sourceMap, source, size,
            false /* src_destroy */, &copy);
        if (KERN_SUCCESS != err) {
            break;
        }

        err = vm_map_copyout(get_task_map(inTask), &address, copy);
        if (KERN_SUCCESS != err) {
            break;
        }
        copy = NULL;

        inst->_ranges.v64->address = address;
        inst->_ranges.v64->length = size;

        if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
            err = kIOReturnError;
        }
    } while (false);

    if (KERN_SUCCESS == err) {
        return inst;
    }

    if (copy) {
        vm_map_copy_discard(copy);
    }

    return nullptr;
}
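
/*
 * Illustrative sketch (not part of this file): withCopy() snapshots a
 * range of another map into inTask. The source task and range below are
 * hypothetical; note that the init path requires kIOMemoryPageable and a
 * non-NULL task when the ranges are pre-populated this way.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::withCopy(srcTask,
 *             kIODirectionOutIn | kIOMemoryPageable,
 *             get_task_map(srcTask), srcAddr, len);
 */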


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t            mapTask = NULL;
    kalloc_heap_t     kheap = KHEAP_DATA_BUFFERS;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool              mapped = false;
    bool              withCopy = false;
    bool              mappedOrShared = false;

    /*
     * Temporarily use the default heap on Intel due to rdar://74982985
     */
#if __x86_64__
    kheap = KHEAP_DEFAULT;
#endif

    if (!capacity) {
        return false;
    }

    /*
     * The IOKit constructor requests zeroed memory from the allocator,
     * so the members of the class do not need to be explicitly zeroed.
     */
    _options = options;
    _capacity = capacity;

    if (!_ranges.v64) {
        _ranges.v64 = IOMallocType(IOAddressRange);
        _ranges.v64->address = 0;
        _ranges.v64->length = 0;
    } else {
        if (!_ranges.v64->address) {
            return false;
        }
        if (!(kIOMemoryPageable & options)) {
            return false;
        }
        if (!inTask) {
            return false;
        }
        _buffer = (void *) _ranges.v64->address;
        withCopy = true;
    }

    /*
     * Set kalloc_heap to default if the allocation contains pointers
     */
    if (kInternalFlagHasPointers & _internalFlags) {
        kheap = KHEAP_DEFAULT;
    }

    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (NULL != IOMapper::gSystem);
    }

    if (physicalMask && (alignment <= 1)) {
        // Derive alignment from the mask: the low-order zero bits of
        // physicalMask give the required alignment (e.g. a mask of
        // 0xFFFFF000 yields 4K alignment), and highestMask bounds the
        // highest acceptable physical address.
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) {
            alignment = page_size;
        }
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
        alignment = page_size;
    }

    if (alignment >= page_size) {
        if (round_page_overflow(capacity, &capacity)) {
            return false;
        }
    }

    if (alignment > page_size) {
        options |= kIOMemoryPhysicallyContiguous;
    }

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) {
        return false;
    }

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
        return false;
    }

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF) {
            mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
        } else {
            mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        }
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable) {
        if (_internalFlags & kInternalFlagGuardPages) {
            printf("IOBMD: Unsupported use of guard pages with pageable memory.\n");
            return false;
        }
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) {
            iomdOptions |= kIOMemoryBufferPurgeable;
        }
    } else {
        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so auto-prepare is kept for compatibility.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
        if (contig || highestMask || (alignment > page_size)) {
            if (_internalFlags & kInternalFlagGuardPages) {
                printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n");
                return false;
            }
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                if (round_page_overflow(capacity, &capacity)) {
                    return false;
                }
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap,
                capacity, highestMask, alignment, contig);
        } else if (_internalFlags & kInternalFlagGuardPages) {
            vm_offset_t   address = 0;
            kern_return_t kr;
            uintptr_t     alignMask;

            if (((uint32_t) alignment) != alignment) {
                return false;
            }

            alignMask = (1UL << log2up((uint32_t) alignment)) - 1;
            // Bracket the buffer with a guard page on each side so
            // overruns and underruns fault immediately.
            kr = kernel_memory_allocate(kheap->kh_fallback_map, &address,
                capacity + page_size * 2, alignMask, (kma_flags_t)(KMA_GUARD_FIRST | KMA_GUARD_LAST), IOMemoryTag(kernel_map));
            if (kr != KERN_SUCCESS || address == 0) {
                return false;
            }
#if IOALLOCDEBUG
            OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
            _buffer = (void *)(address + page_size);
        } else if (mappedOrShared
            && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
            _internalFlags |= kInternalFlagPageAllocated;
            _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator,
                &IOBMDPageProc, kheap, capacity, alignment);
            if (_buffer) {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
            }
        } else if (alignment > 1) {
            _buffer = IOMallocAligned_internal(kheap, capacity, alignment);
        } else {
            _buffer = IOMalloc_internal(kheap, capacity);
        }
        if (!_buffer) {
            return false;
        }
        bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        if (!withCopy) {
            iomdOptions |= kIOMemoryPersistent;
        }

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            if (!withCopy) {
                mapTask = inTask;
            }
            if (NULL == inTask) {
                inTask = kernel_task;
            }
        } else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
        inTask, iomdOptions, /* System mapper */ NULL)) {
        return false;
    }

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) {
        trackingAccumSize(capacity);
    }
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
        &mapSpec, sizeof(mapSpec))) {
        return false;
    }

    if (mapTask) {
        if (!reserved) {
            reserved = IOMallocType(ExpansionData);
            if (!reserved) {
                return false;
            }
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
        if (!reserved->map) {
            _buffer = NULL;
            return false;
        }
        release(); // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
            _ranges.v64->address = buffer;
        }
    }

    setLength(_capacity);

    return true;
}

bool
IOBufferMemoryDescriptor::initControlWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    _internalFlags = kInternalFlagHasPointers;
    return initWithPhysicalMask(inTask, options, capacity, alignment,
               physicalMask);
}

bool
IOBufferMemoryDescriptor::initWithGuardPages(
    task_t         inTask,
    IOOptionBits   options,
    mach_vm_size_t capacity)
{
    mach_vm_size_t roundedCapacity;

    _internalFlags = kInternalFlagGuardPages;

    if (round_page_overflow(capacity, &roundedCapacity)) {
        return false;
    }

    return initWithPhysicalMask(inTask, options, roundedCapacity, page_size,
               (mach_vm_address_t)0);
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}
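
/*
 * Illustrative sketch (not part of this file): a typical wired kernel
 * allocation via this factory. The size, alignment, and direction flags
 * are hypothetical.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
 *             kIODirectionOutIn, 4096, 4096);
 *     if (bmd) {
 *         void * p = bmd->getBytesNoCopy(); // wired kernel virtual address
 *     }
 */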

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    uint32_t     kernTag,
    uint32_t     userTag)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me) {
        me->setVMTags(kernTag, userTag);

        if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
            me.reset();
        }
    }
    return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
        me.reset();
    }
    return me;
}
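
/*
 * Illustrative sketch (not part of this file): restricting the buffer to
 * physical addresses a 32-bit DMA engine can reach. The mask is
 * hypothetical; its low-order zero bits also imply 4K alignment.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
 *             kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *             8192, 0x00000000FFFFF000ULL);
 */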

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithGuardPages(
    task_t         inTask,
    IOOptionBits   options,
    mach_vm_size_t capacity)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithGuardPages(inTask, options, capacity)) {
        me.reset();
    }
    return me;
}
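
/*
 * Illustrative sketch (not part of this file): a guard-page buffer for
 * catching overruns during bring-up. Capacity is rounded to whole pages,
 * and the options must not include pageable, contiguous, or physical-mask
 * constraints (the init path rejects those combinations).
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::inTaskWithGuardPages(kernel_task,
 *             kIODirectionOutIn, 4096);
 */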

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
    return IOBufferMemoryDescriptor::withOptions(
        inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inCapacity, inContiguous ? inCapacity : 1 );
}
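
/*
 * Illustrative sketch (not part of this file): the convenience form used
 * when no physical constraints apply; the sizes are hypothetical.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOutIn, false);
 *     if (bmd) {
 *         bmd->setLength(512); // transfer only the first 512 bytes
 *     }
 */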

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inLength, inLength, (mach_vm_address_t)0)) {
        return false;
    }

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength)) {
        return false;
    }

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(
            kernel_task, inDirection | kIOMemoryUnshared
            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength, 0 )) {
        me.reset();
    }

    if (me) {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength)) {
            me.reset();
        }
    }
    return me;
}
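
/*
 * Illustrative sketch (not part of this file): preloading a descriptor
 * from an existing kernel buffer; `cmd` is hypothetical.
 *
 *     uint8_t cmd[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd),
 *             kIODirectionOut, false);
 */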

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = NULL;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;
    kalloc_heap_t    kheap         = KHEAP_DATA_BUFFERS;

    /*
     * Temporarily use the default heap on Intel due to rdar://74982985
     */
#if __x86_64__
    kheap = KHEAP_DEFAULT;
#endif

    if (alignment >= page_size) {
        size = round_page(size);
    }

    if (reserved) {
        map = reserved->map;
        IOFreeType(reserved, ExpansionData);
        if (map) {
            map->release();
        }
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) {
        size = round_page(size);
    }

    if (internalFlags & kInternalFlagHasPointers) {
        kheap = KHEAP_DEFAULT;
    }

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) {
        trackingAccumSize(-size);
    }
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    } else if (buffer) {
        if (kInternalFlagPhysical & internalFlags) {
            IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size);
        } else if (kInternalFlagPageAllocated & internalFlags) {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page) {
                kmem_free(kheap->kh_fallback_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        } else if (kInternalFlagGuardPages & internalFlags) {
            // step back over the leading guard page and free both guards too
            vm_offset_t allocation = (vm_offset_t)buffer - page_size;
            kmem_free(kheap->kh_fallback_map, allocation, size + page_size * 2);
#if IOALLOCDEBUG
            OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        } else if (alignment > 1) {
            IOFreeAligned_internal(kheap, buffer, size);
        } else {
            IOFree_internal(kheap, buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags)) {
        IOFreeType(range, IOAddressRange);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) {
        return;
    }

    _length = length;
    _ranges.v64->length = length;
}
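
/*
 * Illustrative sketch (not part of this file): reusing one buffer for
 * transfers of different sizes; `headerSize` is hypothetical.
 *
 *     bmd->setLength(headerSize);         // send just the header first
 *     // ... issue I/O ...
 *     bmd->setLength(bmd->getCapacity()); // then reuse at full size
 */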

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task) {
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
            actualBytesToCopy);
    } else {
        writeBytes(offset, bytes, actualBytesToCopy);
    }

    return true;
}
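
/*
 * Illustrative sketch (not part of this file): building a buffer up
 * incrementally; `hdr`, `payload`, and `payloadLen` are hypothetical.
 * Copies past the remaining capacity are silently truncated.
 *
 *     bmd->setLength(0);
 *     bmd->appendBytes(&hdr, sizeof(hdr));
 *     bmd->appendBytes(payload, payloadLen);
 */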

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        return _buffer;
    } else {
        return (void *)_ranges.v64->address;
    }
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    // reject ranges whose start + length wraps around
    if ((start + withLength) < start) {
        return NULL;
    }

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        address = (IOVirtualAddress) _buffer;
    } else {
        address = _ranges.v64->address;
    }

    if (start < _length && (start + withLength) <= _length) {
        return (void *)(address + start);
    }
    return NULL;
}
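
/*
 * Illustrative sketch (not part of this file): bounds-checked access to a
 * slice of the buffer; NULL comes back when the requested range is not
 * wholly within the current length. Offsets are hypothetical.
 *
 *     uint32_t * words = (uint32_t *) bmd->getBytesNoCopy(16, 4 * sizeof(uint32_t));
 *     if (words) {
 *         // words[0..3] lie inside the descriptor's current length
 *     }
 */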

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment) {
        *lengthOfSegment = _length - offset;
    }

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);