1 /*
2 * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #define IOKIT_ENABLE_SHARED_PTR
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48
49 #include "IOKitKernelInternal.h"
50
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout_xnu.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <mach/mach_host.h>
73 #include <vm/vm_fault_xnu.h>
74 #include <vm/vm_protos.h>
75 #include <vm/vm_memory_entry.h>
76 #include <vm/vm_kern_xnu.h>
77 #include <vm/vm_iokit.h>
78 #include <vm/vm_map_xnu.h>
79 #include <kern/thread.h>
80
81 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
82 extern void ipc_port_release_send(ipc_port_t port);
83
84 __END_DECLS
85
86 #define kIOMapperWaitSystem ((IOMapper *) 1)
87
88 static IOMapper * gIOSystemMapper = NULL;
89
90 ppnum_t gIOLastPage;
91
92 enum {
93 kIOMapGuardSizeLarge = 65536
94 };
95
96 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97
98 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
99
100 #define super IOMemoryDescriptor
101
102 OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
103 IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
104
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107 static IORecursiveLock * gIOMemoryLock;
108
109 #define LOCK IORecursiveLockLock( gIOMemoryLock)
110 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
111 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
112 #define WAKEUP \
113 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
114
115 #if 0
116 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
117 #else
118 #define DEBG(fmt, args...) {}
119 #endif
120
121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122
123 // Some data structures and accessor macros used by the initWithOptions
124 // Function
125
// Flags stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001, // range is device memory (no page list backing)
	kIOPLExternUPL = 0x00000002, // UPL was supplied externally, not created here
};

// Initialization data used when re-creating a descriptor from a persistent
// (named-entry based) memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;     // descriptor the reference was made from
	IOMemoryReference               * fMemRef; // the persistent memory reference
};

// Book-keeping for one wired UPL covering a contiguous piece of the descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;   // Pointer to page list or index into it
	uint64_t fIOMDOffset;     // The offset of this iopl in descriptor
	ppnum_t fMappedPage;      // Page number of first page in this iopl
	unsigned int fPageOffset; // Offset within first page of iopl
	unsigned int fFlags;      // Flags (ioPLBlockFlags)
};
144
enum { kMaxWireTags = 6 };

// Per-descriptor private data kept in _memoryEntries while a
// IOGeneralMemoryDescriptor is prepared/wired.  A variable-length
// upl_page_info_t array follows the fixed fields, and an array of
// ioPLBlock records follows that (see getIOPLList()/getPageList()).
struct ioGMDData {
	IOMapper *            fMapper;            // system/DART mapper used for DMA
	uint64_t              fDMAMapAlignment;
	uint64_t              fMappedBase;        // IOVA of the DMA mapping (valid if fMappedBaseValid)
	uint64_t              fMappedLength;
	uint64_t              fPreparationID;
#if IOTRACKING
	IOTracking            fWireTracking;
#endif /* IOTRACKING */
	unsigned int          fPageCnt;           // number of entries in fPageList
	uint8_t               fDMAMapNumAddressBits;
	unsigned char         fCompletionError:1;
	unsigned char         fMappedBaseValid:1;
	unsigned char         _resv:4;
	unsigned char         fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
172
173 #pragma GCC visibility push(hidden)
174
// A simple growable byte buffer (used to hold ioGMDData plus its trailing
// variable-length arrays).  Method bodies are defined elsewhere in the file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer pre-sized to hold 'capacity' bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append 'length' bytes; grows the backing storage as needed.
	bool appendBytes(const void * bytes, size_t length);
	// Set the logical length (within capacity).
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void * _data = nullptr;   // backing storage
	size_t _length = 0;       // bytes in use
	size_t _capacity = 0;     // bytes allocated
};
197
198 #pragma GCC visibility pop
199
200 #define getDataP(osd) ((ioGMDData *) (osd)->getBytes())
201 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
202 #define getNumIOPL(osd, d) \
203 ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
204 #define getPageList(d) (&(d->fPageList[0]))
205 #define computeDataSize(p, u) \
206 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
207
208 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
209
210 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211
212 extern "C" {
/*
 * Device pager callout: called by the VM to fault data for a device-backed
 * memory object.  'device_handle' is the IOMemoryDescriptorReserved that was
 * registered with device_pager_setup().  Takes the global memory lock so the
 * descriptor cannot be torn down while the fault is serviced; if the
 * descriptor has already been detached (dp.memory == NULL) the fault is
 * aborted.
 */
kern_return_t
device_data_action(
	uintptr_t device_handle,
	ipc_port_t device_pager,
	vm_prot_t protection,
	vm_object_offset_t offset,
	vm_size_t size)
{
	kern_return_t kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// Hold an extra retain across handleFault() so the descriptor
		// stays alive even if it is released concurrently.
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}
237
238 kern_return_t
device_close(uintptr_t device_handle)239 device_close(
240 uintptr_t device_handle)
241 {
242 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
243
244 IOFreeType( ref, IOMemoryDescriptorReserved );
245
246 return kIOReturnSuccess;
247 }
248 }; // end extern "C"
249
250 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251
252 // Note this inline function uses C++ reference arguments to return values
253 // This means that pointers are not passed and NULLs don't have to be
254 // checked for as a NULL reference is illegal.
/*
 * Fetch the address/length pair for range index 'ind' of a descriptor's
 * ranges, decoding according to the descriptor's memory type (uio list,
 * 64-bit ranges, or native IOVirtualRange).  Results are returned through
 * the 'addr'/'len' reference parameters.
 */
static inline void
getAddrLenForInd(
	mach_vm_address_t &addr,
	mach_vm_size_t &len, // Output variables
	UInt32 type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32 ind,
	task_t task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	// On ILP32, 64-bit ranges use the separate IOAddressRange layout.
	// (On LP64, v64 and v share a layout, so the else branch handles both.)
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	// Kernel addresses may be PGZ-sampled aliases; translate back to the
	// canonical address before use.
	if (task == kernel_task) {
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
290
291 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
292
293 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)294 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
295 {
296 IOReturn err = kIOReturnSuccess;
297
298 *control = VM_PURGABLE_SET_STATE;
299
300 enum { kIOMemoryPurgeableControlMask = 15 };
301
302 switch (kIOMemoryPurgeableControlMask & newState) {
303 case kIOMemoryPurgeableKeepCurrent:
304 *control = VM_PURGABLE_GET_STATE;
305 break;
306
307 case kIOMemoryPurgeableNonVolatile:
308 *state = VM_PURGABLE_NONVOLATILE;
309 break;
310 case kIOMemoryPurgeableVolatile:
311 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
312 break;
313 case kIOMemoryPurgeableEmpty:
314 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
315 break;
316 default:
317 err = kIOReturnBadArgument;
318 break;
319 }
320
321 if (*control == VM_PURGABLE_SET_STATE) {
322 // let VM know this call is from the kernel and is allowed to alter
323 // the volatility of the memory entry even if it was created with
324 // MAP_MEM_PURGABLE_KERNEL_ONLY
325 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
326 }
327
328 return err;
329 }
330
331 static IOReturn
purgeableStateBits(int * state)332 purgeableStateBits(int * state)
333 {
334 IOReturn err = kIOReturnSuccess;
335
336 switch (VM_PURGABLE_STATE_MASK & *state) {
337 case VM_PURGABLE_NONVOLATILE:
338 *state = kIOMemoryPurgeableNonVolatile;
339 break;
340 case VM_PURGABLE_VOLATILE:
341 *state = kIOMemoryPurgeableVolatile;
342 break;
343 case VM_PURGABLE_EMPTY:
344 *state = kIOMemoryPurgeableEmpty;
345 break;
346 default:
347 *state = kIOMemoryPurgeableNonVolatile;
348 err = kIOReturnNotReady;
349 break;
350 }
351 return err;
352 }
353
// Maps an IOKit cache mode (kIO*Cache, used as the array index below) to
// the corresponding VM WIMG pager bits and named-entry object type.
typedef struct {
	unsigned int wimg;        // VM_WIMG_* pager/cacheability flags
	unsigned int object_type; // MAP_MEM_* named-entry memory object type
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
371
372 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)373 vmProtForCacheMode(IOOptionBits cacheMode)
374 {
375 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
377 cacheMode = kIODefaultCache;
378 }
379 vm_prot_t prot = 0;
380 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
381 return prot;
382 }
383
384 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)385 pagerFlagsForCacheMode(IOOptionBits cacheMode)
386 {
387 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
388 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
389 cacheMode = kIODefaultCache;
390 }
391 if (cacheMode == kIODefaultCache) {
392 return -1U;
393 }
394 return iomd_mem_types[cacheMode].wimg;
395 }
396
397 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)398 cacheModeForPagerFlags(unsigned int pagerFlags)
399 {
400 pagerFlags &= VM_WIMG_MASK;
401 IOOptionBits cacheMode = kIODefaultCache;
402 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
403 if (iomd_mem_types[i].wimg == pagerFlags) {
404 cacheMode = i;
405 break;
406 }
407 }
408 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
409 }
410
411 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413
// One VM named entry making up part of a memory reference.
struct IOMemoryEntry {
	ipc_port_t entry;  // send right to the named entry port
	int64_t offset;    // byte offset of this entry within the descriptor
	uint64_t size;     // size covered by this entry
	uint64_t start;    // source address the entry was created at
};

// Refcounted set of named entries describing a descriptor's memory;
// created by memoryReferenceCreate() and shared via kIOMemoryReferenceReuse.
struct IOMemoryReference {
	volatile SInt32 refCount;
	vm_prot_t prot;                    // prot/MAP_MEM_* bits the entries were made with
	uint32_t capacity;                 // allocated slots in entries[]
	uint32_t count;                    // slots in use
	struct IOMemoryReference * mapRef; // optional chained COW reference
	IOMemoryEntry entries[0];          // variable-length entry array
};

// Option bits for memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001, // try to share the existing _memRef
	kIOMemoryReferenceWrite = 0x00000002, // require write access
	kIOMemoryReferenceCOW   = 0x00000004, // create copy-on-write entries
};
435
436 SInt32 gIOMemoryReferenceCount;
437
438 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)439 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
440 {
441 IOMemoryReference * ref;
442 size_t oldCapacity;
443
444 if (realloc) {
445 oldCapacity = realloc->capacity;
446 } else {
447 oldCapacity = 0;
448 }
449
450 // Use the kalloc API instead of manually handling the reallocation
451 ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
452 oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
453 if (ref) {
454 if (oldCapacity == 0) {
455 ref->refCount = 1;
456 OSIncrementAtomic(&gIOMemoryReferenceCount);
457 }
458 ref->capacity = capacity;
459 }
460 return ref;
461 }
462
463 void
memoryReferenceFree(IOMemoryReference * ref)464 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
465 {
466 IOMemoryEntry * entries;
467
468 if (ref->mapRef) {
469 memoryReferenceFree(ref->mapRef);
470 ref->mapRef = NULL;
471 }
472
473 entries = ref->entries + ref->count;
474 while (entries > &ref->entries[0]) {
475 entries--;
476 ipc_port_release_send(entries->entry);
477 }
478 kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
479
480 OSDecrementAtomic(&gIOMemoryReferenceCount);
481 }
482
483 void
memoryReferenceRelease(IOMemoryReference * ref)484 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
485 {
486 if (1 == OSDecrementAtomic(&ref->refCount)) {
487 memoryReferenceFree(ref);
488 }
489 }
490
491
/*
 * Build an IOMemoryReference describing this descriptor's memory as a set
 * of VM named entries.  For task-backed descriptors, coalesced address
 * ranges are turned into one named entry each (growing the entry array as
 * needed); for physical descriptors a single device-pager-backed entry is
 * created.  With kIOMemoryReferenceReuse, a new reference whose entries all
 * match the existing _memRef is discarded in favor of sharing _memRef.
 * On success *reference holds the resulting reference; on failure it is NULL.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t err;
	IOMemoryReference * ref;
	IOMemoryEntry * entries;
	IOMemoryEntry * cloneEntries = NULL;
	vm_map_t map;
	ipc_port_t entry, cloneEntry;
	vm_prot_t prot;
	memory_object_size_t actualSize;
	uint32_t rangeIdx;
	uint32_t count;
	mach_vm_address_t entryAddr, endAddr, entrySize;
	mach_vm_size_t srcAddr, srcLen;
	mach_vm_size_t nextAddr, nextLen;
	mach_vm_size_t offset, remain;
	vm_map_offset_t overmap_start = 0, overmap_end = 0;
	int misaligned_start = 0, misaligned_end = 0;
	IOByteCount physLen;
	IOOptionBits type = (_flags & kIOMemoryTypeMask);
	IOOptionBits cacheMode;
	unsigned int pagerFlags;
	vm_tag_t tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		// Walk the existing reference's entries in parallel; drop
		// MAP_MEM_NAMED_REUSE below as soon as any entry differs.
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			// + "none" ledger tag
			// + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					// + "network" ledger tag
					// + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					// + no ledger tag
					// + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		while (remain) {
			srcAddr = nextAddr;
			srcLen = nextLen;
			nextAddr = 0;
			nextLen = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// One named entry per iteration; the VM may return an entry
			// smaller than requested, so loop until the range is covered.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				mach_vm_offset_t entryAddrForVm = entryAddr;
				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				if (count >= ref->capacity) {
					// Grow the entry array; 'ref' may move.
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry = entry;
				entries->size = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry = entry;
				entries->size = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot = prot;

	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		// Chain a COW reference used when the descriptor is mapped.
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			// All entries matched the existing _memRef: discard the
			// new reference and share the old one.
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
813
814 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816 {
817 switch (kIOMapGuardedMask & options) {
818 default:
819 case kIOMapGuardedSmall:
820 return vm_map_page_size(map);
821 case kIOMapGuardedLarge:
822 assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823 return kIOMapGuardSizeLarge;
824 }
825 ;
826 }
827
828 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830 vm_map_offset_t addr, mach_vm_size_t size)
831 {
832 kern_return_t kr;
833 vm_map_offset_t actualAddr;
834 mach_vm_size_t actualSize;
835
836 actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837 actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838
839 if (kIOMapGuardedMask & options) {
840 mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841 actualAddr -= guardSize;
842 actualSize += 2 * guardSize;
843 }
844 kr = mach_vm_deallocate(map, actualAddr, actualSize);
845
846 return kr;
847 }
848
/*
 * Map-allocation callback (also used via IOIteratePageableMaps): reserves
 * ref->size bytes in 'map' according to ref->options and ref->prot, tagging
 * the allocation with ref->tag.  For guarded allocations (kIOMapGuardedMask,
 * which require kIOMapAnywhere) a guard region is added on each side and
 * protected VM_PROT_NONE; ref->mapped is then advanced past the leading
 * guard.  On success ref->mapped/ref->map describe the usable range.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn err;
	vm_map_offset_t addr;
	mach_vm_size_t size;
	mach_vm_size_t guardSize;
	vm_map_kernel_flags_t vmk_flags;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// Guarded mappings only support VM-chosen addresses.
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use a dedicated range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_IOKIT;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false, /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Protect the first and last guardSize bytes of the
			// allocation so any access traps.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
915
916 IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917 IOGeneralMemoryDescriptor::memoryReferenceMap(
918 IOMemoryReference * ref,
919 vm_map_t map,
920 mach_vm_size_t inoffset,
921 mach_vm_size_t size,
922 IOOptionBits options,
923 mach_vm_address_t * inaddr)
924 {
925 IOReturn err;
926 int64_t offset = inoffset;
927 uint32_t rangeIdx, entryIdx;
928 vm_map_offset_t addr, mapAddr;
929 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930
931 mach_vm_address_t nextAddr;
932 mach_vm_size_t nextLen;
933 IOByteCount physLen;
934 IOMemoryEntry * entry;
935 vm_prot_t prot, memEntryCacheMode;
936 IOOptionBits type;
937 IOOptionBits cacheMode;
938 vm_tag_t tag;
939 // for the kIOMapPrefault option.
940 upl_page_info_t * pageList = NULL;
941 UInt currentPageIndex = 0;
942 bool didAlloc;
943
944 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945
946 if (ref->mapRef) {
947 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948 return err;
949 }
950
951 if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952 err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953 return err;
954 }
955
956 type = _flags & kIOMemoryTypeMask;
957
958 prot = VM_PROT_READ;
959 if (!(kIOMapReadOnly & options)) {
960 prot |= VM_PROT_WRITE;
961 }
962 prot &= ref->prot;
963
964 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965 if (kIODefaultCache != cacheMode) {
966 // VM system requires write access to update named entry cache mode
967 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968 }
969
970 tag = (typeof(tag))getVMTag(map);
971
972 if (_task) {
973 // Find first range for offset
974 if (!_rangesCount) {
975 return kIOReturnBadArgument;
976 }
977 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979 if (remain < nextLen) {
980 break;
981 }
982 remain -= nextLen;
983 }
984 } else {
985 rangeIdx = 0;
986 remain = 0;
987 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988 nextLen = size;
989 }
990
991 assert(remain < nextLen);
992 if (remain >= nextLen) {
993 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994 return kIOReturnBadArgument;
995 }
996
997 nextAddr += remain;
998 nextLen -= remain;
999 #if __ARM_MIXED_PAGE_SIZE__
1000 pageOffset = (vm_map_page_mask(map) & nextAddr);
1001 #else /* __ARM_MIXED_PAGE_SIZE__ */
1002 pageOffset = (page_mask & nextAddr);
1003 #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004 addr = 0;
1005 didAlloc = false;
1006
1007 if (!(options & kIOMapAnywhere)) {
1008 addr = *inaddr;
1009 if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011 }
1012 addr -= pageOffset;
1013 }
1014
1015 // find first entry for offset
1016 for (entryIdx = 0;
1017 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018 entryIdx++) {
1019 }
1020 entryIdx--;
1021 entry = &ref->entries[entryIdx];
1022
1023 // allocate VM
1024 #if __ARM_MIXED_PAGE_SIZE__
1025 size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026 #else
1027 size = round_page_64(size + pageOffset);
1028 #endif
1029 if (kIOMapOverwrite & options) {
1030 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031 map = IOPageableMapForAddress(addr);
1032 }
1033 err = KERN_SUCCESS;
1034 } else {
1035 IOMemoryDescriptorMapAllocRef ref;
1036 ref.map = map;
1037 ref.tag = tag;
1038 ref.options = options;
1039 ref.size = size;
1040 ref.prot = prot;
1041 if (options & kIOMapAnywhere) {
1042 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043 ref.mapped = 0;
1044 } else {
1045 ref.mapped = addr;
1046 }
1047 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049 } else {
1050 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051 }
1052 if (KERN_SUCCESS == err) {
1053 addr = ref.mapped;
1054 map = ref.map;
1055 didAlloc = true;
1056 }
1057 }
1058
1059 /*
1060 * If the memory is associated with a device pager but doesn't have a UPL,
1061 * it will be immediately faulted in through the pager via populateDevicePager().
1062 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063 * operations.
1064 */
1065 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066 options &= ~kIOMapPrefault;
1067 }
1068
1069 /*
1070 * Prefaulting is only possible if we wired the memory earlier. Check the
1071 * memory type, and the underlying data.
1072 */
1073 if (options & kIOMapPrefault) {
1074 /*
1075 * The memory must have been wired by calling ::prepare(), otherwise
1076 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077 */
1078 assert(_wireCount != 0);
1079 assert(_memoryEntries != NULL);
1080 if ((_wireCount == 0) ||
1081 (_memoryEntries == NULL)) {
1082 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083 return kIOReturnBadArgument;
1084 }
1085
1086 // Get the page list.
1087 ioGMDData* dataP = getDataP(_memoryEntries);
1088 ioPLBlock const* ioplList = getIOPLList(dataP);
1089 pageList = getPageList(dataP);
1090
1091 // Get the number of IOPLs.
1092 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093
1094 /*
1095 * Scan through the IOPL Info Blocks, looking for the first block containing
1096 * the offset. The research will go past it, so we'll need to go back to the
1097 * right range at the end.
1098 */
1099 UInt ioplIndex = 0;
1100 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101 ioplIndex++;
1102 }
1103 ioplIndex--;
1104
1105 // Retrieve the IOPL info block.
1106 ioPLBlock ioplInfo = ioplList[ioplIndex];
1107
1108 /*
1109 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110 * array.
1111 */
1112 if (ioplInfo.fFlags & kIOPLExternUPL) {
1113 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114 } else {
1115 pageList = &pageList[ioplInfo.fPageInfo];
1116 }
1117
1118 // Rebase [offset] into the IOPL in order to looks for the first page index.
1119 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120
1121 // Retrieve the index of the first page corresponding to the offset.
1122 currentPageIndex = atop_32(offsetInIOPL);
1123 }
1124
1125 // enter mappings
1126 remain = size;
1127 mapAddr = addr;
1128 addr += pageOffset;
1129
1130 while (remain && (KERN_SUCCESS == err)) {
1131 entryOffset = offset - entry->offset;
1132 if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133 err = kIOReturnNotAligned;
1134 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135 break;
1136 }
1137
1138 if (kIODefaultCache != cacheMode) {
1139 vm_size_t unused = 0;
1140 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141 memEntryCacheMode, NULL, entry->entry);
1142 assert(KERN_SUCCESS == err);
1143 }
1144
1145 entryOffset -= pageOffset;
1146 if (entryOffset >= entry->size) {
1147 panic("entryOffset");
1148 }
1149 chunk = entry->size - entryOffset;
1150 if (chunk) {
1151 vm_map_kernel_flags_t vmk_flags = {
1152 .vmf_fixed = true,
1153 .vmf_overwrite = true,
1154 .vm_tag = tag,
1155 .vmkf_iokit_acct = true,
1156 };
1157
1158 if (chunk > remain) {
1159 chunk = remain;
1160 }
1161 if (options & kIOMapPrefault) {
1162 UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163
1164 err = vm_map_enter_mem_object_prefault(map,
1165 &mapAddr,
1166 chunk, 0 /* mask */,
1167 vmk_flags,
1168 entry->entry,
1169 entryOffset,
1170 prot, // cur
1171 prot, // max
1172 &pageList[currentPageIndex],
1173 nb_pages);
1174
1175 if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176 DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177 }
1178 // Compute the next index in the page list.
1179 currentPageIndex += nb_pages;
1180 assert(currentPageIndex <= _pages);
1181 } else {
1182 err = mach_vm_map_kernel(map,
1183 &mapAddr,
1184 chunk, 0 /* mask */,
1185 vmk_flags,
1186 entry->entry,
1187 entryOffset,
1188 false, // copy
1189 prot, // cur
1190 prot, // max
1191 VM_INHERIT_NONE);
1192 }
1193 if (KERN_SUCCESS != err) {
1194 DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195 break;
1196 }
1197 remain -= chunk;
1198 if (!remain) {
1199 break;
1200 }
1201 mapAddr += chunk;
1202 offset += chunk - pageOffset;
1203 }
1204 pageOffset = 0;
1205 entry++;
1206 entryIdx++;
1207 if (entryIdx >= ref->count) {
1208 err = kIOReturnOverrun;
1209 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210 break;
1211 }
1212 }
1213
1214 if ((KERN_SUCCESS != err) && didAlloc) {
1215 (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216 addr = 0;
1217 }
1218 *inaddr = addr;
1219
1220 if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221 DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222 }
1223 return err;
1224 }
1225
1226 #define LOGUNALIGN 0
/*
 * Map the memory described by an IOMemoryReference into @map, entry by
 * entry, using per-entry map-space sizes so that entries need not be
 * page-aligned in the target map (the "new" unaligned mapping path).
 *
 * @param ref      Memory reference whose named entries are mapped.
 * @param map      Target VM map; may be replaced by a pageable kernel map
 *                 for pageable buffer descriptors.
 * @param inoffset Byte offset into the reference where the mapping starts.
 * @param size     Number of bytes to map.
 * @param options  kIOMap* options (anywhere/fixed, cache mode, prefault,
 *                 overwrite, read-only, ...).
 * @param inaddr   In: requested address unless kIOMapAnywhere.
 *                 Out: the address actually mapped.
 *
 * @return kIOReturnSuccess or a mapping/allocation error; on failure any
 *         VM space allocated here is deallocated again before returning.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t map,
	mach_vm_size_t inoffset,
	mach_vm_size_t size,
	IOOptionBits options,
	mach_vm_address_t * inaddr)
{
	IOReturn err;
	int64_t offset = inoffset;
	uint32_t entryIdx, firstEntryIdx;
	vm_map_offset_t addr, mapAddr, mapAddrOut;
	vm_map_offset_t entryOffset, remain, chunk;

	IOMemoryEntry * entry;
	vm_prot_t prot, memEntryCacheMode;
	IOOptionBits type;
	IOOptionBits cacheMode;
	vm_tag_t tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt currentPageIndex = 0;
	bool didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference wrapping another reference is mapped via the older path.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	// Mapping protection: writable unless read-only was requested, and
	// never wider than the protection carried by the reference itself.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr = 0;
	didAlloc = false;

	// Fixed (non-anywhere) mappings start from the caller-provided address.
	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}

#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			// Ask the VM how much target-map space this chunk needs;
			// with mismatched page sizes it can differ from chunk.
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			panic("overrun");
			err = kIOReturnOverrun;
			break;
		}
	}

	// Reserve (or, for kIOMapOverwrite, reuse) the destination VM range.
	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map = map;
		ref.tag = tag;
		ref.options = options;
		ref.size = mapSize;
		ref.prot = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr = ref.mapped;
			map = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain = size;
	mapAddr = addr;
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			// MAP_MEM_ONLY updates the named entry's cache mode in place.
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vmf_return_data_addr = true,
				.vm_tag = tag,
				.vmkf_iokit_acct = true,
			};

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    prot, // cur
				    prot, // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
				err = mach_vm_map_kernel(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    false, // copy
				    prot, // cur
				    prot, // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			// The first chunk fixes the mapping's real start address
			// (vmf_return_data_addr may shift it from the reserved base).
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			// Advance by this entry's map-space footprint, not by chunk.
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
			offset += chunk;
		}

		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	// On failure, release any VM space this routine allocated.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
1546
1547 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549 IOMemoryReference * ref,
1550 uint64_t * offset)
1551 {
1552 kern_return_t kr;
1553 vm_object_offset_t data_offset = 0;
1554 uint64_t total;
1555 uint32_t idx;
1556
1557 assert(ref->count);
1558 if (offset) {
1559 *offset = (uint64_t) data_offset;
1560 }
1561 total = 0;
1562 for (idx = 0; idx < ref->count; idx++) {
1563 kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564 &data_offset);
1565 if (KERN_SUCCESS != kr) {
1566 DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567 } else if (0 != data_offset) {
1568 DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569 }
1570 if (offset && !idx) {
1571 *offset = (uint64_t) data_offset;
1572 }
1573 total += round_page(data_offset + ref->entries[idx].size);
1574 }
1575
1576 DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577 (offset ? *offset : (vm_object_offset_t)-1), total);
1578
1579 return total;
1580 }
1581
1582
1583 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585 IOMemoryReference * ref,
1586 IOByteCount * residentPageCount,
1587 IOByteCount * dirtyPageCount)
1588 {
1589 IOReturn err;
1590 IOMemoryEntry * entries;
1591 unsigned int resident, dirty;
1592 unsigned int totalResident, totalDirty;
1593
1594 totalResident = totalDirty = 0;
1595 err = kIOReturnSuccess;
1596 entries = ref->entries + ref->count;
1597 while (entries > &ref->entries[0]) {
1598 entries--;
1599 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600 if (KERN_SUCCESS != err) {
1601 break;
1602 }
1603 totalResident += resident;
1604 totalDirty += dirty;
1605 }
1606
1607 if (residentPageCount) {
1608 *residentPageCount = totalResident;
1609 }
1610 if (dirtyPageCount) {
1611 *dirtyPageCount = totalDirty;
1612 }
1613 return err;
1614 }
1615
1616 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618 IOMemoryReference * ref,
1619 IOOptionBits newState,
1620 IOOptionBits * oldState)
1621 {
1622 IOReturn err;
1623 IOMemoryEntry * entries;
1624 vm_purgable_t control;
1625 int totalState, state;
1626
1627 totalState = kIOMemoryPurgeableNonVolatile;
1628 err = kIOReturnSuccess;
1629 entries = ref->entries + ref->count;
1630 while (entries > &ref->entries[0]) {
1631 entries--;
1632
1633 err = purgeableControlBits(newState, &control, &state);
1634 if (KERN_SUCCESS != err) {
1635 break;
1636 }
1637 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638 if (KERN_SUCCESS != err) {
1639 break;
1640 }
1641 err = purgeableStateBits(&state);
1642 if (KERN_SUCCESS != err) {
1643 break;
1644 }
1645
1646 if (kIOMemoryPurgeableEmpty == state) {
1647 totalState = kIOMemoryPurgeableEmpty;
1648 } else if (kIOMemoryPurgeableEmpty == totalState) {
1649 continue;
1650 } else if (kIOMemoryPurgeableVolatile == totalState) {
1651 continue;
1652 } else if (kIOMemoryPurgeableVolatile == state) {
1653 totalState = kIOMemoryPurgeableVolatile;
1654 } else {
1655 totalState = kIOMemoryPurgeableNonVolatile;
1656 }
1657 }
1658
1659 if (oldState) {
1660 *oldState = totalState;
1661 }
1662 return err;
1663 }
1664
1665 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667 IOMemoryReference * ref,
1668 task_t newOwner,
1669 int newLedgerTag,
1670 IOOptionBits newLedgerOptions)
1671 {
1672 IOReturn err, totalErr;
1673 IOMemoryEntry * entries;
1674
1675 totalErr = kIOReturnSuccess;
1676 entries = ref->entries + ref->count;
1677 while (entries > &ref->entries[0]) {
1678 entries--;
1679
1680 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681 if (KERN_SUCCESS != err) {
1682 totalErr = err;
1683 }
1684 }
1685
1686 return totalErr;
1687 }
1688
1689 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690
1691 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692 IOMemoryDescriptor::withAddress(void * address,
1693 IOByteCount length,
1694 IODirection direction)
1695 {
1696 return IOMemoryDescriptor::
1697 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698 }
1699
1700 #ifndef __LP64__
1701 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703 IOByteCount length,
1704 IODirection direction,
1705 task_t task)
1706 {
1707 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708 if (that) {
1709 if (that->initWithAddress(address, length, direction, task)) {
1710 return os::move(that);
1711 }
1712 }
1713 return nullptr;
1714 }
1715 #endif /* !__LP64__ */
1716
1717 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718 IOMemoryDescriptor::withPhysicalAddress(
1719 IOPhysicalAddress address,
1720 IOByteCount length,
1721 IODirection direction )
1722 {
1723 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724 }
1725
1726 #ifndef __LP64__
1727 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729 UInt32 withCount,
1730 IODirection direction,
1731 task_t task,
1732 bool asReference)
1733 {
1734 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735 if (that) {
1736 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737 return os::move(that);
1738 }
1739 }
1740 return nullptr;
1741 }
1742 #endif /* !__LP64__ */
1743
1744 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746 mach_vm_size_t length,
1747 IOOptionBits options,
1748 task_t task)
1749 {
1750 IOAddressRange range = { address, length };
1751 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752 }
1753
1754 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1756 UInt32 rangeCount,
1757 IOOptionBits options,
1758 task_t task)
1759 {
1760 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761 if (that) {
1762 if (task) {
1763 options |= kIOMemoryTypeVirtual64;
1764 } else {
1765 options |= kIOMemoryTypePhysical64;
1766 }
1767
1768 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769 return os::move(that);
1770 }
1771 }
1772
1773 return nullptr;
1774 }
1775
1776
1777 /*
1778 * withOptions:
1779 *
1780 * Create a new IOMemoryDescriptor. The buffer is made up of several
1781 * virtual address ranges, from a given task.
1782 *
1783 * Passing the ranges as a reference will avoid an extra allocation.
1784 */
1785 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786 IOMemoryDescriptor::withOptions(void * buffers,
1787 UInt32 count,
1788 UInt32 offset,
1789 task_t task,
1790 IOOptionBits opts,
1791 IOMapper * mapper)
1792 {
1793 OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794
1795 if (self
1796 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797 return nullptr;
1798 }
1799
1800 return os::move(self);
1801 }
1802
1803 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1804 IOMemoryDescriptor::initWithOptions(void * buffers,
1805 UInt32 count,
1806 UInt32 offset,
1807 task_t task,
1808 IOOptionBits options,
1809 IOMapper * mapper)
1810 {
1811 return false;
1812 }
1813
1814 #ifndef __LP64__
1815 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817 UInt32 withCount,
1818 IODirection direction,
1819 bool asReference)
1820 {
1821 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822 if (that) {
1823 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824 return os::move(that);
1825 }
1826 }
1827 return nullptr;
1828 }
1829
1830 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1832 IOByteCount offset,
1833 IOByteCount length,
1834 IODirection direction)
1835 {
1836 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837 }
1838 #endif /* !__LP64__ */
1839
1840 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842 {
1843 IOGeneralMemoryDescriptor *origGenMD =
1844 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845
1846 if (origGenMD) {
1847 return IOGeneralMemoryDescriptor::
1848 withPersistentMemoryDescriptor(origGenMD);
1849 } else {
1850 return nullptr;
1851 }
1852 }
1853
/*
 * Create (or reuse) a descriptor that shares the same persistent backing
 * memory as @originalMD. If the original's existing memory reference can
 * be reused directly, the original itself is returned retained; otherwise
 * a new descriptor is built around the new reference.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Take a reusable reference on the original's backing memory.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Same reference came back: the original can be shared as-is. Drop the
	// extra reference and return the original, retained.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise wrap the new reference in a fresh descriptor; initWithOptions
	// takes ownership of memRef via the init data.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if allocation or init fails here, memRef does not appear
	// to be released on this path — TODO confirm ownership semantics.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1879
1880 #ifndef __LP64__
1881 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1883 IOByteCount withLength,
1884 IODirection withDirection)
1885 {
1886 _singleRange.v.address = (vm_offset_t) address;
1887 _singleRange.v.length = withLength;
1888
1889 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890 }
1891
1892 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894 IOByteCount withLength,
1895 IODirection withDirection,
1896 task_t withTask)
1897 {
1898 _singleRange.v.address = address;
1899 _singleRange.v.length = withLength;
1900
1901 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902 }
1903
1904 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906 IOPhysicalAddress address,
1907 IOByteCount withLength,
1908 IODirection withDirection )
1909 {
1910 _singleRange.p.address = address;
1911 _singleRange.p.length = withLength;
1912
1913 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914 }
1915
1916 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918 IOPhysicalRange * ranges,
1919 UInt32 count,
1920 IODirection direction,
1921 bool reference)
1922 {
1923 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924
1925 if (reference) {
1926 mdOpts |= kIOMemoryAsReference;
1927 }
1928
1929 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930 }
1931
1932 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933 IOGeneralMemoryDescriptor::initWithRanges(
1934 IOVirtualRange * ranges,
1935 UInt32 count,
1936 IODirection direction,
1937 task_t task,
1938 bool reference)
1939 {
1940 IOOptionBits mdOpts = direction;
1941
1942 if (reference) {
1943 mdOpts |= kIOMemoryAsReference;
1944 }
1945
1946 if (task) {
1947 mdOpts |= kIOMemoryTypeVirtual;
1948
1949 // Auto-prepare if this is a kernel memory descriptor as very few
1950 // clients bother to prepare() kernel memory.
1951 // But it was not enforced so what are you going to do?
1952 if (task == kernel_task) {
1953 mdOpts |= kIOMemoryAutoPrepare;
1954 }
1955 } else {
1956 mdOpts |= kIOMemoryTypePhysical;
1957 }
1958
1959 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960 }
1961 #endif /* !__LP64__ */
1962
1963 /*
1964 * initWithOptions:
1965 *
1966 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967 * from a given task, several physical ranges, an UPL from the ubc
1968 * system or a uio (may be 64bit) from the BSD subsystem.
1969 *
1970 * Passing the ranges as a reference will avoid an extra allocation.
1971 *
1972 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973 * existing instance -- note this behavior is not commonly supported in other
1974 * I/O Kit classes, although it is supported here.
1975 */
1976
1977 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1978 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1979 UInt32 count,
1980 UInt32 offset,
1981 task_t task,
1982 IOOptionBits options,
1983 IOMapper * mapper)
1984 {
1985 IOOptionBits type = options & kIOMemoryTypeMask;
1986
1987 #ifndef __LP64__
1988 if (task
1989 && (kIOMemoryTypeVirtual == type)
1990 && vm_map_is_64bit(get_task_map(task))
1991 && ((IOVirtualRange *) buffers)->address) {
1992 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993 return false;
1994 }
1995 #endif /* !__LP64__ */
1996
1997 // Grab the original MD's configuation data to initialse the
1998 // arguments to this function.
1999 if (kIOMemoryTypePersistentMD == type) {
2000 IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001 const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002 ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003
2004 // Only accept persistent memory descriptors with valid dataP data.
2005 assert(orig->_rangesCount == 1);
2006 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007 return false;
2008 }
2009
2010 _memRef = initData->fMemRef; // Grab the new named entry
2011 options = orig->_flags & ~kIOMemoryAsReference;
2012 type = options & kIOMemoryTypeMask;
2013 buffers = orig->_ranges.v;
2014 count = orig->_rangesCount;
2015
2016 // Now grab the original task and whatever mapper was previously used
2017 task = orig->_task;
2018 mapper = dataP->fMapper;
2019
2020 // We are ready to go through the original initialisation now
2021 }
2022
2023 switch (type) {
2024 case kIOMemoryTypeUIO:
2025 case kIOMemoryTypeVirtual:
2026 #ifndef __LP64__
2027 case kIOMemoryTypeVirtual64:
2028 #endif /* !__LP64__ */
2029 assert(task);
2030 if (!task) {
2031 return false;
2032 }
2033 break;
2034
2035 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2036 #ifndef __LP64__
2037 case kIOMemoryTypePhysical64:
2038 #endif /* !__LP64__ */
2039 case kIOMemoryTypeUPL:
2040 assert(!task);
2041 break;
2042 default:
2043 return false; /* bad argument */
2044 }
2045
2046 assert(buffers);
2047 assert(count);
2048
2049 /*
2050 * We can check the _initialized instance variable before having ever set
2051 * it to an initial value because I/O Kit guarantees that all our instance
2052 * variables are zeroed on an object's allocation.
2053 */
2054
2055 if (_initialized) {
2056 /*
2057 * An existing memory descriptor is being retargeted to point to
2058 * somewhere else. Clean up our present state.
2059 */
2060 IOOptionBits type = _flags & kIOMemoryTypeMask;
2061 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062 while (_wireCount) {
2063 complete();
2064 }
2065 }
2066 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067 if (kIOMemoryTypeUIO == type) {
2068 uio_free((uio_t) _ranges.v);
2069 }
2070 #ifndef __LP64__
2071 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073 }
2074 #endif /* !__LP64__ */
2075 else {
2076 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077 }
2078 }
2079
2080 options |= (kIOMemoryRedirected & _flags);
2081 if (!(kIOMemoryRedirected & options)) {
2082 if (_memRef) {
2083 memoryReferenceRelease(_memRef);
2084 _memRef = NULL;
2085 }
2086 if (_mappings) {
2087 _mappings->flushCollection();
2088 }
2089 }
2090 } else {
2091 if (!super::init()) {
2092 return false;
2093 }
2094 _initialized = true;
2095 }
2096
2097 // Grab the appropriate mapper
2098 if (kIOMemoryHostOrRemote & options) {
2099 options |= kIOMemoryMapperNone;
2100 }
2101 if (kIOMemoryMapperNone & options) {
2102 mapper = NULL; // No Mapper
2103 } else if (mapper == kIOMapperSystem) {
2104 IOMapper::checkForSystemMapper();
2105 gIOSystemMapper = mapper = IOMapper::gSystem;
2106 }
2107
2108 // Remove the dynamic internal use flags from the initial setting
2109 options &= ~(kIOMemoryPreparedReadOnly);
2110 _flags = options;
2111 _task = task;
2112
2113 #ifndef __LP64__
2114 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2115 #endif /* !__LP64__ */
2116
2117 _dmaReferences = 0;
2118 __iomd_reservedA = 0;
2119 __iomd_reservedB = 0;
2120 _highestPage = 0;
2121
2122 if (kIOMemoryThreadSafe & options) {
2123 if (!_prepareLock) {
2124 _prepareLock = IOLockAlloc();
2125 }
2126 } else if (_prepareLock) {
2127 IOLockFree(_prepareLock);
2128 _prepareLock = NULL;
2129 }
2130
2131 if (kIOMemoryTypeUPL == type) {
2132 ioGMDData *dataP;
2133 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134
2135 if (!initMemoryEntries(dataSize, mapper)) {
2136 return false;
2137 }
2138 dataP = getDataP(_memoryEntries);
2139 dataP->fPageCnt = 0;
2140 switch (kIOMemoryDirectionMask & options) {
2141 case kIODirectionOut:
2142 dataP->fDMAAccess = kIODMAMapReadAccess;
2143 break;
2144 case kIODirectionIn:
2145 dataP->fDMAAccess = kIODMAMapWriteAccess;
2146 break;
2147 case kIODirectionNone:
2148 case kIODirectionOutIn:
2149 default:
2150 panic("bad dir for upl 0x%x", (int) options);
2151 break;
2152 }
2153 // _wireCount++; // UPLs start out life wired
2154
2155 _length = count;
2156 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157
2158 ioPLBlock iopl;
2159 iopl.fIOPL = (upl_t) buffers;
2160 upl_set_referenced(iopl.fIOPL, true);
2161 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162
2163 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164 panic("short external upl");
2165 }
2166
2167 _highestPage = upl_get_highest_page(iopl.fIOPL);
2168 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169
		// Set the flag kIOPLOnDevice conveniently equal to 1
2171 iopl.fFlags = pageList->device | kIOPLExternUPL;
2172 if (!pageList->device) {
2173 // Pre-compute the offset into the UPL's page list
2174 pageList = &pageList[atop_32(offset)];
2175 offset &= PAGE_MASK;
2176 }
2177 iopl.fIOMDOffset = 0;
2178 iopl.fMappedPage = 0;
2179 iopl.fPageInfo = (vm_address_t) pageList;
2180 iopl.fPageOffset = offset;
2181 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182 } else {
2183 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185
2186 // Initialize the memory descriptor
2187 if (options & kIOMemoryAsReference) {
2188 #ifndef __LP64__
2189 _rangesIsAllocated = false;
2190 #endif /* !__LP64__ */
2191
2192 // Hack assignment to get the buffer arg into _ranges.
2193 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194 // work, C++ sigh.
2195 // This also initialises the uio & physical ranges.
2196 _ranges.v = (IOVirtualRange *) buffers;
2197 } else {
2198 #ifndef __LP64__
2199 _rangesIsAllocated = true;
2200 #endif /* !__LP64__ */
2201 switch (type) {
2202 case kIOMemoryTypeUIO:
2203 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204 break;
2205
2206 #ifndef __LP64__
2207 case kIOMemoryTypeVirtual64:
2208 case kIOMemoryTypePhysical64:
2209 if (count == 1
2210 #ifndef __arm__
2211 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212 #endif
2213 ) {
2214 if (type == kIOMemoryTypeVirtual64) {
2215 type = kIOMemoryTypeVirtual;
2216 } else {
2217 type = kIOMemoryTypePhysical;
2218 }
2219 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220 _rangesIsAllocated = false;
2221 _ranges.v = &_singleRange.v;
2222 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2224 break;
2225 }
2226 _ranges.v64 = IONew(IOAddressRange, count);
2227 if (!_ranges.v64) {
2228 return false;
2229 }
2230 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231 break;
2232 #endif /* !__LP64__ */
2233 case kIOMemoryTypeVirtual:
2234 case kIOMemoryTypePhysical:
2235 if (count == 1) {
2236 _flags |= kIOMemoryAsReference;
2237 #ifndef __LP64__
2238 _rangesIsAllocated = false;
2239 #endif /* !__LP64__ */
2240 _ranges.v = &_singleRange.v;
2241 } else {
2242 _ranges.v = IONew(IOVirtualRange, count);
2243 if (!_ranges.v) {
2244 return false;
2245 }
2246 }
2247 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248 break;
2249 }
2250 }
2251 _rangesCount = count;
2252
2253 // Find starting address within the vector of ranges
2254 Ranges vec = _ranges;
2255 mach_vm_size_t totalLength = 0;
2256 unsigned int ind, pages = 0;
2257 for (ind = 0; ind < count; ind++) {
2258 mach_vm_address_t addr;
2259 mach_vm_address_t endAddr;
2260 mach_vm_size_t len;
2261
2262 // addr & len are returned by this function
2263 getAddrLenForInd(addr, len, type, vec, ind, _task);
2264 if (_task) {
2265 mach_vm_size_t phys_size;
2266 kern_return_t kret;
2267 kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268 if (KERN_SUCCESS != kret) {
2269 break;
2270 }
2271 if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272 break;
2273 }
2274 } else {
2275 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276 break;
2277 }
2278 if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279 break;
2280 }
2281 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282 break;
2283 }
2284 }
2285 if (os_add_overflow(totalLength, len, &totalLength)) {
2286 break;
2287 }
2288 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289 uint64_t highPage = atop_64(addr + len - 1);
2290 if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291 _highestPage = (ppnum_t) highPage;
2292 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293 }
2294 }
2295 }
2296 if ((ind < count)
2297 || (totalLength != ((IOByteCount) totalLength))) {
2298 return false; /* overflow */
2299 }
2300 _length = totalLength;
2301 _pages = pages;
2302
2303 // Auto-prepare memory at creation time.
2304 // Implied completion when descriptor is free-ed
2305
2306 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2307 _wireCount++; // Physical MDs are, by definition, wired
2308 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2309 ioGMDData *dataP;
2310 unsigned dataSize;
2311
2312 if (_pages > atop_64(max_mem)) {
2313 return false;
2314 }
2315
2316 dataSize = computeDataSize(_pages, /* upls */ count * 2);
2317 if (!initMemoryEntries(dataSize, mapper)) {
2318 return false;
2319 }
2320 dataP = getDataP(_memoryEntries);
2321 dataP->fPageCnt = _pages;
2322
2323 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2324 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2325 _kernelTag = IOMemoryTag(kernel_map);
2326 if (_kernelTag == gIOSurfaceTag) {
2327 _userTag = VM_MEMORY_IOSURFACE;
2328 }
2329 }
2330
2331 if ((kIOMemoryPersistent & _flags) && !_memRef) {
2332 IOReturn
2333 err = memoryReferenceCreate(0, &_memRef);
2334 if (kIOReturnSuccess != err) {
2335 return false;
2336 }
2337 }
2338
2339 if ((_flags & kIOMemoryAutoPrepare)
2340 && prepare() != kIOReturnSuccess) {
2341 return false;
2342 }
2343 }
2344 }
2345
2346 return true;
2347 }
2348
2349 /*
2350 * free
2351 *
2352 * Free resources.
2353 */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach this descriptor from any device pager state before teardown.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		// Physical MDs never go through complete(); just tear down any
		// outstanding DMA mapping recorded in the memory-entries data.
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() — completion is implied on free.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Release range storage only if we allocated it (kIOMemoryAsReference
	// means the ranges are owned by the caller).
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2417
2418 #ifndef __LP64__
// Deprecated legacy entry point; any caller reaching this is a bug.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}
2424
// Deprecated legacy entry point; any caller reaching this is a bug.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
2430 #endif /* !__LP64__ */
2431
2432 /*
2433 * getDirection:
2434 *
2435 * Get the direction of the transfer.
2436 */
2437 IODirection
getDirection() const2438 IOMemoryDescriptor::getDirection() const
2439 {
2440 #ifndef __LP64__
2441 if (_direction) {
2442 return _direction;
2443 }
2444 #endif /* !__LP64__ */
2445 return (IODirection) (_flags & kIOMemoryDirectionMask);
2446 }
2447
2448 /*
2449 * getLength:
2450 *
2451 * Get the length of the transfer (over all ranges).
2452 */
// Returns the total byte length of the descriptor (sum over all ranges).
IOByteCount
IOMemoryDescriptor::getLength() const
{
	return _length;
}
2458
// Sets the client-defined tag value associated with this descriptor.
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}
2464
// Returns the client-defined tag value set via setTag().
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}
2470
// Returns the descriptor's option/type flag bits (kIOMemory* values).
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
2476
2477 OSObject *
copyContext(void) const2478 IOMemoryDescriptor::copyContext(void) const
2479 {
2480 if (reserved) {
2481 OSObject * context = reserved->contextObject;
2482 if (context) {
2483 context->retain();
2484 }
2485 return context;
2486 } else {
2487 return NULL;
2488 }
2489 }
2490
// Attaches a retained context object to this descriptor, replacing and
// releasing any previous one.  Passing NULL clears the context.
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Intentionally shadows the member: getKernelReserved() lazily allocates
	// the expansion area and returns it (NULL on allocation failure).
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Atomically detach the previous context before releasing it.
		// NOTE(review): if the CAS fails (concurrent setContext call), the
		// old object is not released here and the store below may clobber a
		// concurrently-set pointer — presumably callers serialize; verify.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2511
2512 #ifndef __LP64__
2513 #pragma clang diagnostic push
2514 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2515
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
// Legacy lookup of the physical segment at 'offset'; wires the memory just
// long enough to resolve the address, then completes again.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
2529
2530 #pragma clang diagnostic pop
2531
2532 #endif /* !__LP64__ */
2533
2534
// Copies up to 'length' bytes out of this descriptor, starting at 'offset',
// into the kernel virtual buffer 'bytes'.  Returns the number of bytes
// actually copied (0 if the request does not fit within the descriptor).
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-local) memory cannot be accessed through this path.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t srcAddr64;
		IOByteCount srcLen;
		int options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;

		// Resolve the next physical segment, bypassing any DMA mapper.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int length; cap each pass accordingly.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
#pragma unused(copy_ret)

		dstAddr += srcLen;
		offset += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

	return length - remaining;
}
2601
// Copies up to 'length' bytes from the kernel virtual buffer 'bytes' into
// this descriptor starting at 'inoffset'.  A NULL 'bytes' zero-fills the
// destination instead.  Returns the number of bytes actually written.
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse writes to memory that was prepared read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-local) memory cannot be accessed through this path.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;
		int options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;

		// Resolve the next physical segment, bypassing any DMA mapper.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes an unsigned int length; cap each pass accordingly.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		if (!srcAddr) {
			// NULL source buffer: zero-fill this segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
#pragma unused(copy_ret)
			srcAddr += dstLen;
		}
		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	if (!srcAddr) {
		// The bzero_phys path bypassed copypv's flush; flush explicitly.
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2685
2686 #ifndef __LP64__
// Deprecated legacy entry point; any caller reaching this is a bug.
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	panic("IOGMD::setPosition deprecated");
}
2692 #endif /* !__LP64__ */
2693
// Monotonic sources for preparation and descriptor IDs.  Seeded above zero
// and above kIODescriptorIDInvalid respectively, so that every assigned ID
// is distinct from the "invalid/unprepared" sentinel values.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2696
// Returns a unique ID for the current prepared (wired) state of this
// descriptor, or kIOPreparationIDUnprepared when not wired.
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// A preparation ID is only meaningful while the memory is wired.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors are permanently wired; use the base-class ID.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign a fresh ID; the CAS ensures concurrent callers all
	// observe the same winner even if several allocate a candidate.
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2722
2723 void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2724 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2725 {
2726 if (reserved->creator) {
2727 task_deallocate(reserved->creator);
2728 reserved->creator = NULL;
2729 }
2730
2731 if (reserved->contextObject) {
2732 reserved->contextObject->release();
2733 reserved->contextObject = NULL;
2734 }
2735 }
2736
// Lazily allocates and returns the expansion data area.
// Returns NULL only if the allocation fails.
IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
	if (!reserved) {
		reserved = IOMallocType(IOMemoryDescriptorReserved);
	}
	return reserved;
}
2745
// Assigns a unique preparation ID on first call; subsequent calls are
// no-ops.  The CAS makes concurrent first calls agree on a single winner.
void
IOMemoryDescriptor::setPreparationID( void )
{
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2754
2755 uint64_t
getPreparationID(void)2756 IOMemoryDescriptor::getPreparationID( void )
2757 {
2758 if (reserved) {
2759 return reserved->preparationID;
2760 } else {
2761 return kIOPreparationIDUnsupported;
2762 }
2763 }
2764
// Assigns a unique descriptor ID on first call; subsequent calls are
// no-ops.  The CAS makes concurrent first calls agree on a single winner.
void
IOMemoryDescriptor::setDescriptorID( void )
{
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2773
2774 uint64_t
getDescriptorID(void)2775 IOMemoryDescriptor::getDescriptorID( void )
2776 {
2777 setDescriptorID();
2778
2779 if (reserved) {
2780 return reserved->descriptorID;
2781 } else {
2782 return kIODescriptorIDInvalid;
2783 }
2784 }
2785
2786 IOReturn
ktraceEmitPhysicalSegments(void)2787 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2788 {
2789 if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2790 return kIOReturnSuccess;
2791 }
2792
2793 assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2794 if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2795 return kIOReturnBadArgument;
2796 }
2797
2798 uint64_t descriptorID = getDescriptorID();
2799 assert(descriptorID != kIODescriptorIDInvalid);
2800 if (getDescriptorID() == kIODescriptorIDInvalid) {
2801 return kIOReturnBadArgument;
2802 }
2803
2804 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2805
2806 #if __LP64__
2807 static const uint8_t num_segments_page = 8;
2808 #else
2809 static const uint8_t num_segments_page = 4;
2810 #endif
2811 static const uint8_t num_segments_long = 2;
2812
2813 IOPhysicalAddress segments_page[num_segments_page];
2814 IOPhysicalRange segments_long[num_segments_long];
2815 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2816 memset(segments_long, 0, sizeof(segments_long));
2817
2818 uint8_t segment_page_idx = 0;
2819 uint8_t segment_long_idx = 0;
2820
2821 IOPhysicalRange physical_segment;
2822 for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2823 physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2824
2825 if (physical_segment.length == 0) {
2826 break;
2827 }
2828
2829 /**
2830 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2831 * buffer memory, pack segment events according to the following.
2832 *
2833 * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2834 * IOMDPA_MAPPED event emitted on by the current thread_id.
2835 *
2836 * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2837 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2838 * - unmapped pages will have a ppn of MAX_INT_32
2839 * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2840 * - address_0, length_0, address_0, length_1
2841 * - unmapped pages will have an address of 0
2842 *
2843 * During each iteration do the following depending on the length of the mapping:
2844 * 1. add the current segment to the appropriate queue of pending segments
2845 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2846 * 1a. if FALSE emit and reset all events in the previous queue
2847 * 2. check if we have filled up the current queue of pending events
2848 * 2a. if TRUE emit and reset all events in the pending queue
2849 * 3. after completing all iterations emit events in the current queue
2850 */
2851
2852 bool emit_page = false;
2853 bool emit_long = false;
2854 if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2855 segments_page[segment_page_idx] = physical_segment.address;
2856 segment_page_idx++;
2857
2858 emit_long = segment_long_idx != 0;
2859 emit_page = segment_page_idx == num_segments_page;
2860
2861 if (os_unlikely(emit_long)) {
2862 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2863 segments_long[0].address, segments_long[0].length,
2864 segments_long[1].address, segments_long[1].length);
2865 }
2866
2867 if (os_unlikely(emit_page)) {
2868 #if __LP64__
2869 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2870 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2871 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2872 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2873 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2874 #else
2875 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2876 (ppnum_t) atop_32(segments_page[1]),
2877 (ppnum_t) atop_32(segments_page[2]),
2878 (ppnum_t) atop_32(segments_page[3]),
2879 (ppnum_t) atop_32(segments_page[4]));
2880 #endif
2881 }
2882 } else {
2883 segments_long[segment_long_idx] = physical_segment;
2884 segment_long_idx++;
2885
2886 emit_page = segment_page_idx != 0;
2887 emit_long = segment_long_idx == num_segments_long;
2888
2889 if (os_unlikely(emit_page)) {
2890 #if __LP64__
2891 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2892 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2893 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2894 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2895 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2896 #else
2897 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2898 (ppnum_t) atop_32(segments_page[1]),
2899 (ppnum_t) atop_32(segments_page[2]),
2900 (ppnum_t) atop_32(segments_page[3]),
2901 (ppnum_t) atop_32(segments_page[4]));
2902 #endif
2903 }
2904
2905 if (emit_long) {
2906 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2907 segments_long[0].address, segments_long[0].length,
2908 segments_long[1].address, segments_long[1].length);
2909 }
2910 }
2911
2912 if (os_unlikely(emit_page)) {
2913 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2914 segment_page_idx = 0;
2915 }
2916
2917 if (os_unlikely(emit_long)) {
2918 memset(segments_long, 0, sizeof(segments_long));
2919 segment_long_idx = 0;
2920 }
2921 }
2922
2923 if (segment_page_idx != 0) {
2924 assert(segment_long_idx == 0);
2925 #if __LP64__
2926 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2927 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2928 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2929 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2930 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2931 #else
2932 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2933 (ppnum_t) atop_32(segments_page[1]),
2934 (ppnum_t) atop_32(segments_page[2]),
2935 (ppnum_t) atop_32(segments_page[3]),
2936 (ppnum_t) atop_32(segments_page[4]));
2937 #endif
2938 } else if (segment_long_idx != 0) {
2939 assert(segment_page_idx == 0);
2940 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2941 segments_long[0].address, segments_long[0].length,
2942 segments_long[1].address, segments_long[1].length);
2943 }
2944
2945 return kIOReturnSuccess;
2946 }
2947
// Records the VM tags used to attribute this descriptor's memory in
// kernel-map and user-map accounting respectively.
void
IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
{
	_kernelTag = (vm_tag_t) kernelTag;
	_userTag = (vm_tag_t) userTag;
}
2954
2955 uint32_t
getVMTag(vm_map_t map)2956 IOMemoryDescriptor::getVMTag(vm_map_t map)
2957 {
2958 if (vm_kernel_map_is_kernel(map)) {
2959 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2960 return (uint32_t) _kernelTag;
2961 }
2962 } else {
2963 if (VM_KERN_MEMORY_NONE != _userTag) {
2964 return (uint32_t) _userTag;
2965 }
2966 }
2967 return IOMemoryTag(map);
2968 }
2969
2970 IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2971 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2972 {
2973 IOReturn err = kIOReturnSuccess;
2974 DMACommandOps params;
2975 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2976 ioGMDData *dataP;
2977
2978 params = (op & ~kIOMDDMACommandOperationMask & op);
2979 op &= kIOMDDMACommandOperationMask;
2980
2981 if (kIOMDDMAMap == op) {
2982 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2983 return kIOReturnUnderrun;
2984 }
2985
2986 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2987
2988 if (!_memoryEntries
2989 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2990 return kIOReturnNoMemory;
2991 }
2992
2993 if (_memoryEntries && data->fMapper) {
2994 bool remap, keepMap;
2995 dataP = getDataP(_memoryEntries);
2996
2997 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2998 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2999 }
3000 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
3001 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
3002 }
3003
3004 keepMap = (data->fMapper == gIOSystemMapper);
3005 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3006
3007 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3008 IOLockLock(_prepareLock);
3009 }
3010
3011 remap = (!keepMap);
3012 remap |= (dataP->fDMAMapNumAddressBits < 64)
3013 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3014 remap |= (dataP->fDMAMapAlignment > page_size);
3015
3016 if (remap || !dataP->fMappedBaseValid) {
3017 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3018 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3019 dataP->fMappedBase = data->fAlloc;
3020 dataP->fMappedBaseValid = true;
3021 dataP->fMappedLength = data->fAllocLength;
3022 data->fAllocLength = 0; // IOMD owns the alloc now
3023 }
3024 } else {
3025 data->fAlloc = dataP->fMappedBase;
3026 data->fAllocLength = 0; // give out IOMD map
3027 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3028 }
3029
3030 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3031 IOLockUnlock(_prepareLock);
3032 }
3033 }
3034 return err;
3035 }
3036 if (kIOMDDMAUnmap == op) {
3037 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3038 return kIOReturnUnderrun;
3039 }
3040 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3041
3042 if (_pages) {
3043 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3044 }
3045
3046 return kIOReturnSuccess;
3047 }
3048
3049 if (kIOMDAddDMAMapSpec == op) {
3050 if (dataSize < sizeof(IODMAMapSpecification)) {
3051 return kIOReturnUnderrun;
3052 }
3053
3054 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3055
3056 if (!_memoryEntries
3057 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3058 return kIOReturnNoMemory;
3059 }
3060
3061 if (_memoryEntries) {
3062 dataP = getDataP(_memoryEntries);
3063 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3064 dataP->fDMAMapNumAddressBits = data->numAddressBits;
3065 }
3066 if (data->alignment > dataP->fDMAMapAlignment) {
3067 dataP->fDMAMapAlignment = data->alignment;
3068 }
3069 }
3070 return kIOReturnSuccess;
3071 }
3072
3073 if (kIOMDGetCharacteristics == op) {
3074 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3075 return kIOReturnUnderrun;
3076 }
3077
3078 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3079 data->fLength = _length;
3080 data->fSGCount = _rangesCount;
3081 data->fPages = _pages;
3082 data->fDirection = getDirection();
3083 if (!_wireCount) {
3084 data->fIsPrepared = false;
3085 } else {
3086 data->fIsPrepared = true;
3087 data->fHighestPage = _highestPage;
3088 if (_memoryEntries) {
3089 dataP = getDataP(_memoryEntries);
3090 ioPLBlock *ioplList = getIOPLList(dataP);
3091 UInt count = getNumIOPL(_memoryEntries, dataP);
3092 if (count == 1) {
3093 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3094 }
3095 }
3096 }
3097
3098 return kIOReturnSuccess;
3099 } else if (kIOMDDMAActive == op) {
3100 if (params) {
3101 int16_t prior;
3102 prior = OSAddAtomic16(1, &md->_dmaReferences);
3103 if (!prior) {
3104 md->_mapName = NULL;
3105 }
3106 } else {
3107 if (md->_dmaReferences) {
3108 OSAddAtomic16(-1, &md->_dmaReferences);
3109 } else {
3110 panic("_dmaReferences underflow");
3111 }
3112 }
3113 } else if (kIOMDWalkSegments != op) {
3114 return kIOReturnBadArgument;
3115 }
3116
3117 // Get the next segment
3118 struct InternalState {
3119 IOMDDMAWalkSegmentArgs fIO;
3120 mach_vm_size_t fOffset2Index;
3121 mach_vm_size_t fNextOffset;
3122 UInt fIndex;
3123 } *isP;
3124
3125 // Find the next segment
3126 if (dataSize < sizeof(*isP)) {
3127 return kIOReturnUnderrun;
3128 }
3129
3130 isP = (InternalState *) vData;
3131 uint64_t offset = isP->fIO.fOffset;
3132 uint8_t mapped = isP->fIO.fMapped;
3133 uint64_t mappedBase;
3134
3135 if (mapped && (kIOMemoryRemote & _flags)) {
3136 return kIOReturnNotAttached;
3137 }
3138
3139 if (IOMapper::gSystem && mapped
3140 && (!(kIOMemoryHostOnly & _flags))
3141 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3142 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3143 if (!_memoryEntries
3144 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3145 return kIOReturnNoMemory;
3146 }
3147
3148 dataP = getDataP(_memoryEntries);
3149 if (dataP->fMapper) {
3150 IODMAMapSpecification mapSpec;
3151 bzero(&mapSpec, sizeof(mapSpec));
3152 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3153 mapSpec.alignment = dataP->fDMAMapAlignment;
3154 err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3155 if (kIOReturnSuccess != err) {
3156 return err;
3157 }
3158 dataP->fMappedBaseValid = true;
3159 }
3160 }
3161
3162 if (mapped) {
3163 if (IOMapper::gSystem
3164 && (!(kIOMemoryHostOnly & _flags))
3165 && _memoryEntries
3166 && (dataP = getDataP(_memoryEntries))
3167 && dataP->fMappedBaseValid) {
3168 mappedBase = dataP->fMappedBase;
3169 } else {
3170 mapped = 0;
3171 }
3172 }
3173
3174 if (offset >= _length) {
3175 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3176 }
3177
3178 // Validate the previous offset
3179 UInt ind;
3180 mach_vm_size_t off2Ind = isP->fOffset2Index;
3181 if (!params
3182 && offset
3183 && (offset == isP->fNextOffset || off2Ind <= offset)) {
3184 ind = isP->fIndex;
3185 } else {
3186 ind = off2Ind = 0; // Start from beginning
3187 }
3188 mach_vm_size_t length;
3189 UInt64 address;
3190
3191 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3192 // Physical address based memory descriptor
3193 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3194
3195 // Find the range after the one that contains the offset
3196 mach_vm_size_t len;
3197 for (len = 0; off2Ind <= offset; ind++) {
3198 len = physP[ind].length;
3199 off2Ind += len;
3200 }
3201
3202 // Calculate length within range and starting address
3203 length = off2Ind - offset;
3204 address = physP[ind - 1].address + len - length;
3205
3206 if (true && mapped) {
3207 address = mappedBase + offset;
3208 } else {
3209 // see how far we can coalesce ranges
3210 while (ind < _rangesCount && address + length == physP[ind].address) {
3211 len = physP[ind].length;
3212 length += len;
3213 off2Ind += len;
3214 ind++;
3215 }
3216 }
3217
3218 // correct contiguous check overshoot
3219 ind--;
3220 off2Ind -= len;
3221 }
3222 #ifndef __LP64__
3223 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3224 // Physical address based memory descriptor
3225 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3226
3227 // Find the range after the one that contains the offset
3228 mach_vm_size_t len;
3229 for (len = 0; off2Ind <= offset; ind++) {
3230 len = physP[ind].length;
3231 off2Ind += len;
3232 }
3233
3234 // Calculate length within range and starting address
3235 length = off2Ind - offset;
3236 address = physP[ind - 1].address + len - length;
3237
3238 if (true && mapped) {
3239 address = mappedBase + offset;
3240 } else {
3241 // see how far we can coalesce ranges
3242 while (ind < _rangesCount && address + length == physP[ind].address) {
3243 len = physP[ind].length;
3244 length += len;
3245 off2Ind += len;
3246 ind++;
3247 }
3248 }
3249 // correct contiguous check overshoot
3250 ind--;
3251 off2Ind -= len;
3252 }
3253 #endif /* !__LP64__ */
3254 else {
3255 do {
3256 if (!_wireCount) {
3257 panic("IOGMD: not wired for the IODMACommand");
3258 }
3259
3260 assert(_memoryEntries);
3261
3262 dataP = getDataP(_memoryEntries);
3263 const ioPLBlock *ioplList = getIOPLList(dataP);
3264 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3265 upl_page_info_t *pageList = getPageList(dataP);
3266
3267 assert(numIOPLs > 0);
3268
3269 // Scan through iopl info blocks looking for block containing offset
3270 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3271 ind++;
3272 }
3273
3274 // Go back to actual range as search goes past it
3275 ioPLBlock ioplInfo = ioplList[ind - 1];
3276 off2Ind = ioplInfo.fIOMDOffset;
3277
3278 if (ind < numIOPLs) {
3279 length = ioplList[ind].fIOMDOffset;
3280 } else {
3281 length = _length;
3282 }
3283 length -= offset; // Remainder within iopl
3284
3285 // Subtract offset till this iopl in total list
3286 offset -= off2Ind;
3287
3288 // If a mapped address is requested and this is a pre-mapped IOPL
3289 // then just need to compute an offset relative to the mapped base.
3290 if (mapped) {
3291 offset += (ioplInfo.fPageOffset & PAGE_MASK);
3292 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3293 continue; // Done leave do/while(false) now
3294 }
3295
3296 // The offset is rebased into the current iopl.
3297 // Now add the iopl 1st page offset.
3298 offset += ioplInfo.fPageOffset;
3299
3300 // For external UPLs the fPageInfo field points directly to
3301 // the upl's upl_page_info_t array.
3302 if (ioplInfo.fFlags & kIOPLExternUPL) {
3303 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3304 } else {
3305 pageList = &pageList[ioplInfo.fPageInfo];
3306 }
3307
3308 // Check for direct device non-paged memory
3309 if (ioplInfo.fFlags & kIOPLOnDevice) {
3310 address = ptoa_64(pageList->phys_addr) + offset;
3311 continue; // Done leave do/while(false) now
3312 }
3313
3314 // Now we need compute the index into the pageList
3315 UInt pageInd = atop_32(offset);
3316 offset &= PAGE_MASK;
3317
3318 // Compute the starting address of this segment
3319 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3320 if (!pageAddr) {
3321 panic("!pageList phys_addr");
3322 }
3323
3324 address = ptoa_64(pageAddr) + offset;
3325
3326 // length is currently set to the length of the remainider of the iopl.
3327 // We need to check that the remainder of the iopl is contiguous.
3328 // This is indicated by pageList[ind].phys_addr being sequential.
3329 IOByteCount contigLength = PAGE_SIZE - offset;
3330 while (contigLength < length
3331 && ++pageAddr == pageList[++pageInd].phys_addr) {
3332 contigLength += PAGE_SIZE;
3333 }
3334
3335 if (contigLength < length) {
3336 length = contigLength;
3337 }
3338
3339 assert(address);
3340 assert(length);
3341 } while (false);
3342 }
3343
3344 // Update return values and state
3345 isP->fIO.fIOVMAddr = address;
3346 isP->fIO.fLength = length;
3347 isP->fIndex = ind;
3348 isP->fOffset2Index = off2Ind;
3349 isP->fNextOffset = isP->fIO.fOffset + length;
3350
3351 return kIOReturnSuccess;
3352 }
3353
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	// Return the physical (or DMA-mapped) address of the segment containing
	// 'offset', and via 'lengthOfSegment' the number of contiguous bytes from
	// that address. Returns 0 (with *lengthOfSegment = 0) when offset is past
	// the end of the descriptor or no address can be produced.
	//
	// options:
	//   _kIOMemorySourceSegment - walk the original (source) address ranges
	//                             instead of the wired/mapped pages.
	//   kIOMemoryMapperNone     - caller wants a raw physical address, not a
	//                             system-mapper (IOVM) address.
	IOReturn        ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t  length = 0;
	IOMapper * mapper = gIOSystemMapper;
	IOOptionBits    type = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Common case: ask the segment-walking machinery for the first
		// segment at 'offset'. _state is a scratch buffer large enough for
		// the walker's internal state; only the leading
		// IOMDDMAWalkSegmentArgs portion is initialized here.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Mapped addresses are only meaningful when a mapper may be in play
		// and the memory is not host-only/remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Translate the mapper (IOVM) address back to a raw physical
				// address, then grow the segment page by page while the
				// translation stays physically contiguous.
				addr64_t origAddr = address;
				IOByteCount origLen = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// A zero address means failure; report a zero-length segment in that case.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3460
3461 IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3462 IOGeneralMemoryDescriptor::readBytes
3463 (IOByteCount offset, void *bytes, IOByteCount length)
3464 {
3465 IOByteCount count = super::readBytes(offset, bytes, length);
3466 return count;
3467 }
3468
3469 IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3470 IOGeneralMemoryDescriptor::writeBytes
3471 (IOByteCount offset, const void* bytes, IOByteCount withLength)
3472 {
3473 IOByteCount count = super::writeBytes(offset, bytes, withLength);
3474 return count;
3475 }
3476
3477 #ifndef __LP64__
3478 #pragma clang diagnostic push
3479 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3480
3481 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3482 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3483 {
3484 addr64_t address = 0;
3485
3486 if (options & _kIOMemorySourceSegment) {
3487 address = getSourceSegment(offset, lengthOfSegment);
3488 } else if (options & kIOMemoryMapperNone) {
3489 address = getPhysicalSegment64(offset, lengthOfSegment);
3490 } else {
3491 address = getPhysicalSegment(offset, lengthOfSegment);
3492 }
3493
3494 return address;
3495 }
3496 #pragma clang diagnostic pop
3497
3498 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3499 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3500 {
3501 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3502 }
3503
3504 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3505 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3506 {
3507 addr64_t address = 0;
3508 IOByteCount length = 0;
3509
3510 address = getPhysicalSegment(offset, lengthOfSegment, 0);
3511
3512 if (lengthOfSegment) {
3513 length = *lengthOfSegment;
3514 }
3515
3516 if ((address + length) > 0x100000000ULL) {
3517 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3518 address, (long) length, (getMetaClass())->getClassName());
3519 }
3520
3521 return (IOPhysicalAddress) address;
3522 }
3523
3524 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3525 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3526 {
3527 IOPhysicalAddress phys32;
3528 IOByteCount length;
3529 addr64_t phys64;
3530 IOMapper * mapper = NULL;
3531
3532 phys32 = getPhysicalSegment(offset, lengthOfSegment);
3533 if (!phys32) {
3534 return 0;
3535 }
3536
3537 if (gIOSystemMapper) {
3538 mapper = gIOSystemMapper;
3539 }
3540
3541 if (mapper) {
3542 IOByteCount origLen;
3543
3544 phys64 = mapper->mapToPhysicalAddress(phys32);
3545 origLen = *lengthOfSegment;
3546 length = page_size - (phys64 & (page_size - 1));
3547 while ((length < origLen)
3548 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3549 length += page_size;
3550 }
3551 if (length > origLen) {
3552 length = origLen;
3553 }
3554
3555 *lengthOfSegment = length;
3556 } else {
3557 phys64 = (addr64_t) phys32;
3558 }
3559
3560 return phys64;
3561 }
3562
3563 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3564 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3565 {
3566 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3567 }
3568
3569 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3570 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3571 {
3572 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3573 }
3574
3575 #pragma clang diagnostic push
3576 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3577
3578 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3579 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3580 IOByteCount * lengthOfSegment)
3581 {
3582 if (_task == kernel_task) {
3583 return (void *) getSourceSegment(offset, lengthOfSegment);
3584 } else {
3585 panic("IOGMD::getVirtualSegment deprecated");
3586 }
3587
3588 return NULL;
3589 }
3590 #pragma clang diagnostic pop
3591 #endif /* !__LP64__ */
3592
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Base-class implementation of the IODMACommand operation dispatch for
	// descriptors without their own page lists: segments are discovered by
	// repeatedly calling getPhysicalSegment() and coalescing contiguous runs.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Low bits of 'op' carry per-operation parameter flags; strip them before
	// dispatching on the operation code.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0; // segment count is not tracked by the base class
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t addr, nextAddr;

		// Mapped (IOVM) walking is not supported here; only raw physical.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Start at the requested offset, then greedily coalesce physically
		// contiguous follow-on segments into one result.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		// Map specifications are only meaningful for IOGeneralMemoryDescriptor.
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap is deliberately discarded and
		// success is returned unconditionally; the IOGeneralMemoryDescriptor
		// variant of this operation behaves the same way — confirm intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3664
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeability state of the memory backing this descriptor.
	// When a memory entry reference exists the superclass path is used;
	// otherwise the request is applied directly to the owning task's vm_map.
	// Remote (non-local) memory cannot be controlled from here.
	IOReturn err = kIOReturnSuccess;

	vm_purgable_t control;
	int state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers must go through the memory entry
				// path, which is unavailable here.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable state into VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// 'state' now holds the previous VM state; convert it
					// back to IOKit terms for the caller.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3728
3729 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3730 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3731 IOOptionBits * oldState )
3732 {
3733 IOReturn err = kIOReturnNotReady;
3734
3735 if (kIOMemoryThreadSafe & _flags) {
3736 LOCK;
3737 }
3738 if (_memRef) {
3739 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3740 }
3741 if (kIOMemoryThreadSafe & _flags) {
3742 UNLOCK;
3743 }
3744
3745 return err;
3746 }
3747
3748 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3749 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3750 int newLedgerTag,
3751 IOOptionBits newLedgerOptions )
3752 {
3753 IOReturn err = kIOReturnSuccess;
3754
3755 assert(!(kIOMemoryRemote & _flags));
3756 if (kIOMemoryRemote & _flags) {
3757 return kIOReturnNotAttached;
3758 }
3759
3760 if (iokit_iomd_setownership_enabled == FALSE) {
3761 return kIOReturnUnsupported;
3762 }
3763
3764 if (_memRef) {
3765 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3766 } else {
3767 err = kIOReturnUnsupported;
3768 }
3769
3770 return err;
3771 }
3772
3773 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3774 IOMemoryDescriptor::setOwnership( task_t newOwner,
3775 int newLedgerTag,
3776 IOOptionBits newLedgerOptions )
3777 {
3778 IOReturn err = kIOReturnNotReady;
3779
3780 assert(!(kIOMemoryRemote & _flags));
3781 if (kIOMemoryRemote & _flags) {
3782 return kIOReturnNotAttached;
3783 }
3784
3785 if (iokit_iomd_setownership_enabled == FALSE) {
3786 return kIOReturnUnsupported;
3787 }
3788
3789 if (kIOMemoryThreadSafe & _flags) {
3790 LOCK;
3791 }
3792 if (_memRef) {
3793 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3794 } else {
3795 IOMultiMemoryDescriptor * mmd;
3796 IOSubMemoryDescriptor * smd;
3797 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3798 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3799 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3800 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3801 }
3802 }
3803 if (kIOMemoryThreadSafe & _flags) {
3804 UNLOCK;
3805 }
3806
3807 return err;
3808 }
3809
3810
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	// Compute the number of bytes of DMA mapping this descriptor would need
	// (page-rounded per source segment), and optionally return via 'offset'
	// the sub-page offset of the first segment. Falls back to getLength()
	// when no source segments can be enumerated.
	uint64_t length;

	if (_memRef) {
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount       iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, accumulating each segment's span rounded
		// out to whole pages.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// The first segment's in-page alignment is the map offset.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments walked at all: report the raw descriptor length.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3848
3849
3850 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3851 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3852 IOByteCount * dirtyPageCount )
3853 {
3854 IOReturn err = kIOReturnNotReady;
3855
3856 assert(!(kIOMemoryRemote & _flags));
3857 if (kIOMemoryRemote & _flags) {
3858 return kIOReturnNotAttached;
3859 }
3860
3861 if (kIOMemoryThreadSafe & _flags) {
3862 LOCK;
3863 }
3864 if (_memRef) {
3865 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3866 } else {
3867 IOMultiMemoryDescriptor * mmd;
3868 IOSubMemoryDescriptor * smd;
3869 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3870 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3871 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3872 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3873 }
3874 }
3875 if (kIOMemoryThreadSafe & _flags) {
3876 UNLOCK;
3877 }
3878
3879 return err;
3880 }
3881
3882
3883 #if defined(__arm64__)
3884 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3885 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3886 #else /* defined(__arm64__) */
3887 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3888 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3889 #endif /* defined(__arm64__) */
3890
3891 static void
SetEncryptOp(addr64_t pa,unsigned int count)3892 SetEncryptOp(addr64_t pa, unsigned int count)
3893 {
3894 ppnum_t page, end;
3895
3896 page = (ppnum_t) atop_64(round_page_64(pa));
3897 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3898 for (; page < end; page++) {
3899 pmap_clear_noencrypt(page);
3900 }
3901 }
3902
3903 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3904 ClearEncryptOp(addr64_t pa, unsigned int count)
3905 {
3906 ppnum_t page, end;
3907
3908 page = (ppnum_t) atop_64(round_page_64(pa));
3909 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3910 for (; page < end; page++) {
3911 pmap_set_noencrypt(page);
3912 }
3913 }
3914
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	// Apply a per-page operation (cache flush/store, encryption attribute
	// set/clear) across the physical segments covering [offset, offset+length).
	// On arm64 the cache operations use an extended entry point that can
	// report completion via 'res'; other operations use the simple form.
	IOByteCount remaining;
	unsigned int res;
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Fully coherent I/O: a single zero-length call satisfies the
		// operation without walking any segments.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clamp the request to what the descriptor actually covers.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The operation callbacks take 'unsigned int' counts; clamp to the
		// largest page-aligned value that fits.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A non-zero result means the extended operation finished the
			// whole request; stop walking segments.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Any bytes left unprocessed indicate a segment lookup failure.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4029
4030 /*
4031 *
4032 */
4033
4034 #if defined(__i386__) || defined(__x86_64__)
4035
4036 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4037
4038 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4039 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4040 * kernel non-text data -- should we just add another range instead?
4041 */
4042 #define io_kernel_static_start vm_kernel_stext
4043 #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4044
4045 #elif defined(__arm64__)
4046
4047 extern vm_offset_t static_memory_end;
4048
4049 #if defined(__arm64__)
4050 #define io_kernel_static_start vm_kext_base
4051 #else /* defined(__arm64__) */
4052 #define io_kernel_static_start vm_kernel_stext
4053 #endif /* defined(__arm64__) */
4054
4055 #define io_kernel_static_end static_memory_end
4056
4057 #else
4058 #error io_kernel_static_end is undefined for this architecture
4059 #endif
4060
4061 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4062 io_get_kernel_static_upl(
4063 vm_map_t /* map */,
4064 uintptr_t offset,
4065 upl_size_t *upl_size,
4066 unsigned int *page_offset,
4067 upl_t *upl,
4068 upl_page_info_array_t page_list,
4069 unsigned int *count,
4070 ppnum_t *highest_page)
4071 {
4072 unsigned int pageCount, page;
4073 ppnum_t phys;
4074 ppnum_t highestPage = 0;
4075
4076 pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4077 if (pageCount > *count) {
4078 pageCount = *count;
4079 }
4080 *upl_size = (upl_size_t) ptoa_64(pageCount);
4081
4082 *upl = NULL;
4083 *page_offset = ((unsigned int) page_mask & offset);
4084
4085 for (page = 0; page < pageCount; page++) {
4086 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4087 if (!phys) {
4088 break;
4089 }
4090 page_list[page].phys_addr = phys;
4091 page_list[page].free_when_done = 0;
4092 page_list[page].absent = 0;
4093 page_list[page].dirty = 0;
4094 page_list[page].precious = 0;
4095 page_list[page].device = 0;
4096 if (phys > highestPage) {
4097 highestPage = phys;
4098 }
4099 }
4100
4101 *highest_page = highestPage;
4102
4103 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4104 }
4105
IOReturn
// Wire down the pageable virtual ranges backing this descriptor by creating
// UPLs for each range, recording one ioPLBlock per UPL in _memoryEntries.
// Called with the descriptor unprepared (first wire) or already wired
// (re-prepare). On any failure all UPLs created so far are aborted.
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags;    // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: only legal if the new request does not need write
		// access to a descriptor that was prepared read-only.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		// Caller asked for 32-bit physical addresses: constrain the UPL
		// (when unmapped) and the DMA map specification.
		if (kIODirectionPrepareToPhys32 & forDirection) {
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Guard against the page-info array size overflowing unsigned int.
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		// appendBytes may reallocate the backing store; dataP is stale now.
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex = 0;
		IOByteCount mdOffset = 0;
		ppnum_t highestPage = 0;
		bool byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes = memRefEntry->size;
				// Memory-entry ranges carry no VA; use a sentinel address.
				startPage = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t ioplSize;
				unsigned int numPageInfo;

				// Determine how much of the remaining range is physically
				// mappable in one UPL.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				// Each UPL is capped at MAX_UPL_SIZE_BYTES.
				ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					// Do not run past the end of this memory entry.
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					// Static kernel memory: synthesize the page list directly
					// rather than creating a real UPL.
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				// Device memory behaves as one contiguous "page" entry.
				if (baseInfo->device) {
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					// More of this range remains; advance and loop again.
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		// A pure copy-out wire leaves the descriptor read-only for later
		// re-prepares (checked at the top of this function).
		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	// Failure: abort and release every UPL recorded so far, then truncate
	// _memoryEntries back to its header-only state.
	{
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Map raw Mach errors to IOKit equivalents.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4464
4465 bool
initMemoryEntries(size_t size,IOMapper * mapper)4466 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4467 {
4468 ioGMDData * dataP;
4469
4470 if (size > UINT_MAX) {
4471 return false;
4472 }
4473 if (!_memoryEntries) {
4474 _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4475 if (!_memoryEntries) {
4476 return false;
4477 }
4478 } else if (!_memoryEntries->initWithCapacity(size)) {
4479 return false;
4480 }
4481
4482 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4483 dataP = getDataP(_memoryEntries);
4484
4485 if (mapper == kIOMapperWaitSystem) {
4486 IOMapper::checkForSystemMapper();
4487 mapper = IOMapper::gSystem;
4488 }
4489 dataP->fMapper = mapper;
4490 dataP->fPageCnt = 0;
4491 dataP->fMappedBase = 0;
4492 dataP->fDMAMapNumAddressBits = 64;
4493 dataP->fDMAMapAlignment = 0;
4494 dataP->fPreparationID = kIOPreparationIDUnprepared;
4495 dataP->fCompletionError = false;
4496 dataP->fMappedBaseValid = false;
4497
4498 return true;
4499 }
4500
4501 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4502 IOMemoryDescriptor::dmaMap(
4503 IOMapper * mapper,
4504 IOMemoryDescriptor * memory,
4505 IODMACommand * command,
4506 const IODMAMapSpecification * mapSpec,
4507 uint64_t offset,
4508 uint64_t length,
4509 uint64_t * mapAddress,
4510 uint64_t * mapLength)
4511 {
4512 IOReturn err;
4513 uint32_t mapOptions;
4514
4515 mapOptions = 0;
4516 mapOptions |= kIODMAMapReadAccess;
4517 if (!(kIOMemoryPreparedReadOnly & _flags)) {
4518 mapOptions |= kIODMAMapWriteAccess;
4519 }
4520
4521 err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4522 mapSpec, command, NULL, mapAddress, mapLength);
4523
4524 if (kIOReturnSuccess == err) {
4525 dmaMapRecord(mapper, command, *mapLength);
4526 }
4527
4528 return err;
4529 }
4530
4531 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4532 IOMemoryDescriptor::dmaMapRecord(
4533 IOMapper * mapper,
4534 IODMACommand * command,
4535 uint64_t mapLength)
4536 {
4537 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4538 kern_allocation_name_t alloc;
4539 int16_t prior;
4540
4541 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4542 kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4543 }
4544
4545 if (!command) {
4546 return;
4547 }
4548 prior = OSAddAtomic16(1, &_dmaReferences);
4549 if (!prior) {
4550 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4551 _mapName = alloc;
4552 mapLength = _length;
4553 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4554 } else {
4555 _mapName = NULL;
4556 }
4557 }
4558 }
4559
IOReturn
// Tear down a DMA mapping made via dmaMap(), balancing the reference count
// and the allocation-size / per-tag subtotal accounting from dmaMapRecord().
IOMemoryDescriptor::dmaUnmap(
	IOMapper * mapper,
	IODMACommand * command,
	uint64_t offset,
	uint64_t mapAddress,
	uint64_t mapLength)
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		mapName = _mapName;
		// Balance the reference taken in dmaMapRecord(); underflow is fatal.
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	if (!mapLength) {
		// Nothing was actually mapped; only the reference bookkeeping above
		// applied.
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	// Reverse the accounting performed in dmaMapRecord().
	if ((alloc = mapper->fAllocName)) {
		kern_allocation_update_size(alloc, -mapLength, NULL);
		// prior == 1 means this was the last outstanding DMA reference.
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4603
4604 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4605 IOGeneralMemoryDescriptor::dmaMap(
4606 IOMapper * mapper,
4607 IOMemoryDescriptor * memory,
4608 IODMACommand * command,
4609 const IODMAMapSpecification * mapSpec,
4610 uint64_t offset,
4611 uint64_t length,
4612 uint64_t * mapAddress,
4613 uint64_t * mapLength)
4614 {
4615 IOReturn err = kIOReturnSuccess;
4616 ioGMDData * dataP;
4617 IOOptionBits type = _flags & kIOMemoryTypeMask;
4618
4619 *mapAddress = 0;
4620 if (kIOMemoryHostOnly & _flags) {
4621 return kIOReturnSuccess;
4622 }
4623 if (kIOMemoryRemote & _flags) {
4624 return kIOReturnNotAttached;
4625 }
4626
4627 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4628 || offset || (length != _length)) {
4629 err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4630 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4631 const ioPLBlock * ioplList = getIOPLList(dataP);
4632 upl_page_info_t * pageList;
4633 uint32_t mapOptions = 0;
4634
4635 IODMAMapSpecification mapSpec;
4636 bzero(&mapSpec, sizeof(mapSpec));
4637 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4638 mapSpec.alignment = dataP->fDMAMapAlignment;
4639
4640 // For external UPLs the fPageInfo field points directly to
4641 // the upl's upl_page_info_t array.
4642 if (ioplList->fFlags & kIOPLExternUPL) {
4643 pageList = (upl_page_info_t *) ioplList->fPageInfo;
4644 mapOptions |= kIODMAMapPagingPath;
4645 } else {
4646 pageList = getPageList(dataP);
4647 }
4648
4649 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4650 mapOptions |= kIODMAMapPageListFullyOccupied;
4651 }
4652
4653 assert(dataP->fDMAAccess);
4654 mapOptions |= dataP->fDMAAccess;
4655
4656 // Check for direct device non-paged memory
4657 if (ioplList->fFlags & kIOPLOnDevice) {
4658 mapOptions |= kIODMAMapPhysicallyContiguous;
4659 }
4660
4661 IODMAMapPageList dmaPageList =
4662 {
4663 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4664 .pageListCount = _pages,
4665 .pageList = &pageList[0]
4666 };
4667 err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4668 command, &dmaPageList, mapAddress, mapLength);
4669
4670 if (kIOReturnSuccess == err) {
4671 dmaMapRecord(mapper, command, *mapLength);
4672 }
4673 }
4674
4675 return err;
4676 }
4677
4678 /*
4679 * prepare
4680 *
4681 * Prepare the memory for an I/O transfer. This involves paging in
4682 * the memory, if necessary, and wiring it down for the duration of
4683 * the transfer. The complete() method completes the processing of
4684 * the memory after the I/O transfer finishes. This method needn't
4685 * called for non-pageable memory.
4686 */
4687
4688 IOReturn
prepare(IODirection forDirection)4689 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4690 {
4691 IOReturn error = kIOReturnSuccess;
4692 IOOptionBits type = _flags & kIOMemoryTypeMask;
4693 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4694
4695 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4696 traceInterval.setEndArg1(kIOReturnSuccess);
4697 return kIOReturnSuccess;
4698 }
4699
4700 assert(!(kIOMemoryRemote & _flags));
4701 if (kIOMemoryRemote & _flags) {
4702 traceInterval.setEndArg1(kIOReturnNotAttached);
4703 return kIOReturnNotAttached;
4704 }
4705
4706 if (_prepareLock) {
4707 IOLockLock(_prepareLock);
4708 }
4709
4710 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4711 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4712 error = kIOReturnNotReady;
4713 goto finish;
4714 }
4715 error = wireVirtual(forDirection);
4716 }
4717
4718 if (kIOReturnSuccess == error) {
4719 if (1 == ++_wireCount) {
4720 if (kIOMemoryClearEncrypt & _flags) {
4721 performOperation(kIOMemoryClearEncrypted, 0, _length);
4722 }
4723
4724 ktraceEmitPhysicalSegments();
4725 }
4726 }
4727
4728 finish:
4729
4730 if (_prepareLock) {
4731 IOLockUnlock(_prepareLock);
4732 }
4733 traceInterval.setEndArg1(error);
4734
4735 return error;
4736 }
4737
4738 /*
4739 * complete
4740 *
4741 * Complete processing of the memory after an I/O transfer finishes.
4742 * This method should not be called unless a prepare was previously
4743 * issued; the prepare() and complete() must occur in pairs, before
4744 * before and after an I/O transfer involving pageable memory.
4745 */
4746
IOReturn
// Undo a prepare(): drop one wire reference and, on the last reference (or
// when kIODirectionCompleteWithDataValid is requested), commit or abort the
// UPLs recorded during wireVirtual() and release the DMA mapping.
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors were never wired; nothing to complete.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// Unbalanced complete() is a caller bug; tolerated on release builds.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Re-encrypt before the final unwire if it was cleared in prepare().
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				// Still wired: just mark the UPL data valid, keep everything.
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last wire reference: full teardown.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Truncate _memoryEntries back to its header-only state.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4851
4852 IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4853 IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4854 {
4855 IOOptionBits createOptions = 0;
4856
4857 if (!(kIOMap64Bit & options)) {
4858 panic("IOMemoryDescriptor::makeMapping !64bit");
4859 }
4860 if (!(kIOMapReadOnly & options)) {
4861 createOptions |= kIOMemoryReferenceWrite;
4862 #if DEVELOPMENT || DEBUG
4863 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4864 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4865 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4866 }
4867 #endif
4868 }
4869 return createOptions;
4870 }
4871
4872 /*
4873 * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4874 * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4875 * creation.
4876 */
4877
IOMemoryMap *
IOGeneralMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping;

	// Copy-on-write descriptors create their named entry here, ahead of the
	// global lock taken in IOMemoryDescriptor::makeMapping(), because the
	// creation may allocate real pages (see file comment above).
	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
		struct IOMemoryReference * newRef;
		// With kIOMap64Bit, __address carries the IOMemoryMap pointer.
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
		if (kIOReturnSuccess == err) {
			// Another thread may race to install _memRef; loser frees its copy.
			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
				memoryReferenceFree(newRef);
			}
		}
	}
	if (kIOReturnSuccess != err) {
		return NULL;
	}
	mapping = IOMemoryDescriptor::makeMapping(
		owner, __intoTask, __address, options, __offset, __length);

#if IOTRACKING
	if ((mapping == (IOMemoryMap *) __address)
	    && (0 == (kIOMapStatic & mapping->fOptions))
	    && (NULL == mapping->fSuperMap)
	    && ((kIOTracking & gIOKitDebug) || _task)) {
		// only dram maps in the default on development case
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return mapping;
}
4917
IOReturn
// Map (a sub-range of) this descriptor into a task's address space. With
// kIOMap64Bit, *__address is actually the IOMemoryMap being populated.
IOGeneralMemoryDescriptor::doMap(
	vm_map_t __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t err;

	IOMemoryMap * mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t range0Len = 0;

	// Reject requests that fall outside the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	// If the request maps the memory back into its owning task at its own
	// address, reuse the existing range as a static mapping.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the named-entry reference backing this descriptor.
	if (!_memRef) {
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}


	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		// Redirect path: swap the pages of this mapping with a fresh UPL.
		do{
			upl_t redirUPL2;
			upl_size_t size;
			upl_control_flags_t flags;
			unsigned int lock_count;

			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Drop the recursive gIOMemoryLock across upl_transpose (which
			// can block), re-acquiring the same number of times after.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: map via the named-entry reference, then populate the
		// device pager for device memory.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				// Pager failure: tear the mapping back down.
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the buffer's cache mode when none was requested.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
5072
5073 #if IOTRACKING
IOReturn
// Recover the task/address/size of a tracked mapping from its embedded
// IOTrackingUser record (container-of style lookup).
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// Hand-rolled offsetof; presumably used instead of offsetof() because
// IOMemoryMap is a non-standard-layout class — TODO confirm.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// The mapping must still be attached to its task's live vm_map.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
5092 #endif /* IOTRACKING */
5093
5094 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5095 IOGeneralMemoryDescriptor::doUnmap(
5096 vm_map_t addressMap,
5097 IOVirtualAddress __address,
5098 IOByteCount __length )
5099 {
5100 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5101 IOReturn ret;
5102 ret = super::doUnmap(addressMap, __address, __length);
5103 traceInterval.setEndArg1(ret);
5104 return ret;
5105 }
5106
5107 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5108
#undef super
#define super OSObject

OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Eight pad slots reserved for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5122
5123 /* ex-inline function implementation */
IOPhysicalAddress
// Physical address of the first segment (offset 0) of this mapping.
IOMemoryMap::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
5129
5130 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5131
5132 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5133 IOMemoryMap::init(
5134 task_t intoTask,
5135 mach_vm_address_t toAddress,
5136 IOOptionBits _options,
5137 mach_vm_size_t _offset,
5138 mach_vm_size_t _length )
5139 {
5140 if (!intoTask) {
5141 return false;
5142 }
5143
5144 if (!super::init()) {
5145 return false;
5146 }
5147
5148 fAddressMap = get_task_map(intoTask);
5149 if (!fAddressMap) {
5150 return false;
5151 }
5152 vm_map_reference(fAddressMap);
5153
5154 fAddressTask = intoTask;
5155 fOptions = _options;
5156 fLength = _length;
5157 fOffset = _offset;
5158 fAddress = toAddress;
5159
5160 return true;
5161 }
5162
5163 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5164 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5165 {
5166 if (!_memory) {
5167 return false;
5168 }
5169
5170 if (!fSuperMap) {
5171 if ((_offset + fLength) > _memory->getLength()) {
5172 return false;
5173 }
5174 fOffset = _offset;
5175 }
5176
5177
5178 OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5179 if (fMemory) {
5180 if (fMemory != _memory) {
5181 fMemory->removeMapping(this);
5182 }
5183 }
5184 fMemory = os::move(tempval);
5185
5186 return true;
5187 }
5188
// Base-class doMap: the abstract descriptor cannot map itself; concrete
// subclasses (e.g. IOGeneralMemoryDescriptor) override this.
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5199
// Fault handler hook: while the descriptor is redirected (e.g. across system
// sleep) the faulting thread blocks here until redirect(..., false) runs the
// matching WAKEUP (see IOMemoryDescriptor::redirect).
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check the flag each wakeup; SLEEP may return spuriously.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5216
// Walk the descriptor's physical segments and hand each page to the device
// pager, so faults on [address, address+length) can be satisfied. For kernel
// mappings, additionally pre-faults the pmap entries since that cannot be
// done later from interrupt level.
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is populated from offset zero rather
	// than from the faulting offset.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the starting physical address down to a page boundary and widen
	// the request to include the partial leading page.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device memory should lie outside the managed page range; report
		// (without failing) if a managed page shows up here.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Populate either one contiguous chunk or page by page.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment; only the first iteration
		// carries a nonzero pageOffset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Residual bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5319
// Tear down a mapping. Under the kIOMap64Bit protocol, __address is actually
// an IOMemoryMap* (see makeMapping) and __length must be zero; the real
// address/length are taken from the mapping object.
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// A nonzero length means a caller used the obsolete pre-64-bit contract.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	if (kIOMapOverwrite & mapping->fOptions) {
		// Overwrite mappings never allocated VM; nothing to deallocate.
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in a submap of kernel_map; find it.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5364
// Redirect (or restore) every mapping of this descriptor. While redirected,
// faults block in handleFault; on un-redirect, waiters are woken and kernel
// mappings are repopulated via the device pager.
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn              err = kIOReturnSuccess;
	IOMemoryMap *         mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// Restoring with no safe task: repopulate kernel mappings
				// now, since kernel faults can't be taken later.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake threads blocked in handleFault's SLEEP loop.
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	// NOTE(review): this overwrites any error collected above on 32-bit
	// builds — appears intentional for binary compatibility; confirm.
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5419
// Redirect a single mapping: either unmap its pages (so later accesses fault
// and block in handleFault) or toggle the cache mode for write-combined
// mappings. Submaps defer entirely to their parent mapping.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping that was never established
			// or whose map has already been torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Non-static mappings outside the safe task lose their pages.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Safe-task write-combined mappings are flipped to
				// cache-inhibited while redirected.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Physical descriptors track redirect state themselves; propagate the
	// change once when it actually flips.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5463
// Remove the mapping from its task's address space (for a live, top-level,
// non-static map) and drop the vm_map reference taken in init().
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn err;

	LOCK;

	// Only a fully-established, owning (non-submap), non-static mapping
	// needs real VM teardown; doUnmap receives `this` per the 64-bit
	// protocol (see IOMemoryDescriptor::doUnmap).
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5489
// Called when the owning task terminates: optionally unmap (if the user
// client asked for unmap-on-death), then sever all ties to the dead task.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	// unmap() removes the tracking entry itself; do it here otherwise.
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5511
// Request that this mapping be torn down automatically when the owning
// task dies (checked in taskDied).
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5518
// Overload the release mechanism. All mappings must be a member
// of a memory descriptors _mappings set. This means that we
// always have 2 references on a mapping. When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// freeWhen of 2: free() fires when the count drops to the descriptor's
	// remaining reference, not to zero. Done under LOCK to serialize with
	// descriptor-side mapping removal.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5530
// Final teardown: unmap any live VM, detach from the descriptor and any
// parent submap, and release a leftover redirect UPL.
void
IOMemoryMap::free()
{
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A redirect UPL left over from redirect() must be committed before
	// deallocation to unblock access to the pages.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5554
// Length in bytes of this mapping.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5560
// Virtual address of the mapping in the target task. On 32-bit builds,
// warns when the address may not fit an IOVirtualAddress.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): return value discarded; only the parent's side
		// effects (its own warning) matter here — fAddress is returned
		// below regardless. Confirm this is intentional.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5576
#ifndef __LP64__
// 32-bit-only accessors returning the full 64-bit address and size;
// on LP64 these exist inline/elsewhere.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5590
5591
5592 task_t
getAddressTask()5593 IOMemoryMap::getAddressTask()
5594 {
5595 if (fSuperMap) {
5596 return fSuperMap->getAddressTask();
5597 } else {
5598 return fAddressTask;
5599 }
5600 }
5601
// Option bits (kIOMap*) this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5607
// Borrowed (non-retained) pointer to the descriptor backing this mapping.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5613
// Check whether an existing mapping (this) can satisfy a requested one.
// Returns this (retained) for an exact match, configures newMapping as a
// submap of this for a contained sub-range, or NULL if incompatible.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t            task      = newMapping->getAddressTask();
	mach_vm_address_t toAddress = newMapping->fAddress;
	IOOptionBits      _options  = newMapping->fOptions;
	mach_vm_size_t    _offset   = newMapping->fOffset;
	mach_vm_size_t    _length   = newMapping->fLength;

	// Must be the same address space.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Protection, guard and (non-default) cache attributes must agree.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must match exactly.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested window must lie entirely within this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: hand back an extra reference to this mapping.
		retain();
		newMapping = this;
	} else {
		// Sub-range: turn the request into a submap of this mapping.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset = fOffset + _offset;
		newMapping->fAddress = fAddress + _offset;
	}

	return newMapping;
}
5663
5664 IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5665 IOMemoryMap::wireRange(
5666 uint32_t options,
5667 mach_vm_size_t offset,
5668 mach_vm_size_t length)
5669 {
5670 IOReturn kr;
5671 mach_vm_address_t start = trunc_page_64(fAddress + offset);
5672 mach_vm_address_t end = round_page_64(fAddress + offset + length);
5673 vm_prot_t prot;
5674
5675 prot = (kIODirectionOutIn & options);
5676 if (prot) {
5677 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5678 } else {
5679 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5680 }
5681
5682 return kr;
5683 }
5684
5685
// Physical address (and remaining segment length) at `_offset` within the
// mapping, translating to the descriptor's coordinate space via fOffset.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5705
5706 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5707
#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// One-time global setup: allocate the recursive lock guarding descriptor
// state (used by LOCK/UNLOCK) and cache the highest physical page number.
void
IOMemoryDescriptor::initialize( void )
{
	if (NULL == gIOMemoryLock) {
		gIOMemoryLock = IORecursiveLockAlloc();
	}

	gIOLastPage = IOGetLastPageNumber();
}
5722
5723 void
free(void)5724 IOMemoryDescriptor::free( void )
5725 {
5726 if (_mappings) {
5727 _mappings.reset();
5728 }
5729
5730 if (reserved) {
5731 cleanKernelReserved(reserved);
5732 IOFreeType(reserved, IOMemoryDescriptorReserved);
5733 reserved = NULL;
5734 }
5735 super::free();
5736 }
5737
// Record a pre-existing mapping at a fixed address: a static mapping over
// the whole descriptor (no VM allocation is performed for kIOMapStatic).
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5748
// Convenience: map the whole descriptor anywhere in the kernel task.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5757
#ifndef __LP64__
// Legacy 32-bit map entry point. Fixed-address requests into 64-bit tasks
// cannot be expressed with a 32-bit IOVirtualAddress, so they are rejected
// in favor of createMappingInTask.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5776
// Create (or share) a mapping of [offset, offset+length) of this descriptor
// in `intoTask`. A zero length means the whole descriptor. The heavy lifting
// — compatibility sharing and actual VM allocation — happens in makeMapping.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

#if 136275805
	/*
	 * XXX: Redundantly check the mapping size here so that failure stack traces
	 * are more useful. This has no functional value but is helpful because
	 * telemetry traps can currently only capture the last five calls and
	 * so we want to trap as shallow as possible in a select few cases
	 * where we anticipate issues.
	 *
	 * When telemetry collection is complete, this will be removed.
	 */
	if (__improbable(mapping && !vm_map_is_map_size_valid(
		    get_task_map(intoTask), length, /* no_soft_limit */ false))) {
		mapping->release();
		mapping = NULL;
	}
#endif /* 136275805 */

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	// makeMapping consumes the mapping object (64-bit protocol: passed as
	// the IOVirtualAddress) and returns the mapping actually in effect.
	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5836
#ifndef __LP64__ // there is only a 64 bit version for LP64
// 32-bit shim: forward to the 64-bit-offset overload.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5846
// Re-point this mapping at a different backing descriptor. Access to the old
// pages is blocked via a UPL (UPL_BLOCK_ACCESS) while the swap happens, then
// the mapping is re-established over newBackingMemory at `offset`.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			// Remember a physical backing descriptor so its pages can be
			// unmapped below.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// With a sole memory-entry reference, take a blocking UPL on
			// the current pages so accesses stall until the swap completes.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// kIOMapReference | kIOMapUnique re-targets this
					// existing mapping in place; makeMapping must hand
					// back the same object.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Release the blocking UPL to let stalled accesses proceed
				// against the new backing.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5907
// Core mapping machinery (64-bit protocol: __address carries an IOMemoryMap*
// that this routine consumes). Handles three cases: static registration,
// unique re-targeting, and the common path of sharing a compatible existing
// mapping or creating new VM via doMap. Returns the mapping in effect with
// one retained reference, or NULL on failure (the input object is released).
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result = NULL;

	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		// Static mappings: no VM work, just register the mapping.
		if (kIOMapStatic & options) {
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		// Unique mappings of physical descriptors are re-made over a fresh
		// address-range descriptor covering just the needed span.
		if (kIOMapUnique & options) {
			addr64_t    phys;
			IOByteCount physLen;

			// if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true;
					}
					return false;
				});
			}
			// Found a shared mapping, or a reference-only request that
			// found nothing: the input mapping object is no longer needed.
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// No shareable mapping: create new VM through (possibly substitute)
		// descriptor's doMap.
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
6005
6006 void
addMapping(IOMemoryMap * mapping)6007 IOMemoryDescriptor::addMapping(
6008 IOMemoryMap * mapping )
6009 {
6010 if (mapping) {
6011 if (NULL == _mappings) {
6012 _mappings = OSSet::withCapacity(1);
6013 }
6014 if (_mappings) {
6015 _mappings->setObject( mapping );
6016 }
6017 }
6018 }
6019
// Unregister a mapping from this descriptor's mapping set (no-op if the
// set was never created).
void
IOMemoryDescriptor::removeMapping(
	IOMemoryMap *           mapping )
{
	if (_mappings) {
		_mappings->removeObject( mapping);
	}
}
6028
// Store IOMapper option bits consulted when this descriptor is mapped
// through a DMA mapper.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
6034
// Current IOMapper option bits set via setMapperOptions.
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
6040
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These stubs exist only for 32-bit binary compatibility; they always fail,
// forcing callers onto initWithOptions.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

// Obsolete accessor: virtual segments are no longer exposed.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6097
6098 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6099
// Serialize the descriptor's ranges as an array of {address, length}
// dictionaries. Range data is snapshotted under LOCK into a pre-sized copy
// so no allocation happens while the lock is held.
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const> keys[2] = {NULL};
	OSSharedPtr<OSObject>       values[2] = {NULL};
	OSSharedPtr<OSArray>        array;

	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	// Sized before taking the lock; compared against _rangesCount below to
	// detect a concurrent change.
	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length = len;
		}
	} else {
		// The descriptor changed out from under us. Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the serialized array outside the lock from the snapshot.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6174 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6175
// Reserved vtable slot bookkeeping: slots 1-7 were consumed by legacy 32-bit
// overrides; on LP64 they remain unused padding.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6202
6203 /* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
6204 KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6205 struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6206
/* ex-inline function implementation */
// Physical address of the first segment of the descriptor (offset 0,
// segment length not reported).
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6213
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

// Factory: allocate a mixed-data buffer object and give it an initial
// capacity; returns null if allocation or initialization fails.
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> instance = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (instance && !instance->initWithCapacity(capacity)) {
		instance.reset();
	}
	return instance;
}
6225
// (Re)initialize with at least `capacity` bytes of zeroed backing storage.
// An existing allocation is kept when it is already large enough and a
// nonzero capacity is requested; otherwise it is freed and reallocated.
// The logical length always resets to zero.
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Supports re-init: drop storage that is absent a request (capacity 0)
	// or too small for the new capacity.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		// Z_WAITOK_ZERO: the buffer comes back zero-filled.
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6250
// Release the backing allocation before base-class teardown.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6257
// Free the backing storage and reset capacity/length to an empty state.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6265
6266 bool
appendBytes(const void * bytes,size_t length)6267 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6268 {
6269 const auto oldLength = getLength();
6270 size_t newLength;
6271 if (os_add_overflow(oldLength, length, &newLength)) {
6272 return false;
6273 }
6274
6275 if (!setLength(newLength)) {
6276 return false;
6277 }
6278
6279 unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6280 if (bytes) {
6281 bcopy(bytes, dest, length);
6282 }
6283
6284 return true;
6285 }
6286
// Set the logical length, growing the backing allocation (zero-filled) when
// the new length exceeds the current capacity. Shrinking never reallocates.
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		// krealloc preserves existing contents; new tail is zeroed
		// (Z_WAITOK_ZERO). _data stays valid if this fails.
		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6307
6308 const void *
getBytes() const6309 _IOMemoryDescriptorMixedData::getBytes() const
6310 {
6311 return _length ? _data : nullptr;
6312 }
6313
6314 size_t
getLength() const6315 _IOMemoryDescriptorMixedData::getLength() const
6316 {
6317 return _data ? _length : 0;
6318 }
6319