xref: /xnu-8020.140.41/iokit/Kernel/IOMemoryDescriptor.cpp (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <vm/vm_fault.h>
73 #include <vm/vm_protos.h>
74 
75 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76 extern void ipc_port_release_send(ipc_port_t port);
77 
78 __END_DECLS
79 
// Sentinel passed as a mapper argument meaning "wait for the system mapper
// to become available" rather than a real IOMapper instance.
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the system-wide IOMapper, resolved lazily.
static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number seen; used to bound physical addresses.
ppnum_t           gIOLastPage;

enum {
	// Guard region size used for kIOMapGuardedLarge mappings (64 KB).
	kIOMapGuardSizeLarge = 65536
};
89 
90 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91 
// IOMemoryDescriptor is abstract: it defines the interface but cannot be
// instantiated directly.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// "super" for the IOGeneralMemoryDescriptor implementation below.
#define super IOMemoryDescriptor

// IOGeneralMemoryDescriptor instances come from a dedicated zone that is
// zeroed on free (ZC_ZFREE_CLEARMEM) to avoid leaking stale kernel data.
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
98 
99 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100 
// Global recursive lock serializing memory-descriptor state changes
// (prepare/complete, device pager interactions, etc.).
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers over the global lock. SLEEP/WAKEUP use `this`
// as the event, so they are only valid inside member functions.
#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging stub; flip the #if to 0/1 to enable kprintf tracing.
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
114 
115 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
116 
117 // Some data structures and accessor macros used by the initWithOptions
118 // Function
119 
// Per-IOPL flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

// Payload used when re-initializing a descriptor from a persistent
// memory reference (kIOMemoryTypePersistentMD path).
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

// Book-keeping for one UPL (universal page list) covering part of the
// descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags
};

enum { kMaxWireTags = 6 };

// Variable-length blob stored in the descriptor's _memoryEntries buffer.
// Layout: fixed header, then fPageCnt upl_page_info_t entries, then the
// ioPLBlock array (reached via getIOPLList()). Do not reorder fields:
// the accessor macros below depend on this exact layout.
struct ioGMDData {
	IOMapper *  fMapper;
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
166 
167 #pragma GCC visibility push(hidden)
168 
// Simple growable byte buffer used to hold the ioGMDData blob plus its
// trailing variable-length arrays. Hidden visibility: internal to IOKit.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer pre-sized to hold at least `capacity` bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	virtual bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append raw bytes, growing the backing store as needed.
	virtual bool appendBytes(const void * bytes, size_t length);
	// Set the logical length (must not exceed capacity semantics
	// enforced by the implementation).
	virtual void setLength(size_t length);

	virtual const void * getBytes() const;
	virtual size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;     // backing allocation
	size_t  _length = 0;         // bytes in use
	size_t  _capacity = 0;       // bytes allocated
};
191 
192 #pragma GCC visibility pop
193 
// Accessors for the ioGMDData blob held in a _IOMemoryDescriptorMixedData.
// getIOPLList() finds the ioPLBlock array that immediately follows the
// fPageCnt-sized page list; getNumIOPL() derives the IOPL count from the
// blob's total length. computeDataSize() sizes a blob for p pages + u IOPLs.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

// Mask of flags meaning "memory is not mappable/accessible normally".
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
203 
204 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
205 
extern "C" {
/*
 * Upcall from the VM device pager when a fault occurs on a device-backed
 * mapping. device_handle is the IOMemoryDescriptorReserved recorded at
 * pager setup time. Takes the global memory lock, retains the descriptor
 * (so it cannot be freed mid-fault), and forwards to handleFault().
 * Returns KERN_ABORTED if the descriptor has already been torn down.
 */
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// hold a reference across the fault handling
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

/*
 * Upcall from the VM when the device pager is finally torn down; frees
 * the reserved block that device_data_action dereferences. The pager
 * must guarantee no further upcalls after this point.
 */
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
};      // end extern "C"
243 
244 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
245 
246 // Note this inline function uses C++ reference arguments to return values
247 // This means that pointers are not passed and NULLs don't have to be
248 // checked for as a NULL reference is illegal.
249 static inline void
getAddrLenForInd(mach_vm_address_t & addr,mach_vm_size_t & len,UInt32 type,IOGeneralMemoryDescriptor::Ranges r,UInt32 ind)250 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
251     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
252 {
253 	assert(kIOMemoryTypeUIO == type
254 	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
255 	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
256 	if (kIOMemoryTypeUIO == type) {
257 		user_size_t us;
258 		user_addr_t ad;
259 		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
260 	}
261 #ifndef __LP64__
262 	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
263 		IOAddressRange cur = r.v64[ind];
264 		addr = cur.address;
265 		len  = cur.length;
266 	}
267 #endif /* !__LP64__ */
268 	else {
269 		IOVirtualRange cur = r.v[ind];
270 		addr = cur.address;
271 		len  = cur.length;
272 	}
273 }
274 
275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276 
277 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)278 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
279 {
280 	IOReturn err = kIOReturnSuccess;
281 
282 	*control = VM_PURGABLE_SET_STATE;
283 
284 	enum { kIOMemoryPurgeableControlMask = 15 };
285 
286 	switch (kIOMemoryPurgeableControlMask & newState) {
287 	case kIOMemoryPurgeableKeepCurrent:
288 		*control = VM_PURGABLE_GET_STATE;
289 		break;
290 
291 	case kIOMemoryPurgeableNonVolatile:
292 		*state = VM_PURGABLE_NONVOLATILE;
293 		break;
294 	case kIOMemoryPurgeableVolatile:
295 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
296 		break;
297 	case kIOMemoryPurgeableEmpty:
298 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
299 		break;
300 	default:
301 		err = kIOReturnBadArgument;
302 		break;
303 	}
304 
305 	if (*control == VM_PURGABLE_SET_STATE) {
306 		// let VM know this call is from the kernel and is allowed to alter
307 		// the volatility of the memory entry even if it was created with
308 		// MAP_MEM_PURGABLE_KERNEL_ONLY
309 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
310 	}
311 
312 	return err;
313 }
314 
315 static IOReturn
purgeableStateBits(int * state)316 purgeableStateBits(int * state)
317 {
318 	IOReturn err = kIOReturnSuccess;
319 
320 	switch (VM_PURGABLE_STATE_MASK & *state) {
321 	case VM_PURGABLE_NONVOLATILE:
322 		*state = kIOMemoryPurgeableNonVolatile;
323 		break;
324 	case VM_PURGABLE_VOLATILE:
325 		*state = kIOMemoryPurgeableVolatile;
326 		break;
327 	case VM_PURGABLE_EMPTY:
328 		*state = kIOMemoryPurgeableEmpty;
329 		break;
330 	default:
331 		*state = kIOMemoryPurgeableNonVolatile;
332 		err = kIOReturnNotReady;
333 		break;
334 	}
335 	return err;
336 }
337 
// One row per IOKit cache mode: the WIMG bits handed to the pmap/pager
// and the named-entry object type handed to the VM.
typedef struct {
	unsigned int wimg;
	unsigned int object_type;
} iokit_memtype_entry;

// Indexed directly by the kIO*Cache constants — the designated
// initializers keep the table correct even if the enum order changes.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
355 
356 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)357 vmProtForCacheMode(IOOptionBits cacheMode)
358 {
359 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
360 	vm_prot_t prot = 0;
361 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
362 	return prot;
363 }
364 
365 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)366 pagerFlagsForCacheMode(IOOptionBits cacheMode)
367 {
368 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
369 	if (cacheMode == kIODefaultCache) {
370 		return -1U;
371 	}
372 	return iomd_mem_types[cacheMode].wimg;
373 }
374 
375 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)376 cacheModeForPagerFlags(unsigned int pagerFlags)
377 {
378 	pagerFlags &= VM_WIMG_MASK;
379 	IOOptionBits cacheMode = kIODefaultCache;
380 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
381 		if (iomd_mem_types[i].wimg == pagerFlags) {
382 			cacheMode = i;
383 			break;
384 		}
385 	}
386 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
387 }
388 
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391 
// One Mach named memory entry covering a contiguous chunk of the
// descriptor.
struct IOMemoryEntry {
	ipc_port_t entry;    // send right for the named entry
	int64_t    offset;   // offset of this chunk within the descriptor
	uint64_t   size;     // size covered by this entry
	uint64_t   start;    // source address the entry was made at
};

// Refcounted collection of IOMemoryEntry's describing a whole descriptor;
// `entries` is a variable-length tail sized by `capacity`.
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;      // prot/MAP_MEM_* bits used at creation
	uint32_t                    capacity;  // allocated entry slots
	uint32_t                    count;     // entry slots in use
	struct IOMemoryReference  * mapRef;    // COW companion reference, if any
	IOMemoryEntry               entries[0];
};

// Options for memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (diagnostics).
SInt32 gIOMemoryReferenceCount;
415 
416 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)417 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
418 {
419 	IOMemoryReference * ref;
420 	size_t              newSize, oldSize, copySize;
421 
422 	newSize = (sizeof(IOMemoryReference)
423 	    - sizeof(ref->entries)
424 	    + capacity * sizeof(ref->entries[0]));
425 	ref = (typeof(ref))IOMalloc(newSize);
426 	if (realloc) {
427 		oldSize = (sizeof(IOMemoryReference)
428 		    - sizeof(realloc->entries)
429 		    + realloc->capacity * sizeof(realloc->entries[0]));
430 		copySize = oldSize;
431 		if (copySize > newSize) {
432 			copySize = newSize;
433 		}
434 		if (ref) {
435 			bcopy(realloc, ref, copySize);
436 		}
437 		IOFree(realloc, oldSize);
438 	} else if (ref) {
439 		bzero(ref, sizeof(*ref));
440 		ref->refCount = 1;
441 		OSIncrementAtomic(&gIOMemoryReferenceCount);
442 	}
443 	if (!ref) {
444 		return NULL;
445 	}
446 	ref->capacity = capacity;
447 	return ref;
448 }
449 
450 void
memoryReferenceFree(IOMemoryReference * ref)451 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
452 {
453 	IOMemoryEntry * entries;
454 	size_t          size;
455 
456 	if (ref->mapRef) {
457 		memoryReferenceFree(ref->mapRef);
458 		ref->mapRef = NULL;
459 	}
460 
461 	entries = ref->entries + ref->count;
462 	while (entries > &ref->entries[0]) {
463 		entries--;
464 		ipc_port_release_send(entries->entry);
465 	}
466 	size = (sizeof(IOMemoryReference)
467 	    - sizeof(ref->entries)
468 	    + ref->capacity * sizeof(ref->entries[0]));
469 	IOFree(ref, size);
470 
471 	OSDecrementAtomic(&gIOMemoryReferenceCount);
472 }
473 
474 void
memoryReferenceRelease(IOMemoryReference * ref)475 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
476 {
477 	if (1 == OSDecrementAtomic(&ref->refCount)) {
478 		memoryReferenceFree(ref);
479 	}
480 }
481 
482 
483 IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)484 IOGeneralMemoryDescriptor::memoryReferenceCreate(
485 	IOOptionBits         options,
486 	IOMemoryReference ** reference)
487 {
488 	enum { kCapacity = 4, kCapacityInc = 4 };
489 
490 	kern_return_t        err;
491 	IOMemoryReference *  ref;
492 	IOMemoryEntry *      entries;
493 	IOMemoryEntry *      cloneEntries = NULL;
494 	vm_map_t             map;
495 	ipc_port_t           entry, cloneEntry;
496 	vm_prot_t            prot;
497 	memory_object_size_t actualSize;
498 	uint32_t             rangeIdx;
499 	uint32_t             count;
500 	mach_vm_address_t    entryAddr, endAddr, entrySize;
501 	mach_vm_size_t       srcAddr, srcLen;
502 	mach_vm_size_t       nextAddr, nextLen;
503 	mach_vm_size_t       offset, remain;
504 	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
505 	int                  misaligned_start = 0, misaligned_end = 0;
506 	IOByteCount          physLen;
507 	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
508 	IOOptionBits         cacheMode;
509 	unsigned int         pagerFlags;
510 	vm_tag_t             tag;
511 	vm_named_entry_kernel_flags_t vmne_kflags;
512 
513 	ref = memoryReferenceAlloc(kCapacity, NULL);
514 	if (!ref) {
515 		return kIOReturnNoMemory;
516 	}
517 
518 	tag = (vm_tag_t) getVMTag(kernel_map);
519 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
520 	entries = &ref->entries[0];
521 	count = 0;
522 	err = KERN_SUCCESS;
523 
524 	offset = 0;
525 	rangeIdx = 0;
526 	remain = _length;
527 	if (_task) {
528 		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
529 
530 		// account for IOBMD setLength(), use its capacity as length
531 		IOBufferMemoryDescriptor * bmd;
532 		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
533 			nextLen = bmd->getCapacity();
534 			remain  = nextLen;
535 		}
536 	} else {
537 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
538 		nextLen = physLen;
539 
540 		// default cache mode for physical
541 		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
542 			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
543 			_flags |= (mode << kIOMemoryBufferCacheShift);
544 		}
545 	}
546 
547 	// cache mode & vm_prot
548 	prot = VM_PROT_READ;
549 	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
550 	prot |= vmProtForCacheMode(cacheMode);
551 	// VM system requires write access to change cache mode
552 	if (kIODefaultCache != cacheMode) {
553 		prot |= VM_PROT_WRITE;
554 	}
555 	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
556 		prot |= VM_PROT_WRITE;
557 	}
558 	if (kIOMemoryReferenceWrite & options) {
559 		prot |= VM_PROT_WRITE;
560 	}
561 	if (kIOMemoryReferenceCOW   & options) {
562 		prot |= MAP_MEM_VM_COPY;
563 	}
564 
565 	if (kIOMemoryUseReserve & _flags) {
566 		prot |= MAP_MEM_GRAB_SECLUDED;
567 	}
568 
569 	if ((kIOMemoryReferenceReuse & options) && _memRef) {
570 		cloneEntries = &_memRef->entries[0];
571 		prot |= MAP_MEM_NAMED_REUSE;
572 	}
573 
574 	if (_task) {
575 		// virtual ranges
576 
577 		if (kIOMemoryBufferPageable & _flags) {
578 			int ledger_tag, ledger_no_footprint;
579 
580 			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
581 			prot |= MAP_MEM_NAMED_CREATE;
582 
583 			// default accounting settings:
584 			//   + "none" ledger tag
585 			//   + include in footprint
586 			// can be changed later with ::setOwnership()
587 			ledger_tag = VM_LEDGER_TAG_NONE;
588 			ledger_no_footprint = 0;
589 
590 			if (kIOMemoryBufferPurgeable & _flags) {
591 				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
592 				if (VM_KERN_MEMORY_SKYWALK == tag) {
593 					// Skywalk purgeable memory accounting:
594 					//    + "network" ledger tag
595 					//    + not included in footprint
596 					ledger_tag = VM_LEDGER_TAG_NETWORK;
597 					ledger_no_footprint = 1;
598 				} else {
599 					// regular purgeable memory accounting:
600 					//    + no ledger tag
601 					//    + included in footprint
602 					ledger_tag = VM_LEDGER_TAG_NONE;
603 					ledger_no_footprint = 0;
604 				}
605 			}
606 			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
607 			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
608 			if (kIOMemoryUseReserve & _flags) {
609 				prot |= MAP_MEM_GRAB_SECLUDED;
610 			}
611 
612 			prot |= VM_PROT_WRITE;
613 			map = NULL;
614 		} else {
615 			prot |= MAP_MEM_USE_DATA_ADDR;
616 			map = get_task_map(_task);
617 		}
618 		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
619 
620 		while (remain) {
621 			srcAddr  = nextAddr;
622 			srcLen   = nextLen;
623 			nextAddr = 0;
624 			nextLen  = 0;
625 			// coalesce addr range
626 			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
627 				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
628 				if ((srcAddr + srcLen) != nextAddr) {
629 					break;
630 				}
631 				srcLen += nextLen;
632 			}
633 
634 			if (MAP_MEM_USE_DATA_ADDR & prot) {
635 				entryAddr = srcAddr;
636 				endAddr   = srcAddr + srcLen;
637 			} else {
638 				entryAddr = trunc_page_64(srcAddr);
639 				endAddr   = round_page_64(srcAddr + srcLen);
640 			}
641 			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
642 				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
643 			}
644 
645 			do{
646 				entrySize = (endAddr - entryAddr);
647 				if (!entrySize) {
648 					break;
649 				}
650 				actualSize = entrySize;
651 
652 				cloneEntry = MACH_PORT_NULL;
653 				if (MAP_MEM_NAMED_REUSE & prot) {
654 					if (cloneEntries < &_memRef->entries[_memRef->count]) {
655 						cloneEntry = cloneEntries->entry;
656 					} else {
657 						prot &= ~MAP_MEM_NAMED_REUSE;
658 					}
659 				}
660 
661 				err = mach_make_memory_entry_internal(map,
662 				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
663 
664 				if (KERN_SUCCESS != err) {
665 					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
666 					break;
667 				}
668 				if (MAP_MEM_USE_DATA_ADDR & prot) {
669 					if (actualSize > entrySize) {
670 						actualSize = entrySize;
671 					}
672 				} else if (actualSize > entrySize) {
673 					panic("mach_make_memory_entry_64 actualSize");
674 				}
675 
676 				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
677 
678 				if (count && overmap_start) {
679 					/*
680 					 * Track misaligned start for all
681 					 * except the first entry.
682 					 */
683 					misaligned_start++;
684 				}
685 
686 				if (overmap_end) {
687 					/*
688 					 * Ignore misaligned end for the
689 					 * last entry.
690 					 */
691 					if ((entryAddr + actualSize) != endAddr) {
692 						misaligned_end++;
693 					}
694 				}
695 
696 				if (count) {
697 					/* Middle entries */
698 					if (misaligned_start || misaligned_end) {
699 						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
700 						ipc_port_release_send(entry);
701 						err = KERN_NOT_SUPPORTED;
702 						break;
703 					}
704 				}
705 
706 				if (count >= ref->capacity) {
707 					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
708 					entries = &ref->entries[count];
709 				}
710 				entries->entry  = entry;
711 				entries->size   = actualSize;
712 				entries->offset = offset + (entryAddr - srcAddr);
713 				entries->start = entryAddr;
714 				entryAddr += actualSize;
715 				if (MAP_MEM_NAMED_REUSE & prot) {
716 					if ((cloneEntries->entry == entries->entry)
717 					    && (cloneEntries->size == entries->size)
718 					    && (cloneEntries->offset == entries->offset)) {
719 						cloneEntries++;
720 					} else {
721 						prot &= ~MAP_MEM_NAMED_REUSE;
722 					}
723 				}
724 				entries++;
725 				count++;
726 			}while (true);
727 			offset += srcLen;
728 			remain -= srcLen;
729 		}
730 	} else {
731 		// _task == 0, physical or kIOMemoryTypeUPL
732 		memory_object_t pager;
733 		vm_size_t       size = ptoa_64(_pages);
734 
735 		if (!getKernelReserved()) {
736 			panic("getKernelReserved");
737 		}
738 
739 		reserved->dp.pagerContig = (1 == _rangesCount);
740 		reserved->dp.memory      = this;
741 
742 		pagerFlags = pagerFlagsForCacheMode(cacheMode);
743 		if (-1U == pagerFlags) {
744 			panic("phys is kIODefaultCache");
745 		}
746 		if (reserved->dp.pagerContig) {
747 			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
748 		}
749 
750 		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
751 		    size, pagerFlags);
752 		assert(pager);
753 		if (!pager) {
754 			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
755 			err = kIOReturnVMError;
756 		} else {
757 			srcAddr  = nextAddr;
758 			entryAddr = trunc_page_64(srcAddr);
759 			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
760 			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
761 			assert(KERN_SUCCESS == err);
762 			if (KERN_SUCCESS != err) {
763 				device_pager_deallocate(pager);
764 			} else {
765 				reserved->dp.devicePager = pager;
766 				entries->entry  = entry;
767 				entries->size   = size;
768 				entries->offset = offset + (entryAddr - srcAddr);
769 				entries++;
770 				count++;
771 			}
772 		}
773 	}
774 
775 	ref->count = count;
776 	ref->prot  = prot;
777 
778 	if (_task && (KERN_SUCCESS == err)
779 	    && (kIOMemoryMapCopyOnWrite & _flags)
780 	    && !(kIOMemoryReferenceCOW & options)) {
781 		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
782 		if (KERN_SUCCESS != err) {
783 			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
784 		}
785 	}
786 
787 	if (KERN_SUCCESS == err) {
788 		if (MAP_MEM_NAMED_REUSE & prot) {
789 			memoryReferenceFree(ref);
790 			OSIncrementAtomic(&_memRef->refCount);
791 			ref = _memRef;
792 		}
793 	} else {
794 		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
795 		memoryReferenceFree(ref);
796 		ref = NULL;
797 	}
798 
799 	*reference = ref;
800 
801 	return err;
802 }
803 
804 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)805 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
806 {
807 	switch (kIOMapGuardedMask & options) {
808 	default:
809 	case kIOMapGuardedSmall:
810 		return vm_map_page_size(map);
811 	case kIOMapGuardedLarge:
812 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
813 		return kIOMapGuardSizeLarge;
814 	}
815 	;
816 }
817 
818 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)819 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
820     vm_map_offset_t addr, mach_vm_size_t size)
821 {
822 	kern_return_t   kr;
823 	vm_map_offset_t actualAddr;
824 	mach_vm_size_t  actualSize;
825 
826 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
827 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
828 
829 	if (kIOMapGuardedMask & options) {
830 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
831 		actualAddr -= guardSize;
832 		actualSize += 2 * guardSize;
833 	}
834 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
835 
836 	return kr;
837 }
838 
/*
 * Allocate (or claim) VM in `map` for a descriptor mapping, per the
 * IOMemoryDescriptorMapAllocRef passed as _ref. On success ref->mapped
 * holds the usable address (past the leading guard page, if any) and
 * ref->map records the map. Guarded mappings require kIOMapAnywhere.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		if (!(kIOMapAnywhere & ref->options)) {
			// A fixed address can't be honored once guards shift it.
			return kIOReturnBadArgument;
		}
		// Reserve room for a guard region on each side.
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}

	err = vm_map_enter_mem_object(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    VM_MAP_KERNEL_FLAGS_NONE,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Make the first and last guard-sized chunks inaccessible,
			// then report the address just past the leading guard.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
894 
895 IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)896 IOGeneralMemoryDescriptor::memoryReferenceMap(
897 	IOMemoryReference * ref,
898 	vm_map_t            map,
899 	mach_vm_size_t      inoffset,
900 	mach_vm_size_t      size,
901 	IOOptionBits        options,
902 	mach_vm_address_t * inaddr)
903 {
904 	IOReturn        err;
905 	int64_t         offset = inoffset;
906 	uint32_t        rangeIdx, entryIdx;
907 	vm_map_offset_t addr, mapAddr;
908 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
909 
910 	mach_vm_address_t nextAddr;
911 	mach_vm_size_t    nextLen;
912 	IOByteCount       physLen;
913 	IOMemoryEntry   * entry;
914 	vm_prot_t         prot, memEntryCacheMode;
915 	IOOptionBits      type;
916 	IOOptionBits      cacheMode;
917 	vm_tag_t          tag;
918 	// for the kIOMapPrefault option.
919 	upl_page_info_t * pageList = NULL;
920 	UInt              currentPageIndex = 0;
921 	bool              didAlloc;
922 
923 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
924 
925 	if (ref->mapRef) {
926 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
927 		return err;
928 	}
929 
930 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
931 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
932 		return err;
933 	}
934 
935 	type = _flags & kIOMemoryTypeMask;
936 
937 	prot = VM_PROT_READ;
938 	if (!(kIOMapReadOnly & options)) {
939 		prot |= VM_PROT_WRITE;
940 	}
941 	prot &= ref->prot;
942 
943 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
944 	if (kIODefaultCache != cacheMode) {
945 		// VM system requires write access to update named entry cache mode
946 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
947 	}
948 
949 	tag = (typeof(tag))getVMTag(map);
950 
951 	if (_task) {
952 		// Find first range for offset
953 		if (!_rangesCount) {
954 			return kIOReturnBadArgument;
955 		}
956 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
957 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
958 			if (remain < nextLen) {
959 				break;
960 			}
961 			remain -= nextLen;
962 		}
963 	} else {
964 		rangeIdx = 0;
965 		remain   = 0;
966 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
967 		nextLen  = size;
968 	}
969 
970 	assert(remain < nextLen);
971 	if (remain >= nextLen) {
972 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
973 		return kIOReturnBadArgument;
974 	}
975 
976 	nextAddr  += remain;
977 	nextLen   -= remain;
978 #if __ARM_MIXED_PAGE_SIZE__
979 	pageOffset = (vm_map_page_mask(map) & nextAddr);
980 #else /* __ARM_MIXED_PAGE_SIZE__ */
981 	pageOffset = (page_mask & nextAddr);
982 #endif /* __ARM_MIXED_PAGE_SIZE__ */
983 	addr       = 0;
984 	didAlloc   = false;
985 
986 	if (!(options & kIOMapAnywhere)) {
987 		addr = *inaddr;
988 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
989 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
990 		}
991 		addr -= pageOffset;
992 	}
993 
994 	// find first entry for offset
995 	for (entryIdx = 0;
996 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
997 	    entryIdx++) {
998 	}
999 	entryIdx--;
1000 	entry = &ref->entries[entryIdx];
1001 
1002 	// allocate VM
1003 #if __ARM_MIXED_PAGE_SIZE__
1004 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1005 #else
1006 	size = round_page_64(size + pageOffset);
1007 #endif
1008 	if (kIOMapOverwrite & options) {
1009 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1010 			map = IOPageableMapForAddress(addr);
1011 		}
1012 		err = KERN_SUCCESS;
1013 	} else {
1014 		IOMemoryDescriptorMapAllocRef ref;
1015 		ref.map     = map;
1016 		ref.tag     = tag;
1017 		ref.options = options;
1018 		ref.size    = size;
1019 		ref.prot    = prot;
1020 		if (options & kIOMapAnywhere) {
1021 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1022 			ref.mapped = 0;
1023 		} else {
1024 			ref.mapped = addr;
1025 		}
1026 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1027 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1028 		} else {
1029 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1030 		}
1031 		if (KERN_SUCCESS == err) {
1032 			addr     = ref.mapped;
1033 			map      = ref.map;
1034 			didAlloc = true;
1035 		}
1036 	}
1037 
1038 	/*
1039 	 * If the memory is associated with a device pager but doesn't have a UPL,
1040 	 * it will be immediately faulted in through the pager via populateDevicePager().
1041 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1042 	 * operations.
1043 	 */
1044 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1045 		options &= ~kIOMapPrefault;
1046 	}
1047 
1048 	/*
1049 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1050 	 * memory type, and the underlying data.
1051 	 */
1052 	if (options & kIOMapPrefault) {
1053 		/*
1054 		 * The memory must have been wired by calling ::prepare(), otherwise
1055 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1056 		 */
1057 		assert(_wireCount != 0);
1058 		assert(_memoryEntries != NULL);
1059 		if ((_wireCount == 0) ||
1060 		    (_memoryEntries == NULL)) {
1061 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1062 			return kIOReturnBadArgument;
1063 		}
1064 
1065 		// Get the page list.
1066 		ioGMDData* dataP = getDataP(_memoryEntries);
1067 		ioPLBlock const* ioplList = getIOPLList(dataP);
1068 		pageList = getPageList(dataP);
1069 
1070 		// Get the number of IOPLs.
1071 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1072 
1073 		/*
1074 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1075 		 * the offset. The research will go past it, so we'll need to go back to the
1076 		 * right range at the end.
1077 		 */
1078 		UInt ioplIndex = 0;
1079 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1080 			ioplIndex++;
1081 		}
1082 		ioplIndex--;
1083 
1084 		// Retrieve the IOPL info block.
1085 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1086 
1087 		/*
1088 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1089 		 * array.
1090 		 */
1091 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1092 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1093 		} else {
1094 			pageList = &pageList[ioplInfo.fPageInfo];
1095 		}
1096 
1097 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1098 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1099 
1100 		// Retrieve the index of the first page corresponding to the offset.
1101 		currentPageIndex = atop_32(offsetInIOPL);
1102 	}
1103 
1104 	// enter mappings
1105 	remain  = size;
1106 	mapAddr = addr;
1107 	addr    += pageOffset;
1108 
1109 	while (remain && (KERN_SUCCESS == err)) {
1110 		entryOffset = offset - entry->offset;
1111 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1112 			err = kIOReturnNotAligned;
1113 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1114 			break;
1115 		}
1116 
1117 		if (kIODefaultCache != cacheMode) {
1118 			vm_size_t unused = 0;
1119 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1120 			    memEntryCacheMode, NULL, entry->entry);
1121 			assert(KERN_SUCCESS == err);
1122 		}
1123 
1124 		entryOffset -= pageOffset;
1125 		if (entryOffset >= entry->size) {
1126 			panic("entryOffset");
1127 		}
1128 		chunk = entry->size - entryOffset;
1129 		if (chunk) {
1130 			vm_map_kernel_flags_t vmk_flags;
1131 
1132 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1133 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1134 
1135 			if (chunk > remain) {
1136 				chunk = remain;
1137 			}
1138 			if (options & kIOMapPrefault) {
1139 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1140 
1141 				err = vm_map_enter_mem_object_prefault(map,
1142 				    &mapAddr,
1143 				    chunk, 0 /* mask */,
1144 				    (VM_FLAGS_FIXED
1145 				    | VM_FLAGS_OVERWRITE),
1146 				    vmk_flags,
1147 				    tag,
1148 				    entry->entry,
1149 				    entryOffset,
1150 				    prot,                        // cur
1151 				    prot,                        // max
1152 				    &pageList[currentPageIndex],
1153 				    nb_pages);
1154 
1155 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1156 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1157 				}
1158 				// Compute the next index in the page list.
1159 				currentPageIndex += nb_pages;
1160 				assert(currentPageIndex <= _pages);
1161 			} else {
1162 				err = vm_map_enter_mem_object(map,
1163 				    &mapAddr,
1164 				    chunk, 0 /* mask */,
1165 				    (VM_FLAGS_FIXED
1166 				    | VM_FLAGS_OVERWRITE),
1167 				    vmk_flags,
1168 				    tag,
1169 				    entry->entry,
1170 				    entryOffset,
1171 				    false,               // copy
1172 				    prot,               // cur
1173 				    prot,               // max
1174 				    VM_INHERIT_NONE);
1175 			}
1176 			if (KERN_SUCCESS != err) {
1177 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1178 				break;
1179 			}
1180 			remain -= chunk;
1181 			if (!remain) {
1182 				break;
1183 			}
1184 			mapAddr  += chunk;
1185 			offset   += chunk - pageOffset;
1186 		}
1187 		pageOffset = 0;
1188 		entry++;
1189 		entryIdx++;
1190 		if (entryIdx >= ref->count) {
1191 			err = kIOReturnOverrun;
1192 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1193 			break;
1194 		}
1195 	}
1196 
1197 	if ((KERN_SUCCESS != err) && didAlloc) {
1198 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1199 		addr = 0;
1200 	}
1201 	*inaddr = addr;
1202 
1203 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1204 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1205 	}
1206 	return err;
1207 }
1208 
#define LOGUNALIGN 0

/*
 * memoryReferenceMapNew
 *
 * Map a window of a memory reference into 'map'. Unlike memoryReferenceMap,
 * this path sizes each entry with mach_memory_entry_map_size() and enters it
 * with VM_FLAGS_RETURN_DATA_ADDR, so it handles references created with
 * MAP_MEM_USE_DATA_ADDR (see the dispatch in memoryReferenceMap), whose data
 * address need not be page aligned in the target map.
 *
 * @param ref      memory reference to map
 * @param map      target VM map
 * @param inoffset byte offset into the reference where the mapping starts
 * @param size     number of bytes to map
 * @param options  kIOMap* options (kIOMapAnywhere, kIOMapReadOnly,
 *                 kIOMapOverwrite, kIOMapPrefault, cache-mode bits, ...)
 * @param inaddr   in: requested address unless kIOMapAnywhere;
 *                 out: the address actually mapped (the data address)
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn            err;
	int64_t             offset = inoffset;
	uint32_t            entryIdx, firstEntryIdx;
	vm_map_offset_t     addr, mapAddr, mapAddrOut;
	vm_map_offset_t     entryOffset, remain, chunk;

	IOMemoryEntry    * entry;
	vm_prot_t          prot, memEntryCacheMode;
	IOOptionBits       type;
	IOOptionBits       cacheMode;
	vm_tag_t           tag;
	// for the kIOMapPrefault option.
	upl_page_info_t  * pageList = NULL;
	UInt               currentPageIndex = 0;
	bool               didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference that wraps another reference is mapped via the wrapped one.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	// Protection: writable unless kIOMapReadOnly was asked for, and never
	// more permissive than the reference itself allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr       = 0;
	didAlloc   = false;

	// Honor a caller-specified address unless mapping anywhere.
	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain  = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	// First pass: walk the entries covering [offset, offset + size) and add
	// up the VM space each entry will occupy in 'map' (an entry's mapped
	// size can differ from its byte count when page sizes differ).
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}

#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset   += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			panic("overrun");
			err = kIOReturnOverrun;
			break;
		}
	}

	// Reserve VM space for the whole mapping (unless overwriting in place).
	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = mapSize;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain   = size;
	mapAddr  = addr;
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	// Second pass: enter each entry at mapAddr, advancing by the entry's
	// mapped size. VM_FLAGS_RETURN_DATA_ADDR yields the data address (not
	// the page base); the first chunk's result becomes the caller's address.
	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags;

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
				err = vm_map_enter_mem_object(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			// The first entry's returned data address is the caller's address.
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			// Advance by this entry's size as mapped in 'map', which may
			// differ from 'chunk' when page sizes differ.
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
			offset  += chunk;
		}

		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	// On failure, release any VM space reserved above.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
1534 
1535 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1536 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1537 	IOMemoryReference * ref,
1538 	uint64_t          * offset)
1539 {
1540 	kern_return_t kr;
1541 	vm_object_offset_t data_offset = 0;
1542 	uint64_t total;
1543 	uint32_t idx;
1544 
1545 	assert(ref->count);
1546 	if (offset) {
1547 		*offset = (uint64_t) data_offset;
1548 	}
1549 	total = 0;
1550 	for (idx = 0; idx < ref->count; idx++) {
1551 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1552 		    &data_offset);
1553 		if (KERN_SUCCESS != kr) {
1554 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1555 		} else if (0 != data_offset) {
1556 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1557 		}
1558 		if (offset && !idx) {
1559 			*offset = (uint64_t) data_offset;
1560 		}
1561 		total += round_page(data_offset + ref->entries[idx].size);
1562 	}
1563 
1564 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1565 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1566 
1567 	return total;
1568 }
1569 
1570 
1571 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1572 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1573 	IOMemoryReference * ref,
1574 	IOByteCount       * residentPageCount,
1575 	IOByteCount       * dirtyPageCount)
1576 {
1577 	IOReturn        err;
1578 	IOMemoryEntry * entries;
1579 	unsigned int resident, dirty;
1580 	unsigned int totalResident, totalDirty;
1581 
1582 	totalResident = totalDirty = 0;
1583 	err = kIOReturnSuccess;
1584 	entries = ref->entries + ref->count;
1585 	while (entries > &ref->entries[0]) {
1586 		entries--;
1587 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1588 		if (KERN_SUCCESS != err) {
1589 			break;
1590 		}
1591 		totalResident += resident;
1592 		totalDirty    += dirty;
1593 	}
1594 
1595 	if (residentPageCount) {
1596 		*residentPageCount = totalResident;
1597 	}
1598 	if (dirtyPageCount) {
1599 		*dirtyPageCount    = totalDirty;
1600 	}
1601 	return err;
1602 }
1603 
1604 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1605 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1606 	IOMemoryReference * ref,
1607 	IOOptionBits        newState,
1608 	IOOptionBits      * oldState)
1609 {
1610 	IOReturn        err;
1611 	IOMemoryEntry * entries;
1612 	vm_purgable_t   control;
1613 	int             totalState, state;
1614 
1615 	totalState = kIOMemoryPurgeableNonVolatile;
1616 	err = kIOReturnSuccess;
1617 	entries = ref->entries + ref->count;
1618 	while (entries > &ref->entries[0]) {
1619 		entries--;
1620 
1621 		err = purgeableControlBits(newState, &control, &state);
1622 		if (KERN_SUCCESS != err) {
1623 			break;
1624 		}
1625 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1626 		if (KERN_SUCCESS != err) {
1627 			break;
1628 		}
1629 		err = purgeableStateBits(&state);
1630 		if (KERN_SUCCESS != err) {
1631 			break;
1632 		}
1633 
1634 		if (kIOMemoryPurgeableEmpty == state) {
1635 			totalState = kIOMemoryPurgeableEmpty;
1636 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1637 			continue;
1638 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1639 			continue;
1640 		} else if (kIOMemoryPurgeableVolatile == state) {
1641 			totalState = kIOMemoryPurgeableVolatile;
1642 		} else {
1643 			totalState = kIOMemoryPurgeableNonVolatile;
1644 		}
1645 	}
1646 
1647 	if (oldState) {
1648 		*oldState = totalState;
1649 	}
1650 	return err;
1651 }
1652 
1653 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1654 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1655 	IOMemoryReference * ref,
1656 	task_t              newOwner,
1657 	int                 newLedgerTag,
1658 	IOOptionBits        newLedgerOptions)
1659 {
1660 	IOReturn        err, totalErr;
1661 	IOMemoryEntry * entries;
1662 
1663 	totalErr = kIOReturnSuccess;
1664 	entries = ref->entries + ref->count;
1665 	while (entries > &ref->entries[0]) {
1666 		entries--;
1667 
1668 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1669 		if (KERN_SUCCESS != err) {
1670 			totalErr = err;
1671 		}
1672 	}
1673 
1674 	return totalErr;
1675 }
1676 
1677 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1678 
1679 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1680 IOMemoryDescriptor::withAddress(void *      address,
1681     IOByteCount   length,
1682     IODirection direction)
1683 {
1684 	return IOMemoryDescriptor::
1685 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1686 }
1687 
1688 #ifndef __LP64__
1689 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1690 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1691     IOByteCount  length,
1692     IODirection  direction,
1693     task_t       task)
1694 {
1695 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1696 	if (that) {
1697 		if (that->initWithAddress(address, length, direction, task)) {
1698 			return os::move(that);
1699 		}
1700 	}
1701 	return nullptr;
1702 }
1703 #endif /* !__LP64__ */
1704 
1705 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1706 IOMemoryDescriptor::withPhysicalAddress(
1707 	IOPhysicalAddress       address,
1708 	IOByteCount             length,
1709 	IODirection             direction )
1710 {
1711 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1712 }
1713 
1714 #ifndef __LP64__
1715 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1716 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1717     UInt32           withCount,
1718     IODirection      direction,
1719     task_t           task,
1720     bool             asReference)
1721 {
1722 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1723 	if (that) {
1724 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1725 			return os::move(that);
1726 		}
1727 	}
1728 	return nullptr;
1729 }
1730 #endif /* !__LP64__ */
1731 
1732 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1733 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1734     mach_vm_size_t length,
1735     IOOptionBits   options,
1736     task_t         task)
1737 {
1738 	IOAddressRange range = { address, length };
1739 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1740 }
1741 
1742 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1743 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1744     UInt32           rangeCount,
1745     IOOptionBits     options,
1746     task_t           task)
1747 {
1748 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1749 	if (that) {
1750 		if (task) {
1751 			options |= kIOMemoryTypeVirtual64;
1752 		} else {
1753 			options |= kIOMemoryTypePhysical64;
1754 		}
1755 
1756 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1757 			return os::move(that);
1758 		}
1759 	}
1760 
1761 	return nullptr;
1762 }
1763 
1764 
1765 /*
1766  * withOptions:
1767  *
1768  * Create a new IOMemoryDescriptor. The buffer is made up of several
1769  * virtual address ranges, from a given task.
1770  *
1771  * Passing the ranges as a reference will avoid an extra allocation.
1772  */
1773 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1774 IOMemoryDescriptor::withOptions(void *          buffers,
1775     UInt32          count,
1776     UInt32          offset,
1777     task_t          task,
1778     IOOptionBits    opts,
1779     IOMapper *      mapper)
1780 {
1781 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1782 
1783 	if (self
1784 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1785 		return nullptr;
1786 	}
1787 
1788 	return os::move(self);
1789 }
1790 
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	// Base-class stub: the abstract IOMemoryDescriptor cannot be initialized
	// with options. Concrete subclasses (e.g. IOGeneralMemoryDescriptor)
	// override this and perform the real initialization.
	return false;
}
1801 
1802 #ifndef __LP64__
1803 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1804 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1805     UInt32          withCount,
1806     IODirection     direction,
1807     bool            asReference)
1808 {
1809 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1810 	if (that) {
1811 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1812 			return os::move(that);
1813 		}
1814 	}
1815 	return nullptr;
1816 }
1817 
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
    IOByteCount             offset,
    IOByteCount             length,
    IODirection             direction)
{
	// Legacy (32-bit) entry point; sub-range descriptors are implemented by
	// the dedicated IOSubMemoryDescriptor class.
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
1826 #endif /* !__LP64__ */
1827 
1828 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1829 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1830 {
1831 	IOGeneralMemoryDescriptor *origGenMD =
1832 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1833 
1834 	if (origGenMD) {
1835 		return IOGeneralMemoryDescriptor::
1836 		       withPersistentMemoryDescriptor(origGenMD);
1837 	} else {
1838 		return nullptr;
1839 	}
1840 }
1841 
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Obtain a memory reference for the original descriptor, reusing its
	// existing one when possible (kIOMemoryReferenceReuse).
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// If the original's own reference was handed back, the original
	// descriptor already serves as the persistent descriptor: return it
	// retained, and drop the extra reference memoryReferenceCreate() took.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise wrap the new reference in a fresh descriptor initialized as
	// kIOMemoryTypePersistentMD.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1867 
1868 #ifndef __LP64__
1869 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1870 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1871     IOByteCount   withLength,
1872     IODirection withDirection)
1873 {
1874 	_singleRange.v.address = (vm_offset_t) address;
1875 	_singleRange.v.length  = withLength;
1876 
1877 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1878 }
1879 
1880 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1881 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1882     IOByteCount    withLength,
1883     IODirection  withDirection,
1884     task_t       withTask)
1885 {
1886 	_singleRange.v.address = address;
1887 	_singleRange.v.length  = withLength;
1888 
1889 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1890 }
1891 
1892 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1893 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1894 	IOPhysicalAddress      address,
1895 	IOByteCount            withLength,
1896 	IODirection            withDirection )
1897 {
1898 	_singleRange.p.address = address;
1899 	_singleRange.p.length  = withLength;
1900 
1901 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1902 }
1903 
1904 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1905 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1906 	IOPhysicalRange * ranges,
1907 	UInt32            count,
1908 	IODirection       direction,
1909 	bool              reference)
1910 {
1911 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1912 
1913 	if (reference) {
1914 		mdOpts |= kIOMemoryAsReference;
1915 	}
1916 
1917 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1918 }
1919 
1920 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1921 IOGeneralMemoryDescriptor::initWithRanges(
1922 	IOVirtualRange * ranges,
1923 	UInt32           count,
1924 	IODirection      direction,
1925 	task_t           task,
1926 	bool             reference)
1927 {
1928 	IOOptionBits mdOpts = direction;
1929 
1930 	if (reference) {
1931 		mdOpts |= kIOMemoryAsReference;
1932 	}
1933 
1934 	if (task) {
1935 		mdOpts |= kIOMemoryTypeVirtual;
1936 
1937 		// Auto-prepare if this is a kernel memory descriptor as very few
1938 		// clients bother to prepare() kernel memory.
1939 		// But it was not enforced so what are you going to do?
1940 		if (task == kernel_task) {
1941 			mdOpts |= kIOMemoryAutoPrepare;
1942 		}
1943 	} else {
1944 		mdOpts |= kIOMemoryTypePhysical;
1945 	}
1946 
1947 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1948 }
1949 #endif /* !__LP64__ */
1950 
1951 /*
1952  * initWithOptions:
1953  *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, a UPL
 * from the ubc system, or a uio (may be 64-bit) from the BSD subsystem.
1957  *
1958  * Passing the ranges as a reference will avoid an extra allocation.
1959  *
1960  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1961  * existing instance -- note this behavior is not commonly supported in other
1962  * I/O Kit classes, although it is supported here.
1963  */
1964 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	// The memory-type bits select how 'buffers' is interpreted
	// (virtual/physical ranges, UPL, uio, or persistent-MD init data).
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject 32-bit virtual ranges describing memory in a 64-bit task;
	// such callers must use ::withAddressRange() instead.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the type/task pairing: virtual-style memory requires an
	// owning task, physical and UPL memory must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Balance any outstanding prepare() calls before retargeting.
			while (_wireCount) {
				complete();
			}
		}
		// Free any range storage we allocated (by-reference ranges are
		// owned by the caller).
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Preserve the redirected state; only tear down the named entry
		// and mappings when not redirected.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-descriptor bookkeeping (safe for both fresh and
	// retargeted descriptors).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Lazily create (or drop) the prepare lock to match the thread-safe option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// External UPL: 'buffers' is an upl_t, 'count' is the byte length
		// and 'offset' is the starting offset within the UPL.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be stored
				// in the embedded 32-bit single-range slot instead of
				// allocating.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// Single ranges live in the embedded slot; multi-range
				// descriptors allocate a copy of the caller's vector.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
#if CONFIG_PROB_GZALLOC
		if (task == kernel_task) {
			for (UInt32 i = 0; i < count; i++) {
				_ranges.v[i].address = pgz_decode(_ranges.v[i].address, _ranges.v[i].length);
			}
		}
#endif /* CONFIG_PROB_GZALLOC */
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Walk every range, accumulating total byte length and page count
		// with overflow checks; a 'break' before the loop finishes marks
		// failure (checked via ind < count below).
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);
			if (_task) {
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity bound: can't describe more pages than exist.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Tag kernel allocations made on behalf of user tasks (or
			// pageable buffers) so accounting attributes them correctly.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named entry up front.
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2344 
2345 /*
2346  * free
2347  *
2348  * Free resources.
2349  */
void
IOGeneralMemoryDescriptor::free()
{
	// Tear down everything initWithOptions() set up: wiring, DMA
	// mappings, range storage, reserved data, named entry, and lock.
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Clear the device pager's back-pointer under LOCK — presumably so a
	// concurrent pager path can't observe a dying descriptor; confirm.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		// Physical descriptors never go through complete(); undo any
		// outstanding DMA mapping directly.
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance any outstanding prepare() calls (implied completion).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free range storage we allocated; by-reference ranges belong to the caller.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2413 
2414 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Deprecated legacy (pre-LP64) API; unconditionally fatal if called.
	panic("IOGMD::unmapFromKernel deprecated");
}
2420 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Deprecated legacy (pre-LP64) API; unconditionally fatal if called.
	panic("IOGMD::mapIntoKernel deprecated");
}
2426 #endif /* !__LP64__ */
2427 
2428 /*
2429  * getDirection:
2430  *
2431  * Get the direction of the transfer.
2432  */
2433 IODirection
getDirection() const2434 IOMemoryDescriptor::getDirection() const
2435 {
2436 #ifndef __LP64__
2437 	if (_direction) {
2438 		return _direction;
2439 	}
2440 #endif /* !__LP64__ */
2441 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2442 }
2443 
2444 /*
2445  * getLength:
2446  *
2447  * Get the length of the transfer (over all ranges).
2448  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total transfer length in bytes summed over all ranges,
	// computed once at initialization.
	return _length;
}
2454 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Store the client-defined tag; no interpretation is applied here.
	_tag = tag;
}
2460 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Return the client-defined tag previously stored via setTag().
	return _tag;
}
2466 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Expose the raw option/flag bits (memory type, direction, options).
	return _flags;
}
2472 
2473 OSObject *
copyContext(void) const2474 IOMemoryDescriptor::copyContext(void) const
2475 {
2476 	if (reserved) {
2477 		OSObject * context = reserved->contextObject;
2478 		if (context) {
2479 			context->retain();
2480 		}
2481 		return context;
2482 	} else {
2483 		return NULL;
2484 	}
2485 }
2486 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// Attach (or clear, with obj == NULL) a retained context object on
	// this descriptor; any previously attached object is released.
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// NOTE: this local intentionally shadows the 'reserved' member;
	// getKernelReserved() lazily allocates the expansion data.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Clear the old pointer with a compare-and-swap so that only one
		// of several racing callers performs the release.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2507 
2508 #ifndef __LP64__
2509 #pragma clang diagnostic push
2510 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2511 
2512 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2513 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2514 IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2515 {
2516 	addr64_t physAddr = 0;
2517 
2518 	if (prepare() == kIOReturnSuccess) {
2519 		physAddr = getPhysicalSegment64( offset, length );
2520 		complete();
2521 	}
2522 
2523 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2524 }
2525 
2526 #pragma clang diagnostic pop
2527 
2528 #endif /* !__LP64__ */
2529 
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	// Copy up to 'length' bytes of the descriptor's memory, starting at
	// 'offset', into the kernel buffer 'bytes'.  Returns the number of
	// bytes copied (0 on a range violation or unreadable memory).
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;


	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote memory cannot be accessed through the CPU's physical aperture.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk the descriptor one physical segment at a time, copying each
	// segment through the physical aperture with copypv().
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;

		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv() takes an unsigned int length; clamp oversized segments
		// (the loop will pick up the rest on the next pass).
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

	return length - remaining;
}
2593 
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	// Copy up to 'length' bytes from the kernel buffer 'bytes' into the
	// descriptor's memory starting at 'inoffset'.  A NULL 'bytes' pointer
	// zero-fills the target range instead.  Returns the bytes written.
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse to write memory that was prepared read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote memory cannot be accessed through the CPU's physical aperture.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk the descriptor one physical segment at a time.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv()/bzero_phys() take an unsigned int length; clamp.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (!srcAddr) {
			// NULL source: zero-fill this segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// Zero-fill path used bzero_phys, which doesn't flush; do it explicitly.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2673 
2674 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Deprecated legacy (pre-LP64) API; unconditionally fatal if called.
	panic("IOGMD::setPosition deprecated");
}
2680 #endif /* !__LP64__ */
2681 
// Global monotonic ID generators shared by all memory descriptors.
// 8-byte alignment is required for the 64-bit atomic increment
// (OSIncrementAtomic64) used when handing out new IDs.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2684 
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// Preparation IDs are only meaningful while the memory is prepared.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors keep their ID in the superclass reserved area
	// rather than in ioGMDData.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign a globally unique ID; the CAS makes racing callers
	// agree on a single winner.
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2710 
2711 void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2712 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2713 {
2714 	if (reserved->creator) {
2715 		task_deallocate(reserved->creator);
2716 		reserved->creator = NULL;
2717 	}
2718 
2719 	if (reserved->contextObject) {
2720 		reserved->contextObject->release();
2721 		reserved->contextObject = NULL;
2722 	}
2723 }
2724 
2725 IOMemoryDescriptorReserved *
getKernelReserved(void)2726 IOMemoryDescriptor::getKernelReserved( void )
2727 {
2728 	if (!reserved) {
2729 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2730 	}
2731 	return reserved;
2732 }
2733 
void
IOMemoryDescriptor::setPreparationID( void )
{
	// Assign a globally unique preparation ID at most once; the CAS makes
	// racing callers agree on a single winner.
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2742 
2743 uint64_t
getPreparationID(void)2744 IOMemoryDescriptor::getPreparationID( void )
2745 {
2746 	if (reserved) {
2747 		return reserved->preparationID;
2748 	} else {
2749 		return kIOPreparationIDUnsupported;
2750 	}
2751 }
2752 
void
IOMemoryDescriptor::setDescriptorID( void )
{
	// Assign a globally unique descriptor ID at most once; the CAS makes
	// racing callers agree on a single winner.
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2761 
2762 uint64_t
getDescriptorID(void)2763 IOMemoryDescriptor::getDescriptorID( void )
2764 {
2765 	setDescriptorID();
2766 
2767 	if (reserved) {
2768 		return reserved->descriptorID;
2769 	} else {
2770 		return kIODescriptorIDInvalid;
2771 	}
2772 }
2773 
2774 IOReturn
ktraceEmitPhysicalSegments(void)2775 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2776 {
2777 	if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2778 		return kIOReturnSuccess;
2779 	}
2780 
2781 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2782 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2783 		return kIOReturnBadArgument;
2784 	}
2785 
2786 	uint64_t descriptorID = getDescriptorID();
2787 	assert(descriptorID != kIODescriptorIDInvalid);
2788 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2789 		return kIOReturnBadArgument;
2790 	}
2791 
2792 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2793 
2794 #if __LP64__
2795 	static const uint8_t num_segments_page = 8;
2796 #else
2797 	static const uint8_t num_segments_page = 4;
2798 #endif
2799 	static const uint8_t num_segments_long = 2;
2800 
2801 	IOPhysicalAddress segments_page[num_segments_page];
2802 	IOPhysicalRange   segments_long[num_segments_long];
2803 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2804 	memset(segments_long, 0, sizeof(segments_long));
2805 
2806 	uint8_t segment_page_idx = 0;
2807 	uint8_t segment_long_idx = 0;
2808 
2809 	IOPhysicalRange physical_segment;
2810 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2811 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2812 
2813 		if (physical_segment.length == 0) {
2814 			break;
2815 		}
2816 
2817 		/**
2818 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2819 		 * buffer memory, pack segment events according to the following.
2820 		 *
2821 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2822 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2823 		 *
2824 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2825 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2826 		 * - unmapped pages will have a ppn of MAX_INT_32
2827 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2828 		 * - address_0, length_0, address_0, length_1
2829 		 * - unmapped pages will have an address of 0
2830 		 *
2831 		 * During each iteration do the following depending on the length of the mapping:
2832 		 * 1. add the current segment to the appropriate queue of pending segments
2833 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2834 		 * 1a. if FALSE emit and reset all events in the previous queue
2835 		 * 2. check if we have filled up the current queue of pending events
2836 		 * 2a. if TRUE emit and reset all events in the pending queue
2837 		 * 3. after completing all iterations emit events in the current queue
2838 		 */
2839 
2840 		bool emit_page = false;
2841 		bool emit_long = false;
2842 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2843 			segments_page[segment_page_idx] = physical_segment.address;
2844 			segment_page_idx++;
2845 
2846 			emit_long = segment_long_idx != 0;
2847 			emit_page = segment_page_idx == num_segments_page;
2848 
2849 			if (os_unlikely(emit_long)) {
2850 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2851 				    segments_long[0].address, segments_long[0].length,
2852 				    segments_long[1].address, segments_long[1].length);
2853 			}
2854 
2855 			if (os_unlikely(emit_page)) {
2856 #if __LP64__
2857 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2858 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2859 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2860 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2861 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2862 #else
2863 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2864 				    (ppnum_t) atop_32(segments_page[1]),
2865 				    (ppnum_t) atop_32(segments_page[2]),
2866 				    (ppnum_t) atop_32(segments_page[3]),
2867 				    (ppnum_t) atop_32(segments_page[4]));
2868 #endif
2869 			}
2870 		} else {
2871 			segments_long[segment_long_idx] = physical_segment;
2872 			segment_long_idx++;
2873 
2874 			emit_page = segment_page_idx != 0;
2875 			emit_long = segment_long_idx == num_segments_long;
2876 
2877 			if (os_unlikely(emit_page)) {
2878 #if __LP64__
2879 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2880 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2881 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2882 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2883 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2884 #else
2885 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2886 				    (ppnum_t) atop_32(segments_page[1]),
2887 				    (ppnum_t) atop_32(segments_page[2]),
2888 				    (ppnum_t) atop_32(segments_page[3]),
2889 				    (ppnum_t) atop_32(segments_page[4]));
2890 #endif
2891 			}
2892 
2893 			if (emit_long) {
2894 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2895 				    segments_long[0].address, segments_long[0].length,
2896 				    segments_long[1].address, segments_long[1].length);
2897 			}
2898 		}
2899 
2900 		if (os_unlikely(emit_page)) {
2901 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2902 			segment_page_idx = 0;
2903 		}
2904 
2905 		if (os_unlikely(emit_long)) {
2906 			memset(segments_long, 0, sizeof(segments_long));
2907 			segment_long_idx = 0;
2908 		}
2909 	}
2910 
2911 	if (segment_page_idx != 0) {
2912 		assert(segment_long_idx == 0);
2913 #if __LP64__
2914 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2915 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2916 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2917 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2918 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2919 #else
2920 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2921 		    (ppnum_t) atop_32(segments_page[1]),
2922 		    (ppnum_t) atop_32(segments_page[2]),
2923 		    (ppnum_t) atop_32(segments_page[3]),
2924 		    (ppnum_t) atop_32(segments_page[4]));
2925 #endif
2926 	} else if (segment_long_idx != 0) {
2927 		assert(segment_page_idx == 0);
2928 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2929 		    segments_long[0].address, segments_long[0].length,
2930 		    segments_long[1].address, segments_long[1].length);
2931 	}
2932 
2933 	return kIOReturnSuccess;
2934 }
2935 
2936 void
setVMTags(uint32_t kernelTag,uint32_t userTag)2937 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2938 {
2939 	_kernelTag = (vm_tag_t) kernelTag;
2940 	_userTag   = (vm_tag_t) userTag;
2941 }
2942 
2943 uint32_t
getVMTag(vm_map_t map)2944 IOMemoryDescriptor::getVMTag(vm_map_t map)
2945 {
2946 	if (vm_kernel_map_is_kernel(map)) {
2947 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2948 			return (uint32_t) _kernelTag;
2949 		}
2950 	} else {
2951 		if (VM_KERN_MEMORY_NONE != _userTag) {
2952 			return (uint32_t) _userTag;
2953 		}
2954 	}
2955 	return IOMemoryTag(map);
2956 }
2957 
/*
 * Multiplexed DMA operations on a general memory descriptor.
 *
 * "op" carries both an operation selector (the bits in
 * kIOMDDMACommandOperationMask) and op-specific flag bits, split out below
 * into "params".  vData points at an op-specific argument structure of at
 * least dataSize bytes (kIOReturnUnderrun otherwise).
 *
 * Ops handled: kIOMDDMAMap, kIOMDDMAUnmap, kIOMDAddDMAMapSpec,
 * kIOMDGetCharacteristics, kIOMDDMAActive, and the segment walker
 * (kIOMDWalkSegments / kIOMDFirstSegment) which falls through to the
 * bottom half of the function.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	// Const is cast away because several ops mutate cached mapping state
	// (fMappedBase et al.) on this descriptor.
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split op into flag bits (params) and the operation selector proper.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Lazily create the memory-entries data block if it doesn't exist.
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the cached mapping constraints to the strictest
			// requirements seen so far.
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// The descriptor's cached mapping can only be shared out when
			// it is a whole-length system-mapper mapping.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Remap when the cached mapping can't satisfy the request:
			// partial/foreign mapping, address-width overflow, or an
			// alignment stricter than a page.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				// Cache a fresh whole-length system mapping on the IOMD;
				// zeroing fAllocLength transfers ownership of the alloc.
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Hand out the existing cached mapping; the caller does not
				// own it (fAllocLength == 0) but its use is recorded.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): the dmaUnmap result in "err" is discarded and
		// success is returned unconditionally — presumably intentional
		// (unmap is best-effort); confirm before changing.
		return kIOReturnSuccess;
	}

	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			// Merge the caller's constraints into the cached map spec,
			// keeping the strictest of each.
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				// Page alignment is only meaningful for a single IOPL.
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// params != 0 => take a DMA reference; params == 0 => drop one.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	// Walker state persisted across calls inside the caller-supplied
	// buffer; fOffset2Index/fNextOffset/fIndex cache the previous walk
	// position so sequential walks avoid rescanning from the start.
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// Mapped walk with no valid cached system mapping yet: create one.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the mapped base, or fall back to an unmapped walk if no
	// usable mapping exists.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	// Resume from the cached position only for plain sequential walks;
	// any op flag bits (params) force a rescan from the beginning.
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			// Mapped: IOVA space is contiguous, so offset indexes it directly.
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// General (IOPL-backed) case: walk the wired page lists.
		// do { ... } while (false) is used so "continue" can short-circuit
		// once the address has been computed.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainider of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}


			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3340 
/*
 * Return the address of the contiguous segment containing "offset", and
 * (optionally) its length via *lengthOfSegment.  "options" selects the
 * address space: _kIOMemorySourceSegment walks the raw source ranges
 * (works unwired), kIOMemoryMapperNone yields CPU-physical addresses,
 * otherwise mapped (IOVA) addresses via the segment walker.  Returns 0
 * (with *lengthOfSegment = 0) when offset is out of range or the segment
 * cannot be resolved.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Delegate to the stateful segment walker (dmaCommandOperation).
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Request a mapped walk only when the caller allows it and the
		// memory is neither host-only nor remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Translate the mapped (IOVA) result back to CPU-physical,
				// extending length page-by-page while the translation
				// stays contiguous.
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3447 
3448 #ifndef __LP64__
3449 #pragma clang diagnostic push
3450 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3451 
3452 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3453 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3454 {
3455 	addr64_t address = 0;
3456 
3457 	if (options & _kIOMemorySourceSegment) {
3458 		address = getSourceSegment(offset, lengthOfSegment);
3459 	} else if (options & kIOMemoryMapperNone) {
3460 		address = getPhysicalSegment64(offset, lengthOfSegment);
3461 	} else {
3462 		address = getPhysicalSegment(offset, lengthOfSegment);
3463 	}
3464 
3465 	return address;
3466 }
3467 #pragma clang diagnostic pop
3468 
3469 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3470 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3471 {
3472 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3473 }
3474 
3475 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3476 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3477 {
3478 	addr64_t    address = 0;
3479 	IOByteCount length  = 0;
3480 
3481 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3482 
3483 	if (lengthOfSegment) {
3484 		length = *lengthOfSegment;
3485 	}
3486 
3487 	if ((address + length) > 0x100000000ULL) {
3488 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3489 		    address, (long) length, (getMetaClass())->getClassName());
3490 	}
3491 
3492 	return (IOPhysicalAddress) address;
3493 }
3494 
3495 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3496 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3497 {
3498 	IOPhysicalAddress phys32;
3499 	IOByteCount       length;
3500 	addr64_t          phys64;
3501 	IOMapper *        mapper = NULL;
3502 
3503 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3504 	if (!phys32) {
3505 		return 0;
3506 	}
3507 
3508 	if (gIOSystemMapper) {
3509 		mapper = gIOSystemMapper;
3510 	}
3511 
3512 	if (mapper) {
3513 		IOByteCount origLen;
3514 
3515 		phys64 = mapper->mapToPhysicalAddress(phys32);
3516 		origLen = *lengthOfSegment;
3517 		length = page_size - (phys64 & (page_size - 1));
3518 		while ((length < origLen)
3519 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3520 			length += page_size;
3521 		}
3522 		if (length > origLen) {
3523 			length = origLen;
3524 		}
3525 
3526 		*lengthOfSegment = length;
3527 	} else {
3528 		phys64 = (addr64_t) phys32;
3529 	}
3530 
3531 	return phys64;
3532 }
3533 
3534 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3535 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3536 {
3537 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3538 }
3539 
3540 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3541 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3542 {
3543 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3544 }
3545 
3546 #pragma clang diagnostic push
3547 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3548 
3549 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3550 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3551     IOByteCount * lengthOfSegment)
3552 {
3553 	if (_task == kernel_task) {
3554 		return (void *) getSourceSegment(offset, lengthOfSegment);
3555 	} else {
3556 		panic("IOGMD::getVirtualSegment deprecated");
3557 	}
3558 
3559 	return NULL;
3560 }
3561 #pragma clang diagnostic pop
3562 #endif /* !__LP64__ */
3563 
/*
 * Base-class implementation of the multiplexed DMA operations, for
 * descriptors that do not override it.  Only knows how to answer via the
 * public getPhysicalSegment()/dmaMap()/dmaUnmap() interfaces; the segment
 * walk is stateless (rescans from "fOffset" each call).
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Const cast mirrors the IOGeneralMemoryDescriptor override; dmaMap /
	// dmaUnmap are non-const.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split op into flag bits (params) and the operation selector.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// Mapped walks are not supported by the base class.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Resolve the segment at "offset", then coalesce physically
		// contiguous follow-on segments into one result.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): dmaUnmap's result in "err" is discarded and success
		// is returned unconditionally — matches the IOGMD override; confirm
		// intent before changing.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3635 
/*
 * Change the purgeable state of this descriptor's memory; optionally
 * report the previous state via *oldState.  When a memory reference
 * exists, defers to the superclass (which operates on the reference).
 * Otherwise operates directly on the owning task's vm_map — which only
 * supports a single range and requires a real task (not pageable kernel
 * buffers).  Honors kIOMemoryThreadSafe locking; remote descriptors are
 * rejected with kIOReturnNotAttached.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do { } while (false) so error paths can break straight to the
		// unlock below.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0);

			// Translate the IOKit purgeable constants into VM control/state.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			// "state" is in-out: on success it returns the previous state.
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3699 
3700 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3701 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3702     IOOptionBits * oldState )
3703 {
3704 	IOReturn err = kIOReturnNotReady;
3705 
3706 	if (kIOMemoryThreadSafe & _flags) {
3707 		LOCK;
3708 	}
3709 	if (_memRef) {
3710 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3711 	}
3712 	if (kIOMemoryThreadSafe & _flags) {
3713 		UNLOCK;
3714 	}
3715 
3716 	return err;
3717 }
3718 
3719 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3720 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3721     int newLedgerTag,
3722     IOOptionBits newLedgerOptions )
3723 {
3724 	IOReturn      err = kIOReturnSuccess;
3725 
3726 	assert(!(kIOMemoryRemote & _flags));
3727 	if (kIOMemoryRemote & _flags) {
3728 		return kIOReturnNotAttached;
3729 	}
3730 
3731 	if (iokit_iomd_setownership_enabled == FALSE) {
3732 		return kIOReturnUnsupported;
3733 	}
3734 
3735 	if (_memRef) {
3736 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3737 	} else {
3738 		err = kIOReturnUnsupported;
3739 	}
3740 
3741 	return err;
3742 }
3743 
3744 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3745 IOMemoryDescriptor::setOwnership( task_t newOwner,
3746     int newLedgerTag,
3747     IOOptionBits newLedgerOptions )
3748 {
3749 	IOReturn err = kIOReturnNotReady;
3750 
3751 	assert(!(kIOMemoryRemote & _flags));
3752 	if (kIOMemoryRemote & _flags) {
3753 		return kIOReturnNotAttached;
3754 	}
3755 
3756 	if (iokit_iomd_setownership_enabled == FALSE) {
3757 		return kIOReturnUnsupported;
3758 	}
3759 
3760 	if (kIOMemoryThreadSafe & _flags) {
3761 		LOCK;
3762 	}
3763 	if (_memRef) {
3764 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3765 	} else {
3766 		IOMultiMemoryDescriptor * mmd;
3767 		IOSubMemoryDescriptor   * smd;
3768 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3769 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3770 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3771 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3772 		}
3773 	}
3774 	if (kIOMemoryThreadSafe & _flags) {
3775 		UNLOCK;
3776 	}
3777 
3778 	return err;
3779 }
3780 
3781 
3782 uint64_t
getDMAMapLength(uint64_t * offset)3783 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3784 {
3785 	uint64_t length;
3786 
3787 	if (_memRef) {
3788 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3789 	} else {
3790 		IOByteCount       iterate, segLen;
3791 		IOPhysicalAddress sourceAddr, sourceAlign;
3792 
3793 		if (kIOMemoryThreadSafe & _flags) {
3794 			LOCK;
3795 		}
3796 		length = 0;
3797 		iterate = 0;
3798 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3799 			sourceAlign = (sourceAddr & page_mask);
3800 			if (offset && !iterate) {
3801 				*offset = sourceAlign;
3802 			}
3803 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3804 			iterate += segLen;
3805 		}
3806 		if (kIOMemoryThreadSafe & _flags) {
3807 			UNLOCK;
3808 		}
3809 	}
3810 
3811 	return length;
3812 }
3813 
3814 
3815 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3816 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3817     IOByteCount * dirtyPageCount )
3818 {
3819 	IOReturn err = kIOReturnNotReady;
3820 
3821 	assert(!(kIOMemoryRemote & _flags));
3822 	if (kIOMemoryRemote & _flags) {
3823 		return kIOReturnNotAttached;
3824 	}
3825 
3826 	if (kIOMemoryThreadSafe & _flags) {
3827 		LOCK;
3828 	}
3829 	if (_memRef) {
3830 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3831 	} else {
3832 		IOMultiMemoryDescriptor * mmd;
3833 		IOSubMemoryDescriptor   * smd;
3834 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3835 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3836 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3837 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3838 		}
3839 	}
3840 	if (kIOMemoryThreadSafe & _flags) {
3841 		UNLOCK;
3842 	}
3843 
3844 	return err;
3845 }
3846 
3847 
3848 #if defined(__arm__) || defined(__arm64__)
3849 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3850 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3851 #else /* defined(__arm__) || defined(__arm64__) */
3852 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3853 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3854 #endif /* defined(__arm__) || defined(__arm64__) */
3855 
3856 static void
SetEncryptOp(addr64_t pa,unsigned int count)3857 SetEncryptOp(addr64_t pa, unsigned int count)
3858 {
3859 	ppnum_t page, end;
3860 
3861 	page = (ppnum_t) atop_64(round_page_64(pa));
3862 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3863 	for (; page < end; page++) {
3864 		pmap_clear_noencrypt(page);
3865 	}
3866 }
3867 
3868 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3869 ClearEncryptOp(addr64_t pa, unsigned int count)
3870 {
3871 	ppnum_t page, end;
3872 
3873 	page = (ppnum_t) atop_64(round_page_64(pa));
3874 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3875 	for (; page < end; page++) {
3876 		pmap_set_noencrypt(page);
3877 	}
3878 }
3879 
/*
 * Apply a per-segment operation across [offset, offset + length) of this
 * descriptor: incoherent-I/O cache flush/store, or set/clear of the
 * encryption attribute.  Walks unmapped (kIOMemoryMapperNone) physical
 * segments and invokes the selected routine on each.
 *
 * Returns kIOReturnUnsupported for unknown options, kIOReturnNotAttached
 * for remote descriptors, kIOReturnUnderrun if a segment lookup fails
 * before the full length is covered, else kIOReturnSuccess.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm__) || defined(__arm64__)
	// ARM variants take the remaining count and return a status via 'res'.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// I/O is coherent on this configuration: make one informational
		// call and report success without touching any segments.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Same coherent-I/O shortcut as the flush case above.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

#if defined(__arm__) || defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm__) || defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's extent.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// Clamp to the 'unsigned int' count parameter of the callees.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm__) || defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A non-zero result from the extended routine ends the walk;
			// remaining is zeroed so success is still reported below.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm__) || defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
3994 
3995 /*
3996  *
3997  */
3998 
3999 #if defined(__i386__) || defined(__x86_64__)
4000 
4001 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4002 
4003 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4004  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4005  * kernel non-text data -- should we just add another range instead?
4006  */
4007 #define io_kernel_static_start  vm_kernel_stext
4008 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4009 
4010 #elif defined(__arm__) || defined(__arm64__)
4011 
4012 extern vm_offset_t              static_memory_end;
4013 
4014 #if defined(__arm64__)
4015 #define io_kernel_static_start vm_kext_base
4016 #else /* defined(__arm64__) */
4017 #define io_kernel_static_start vm_kernel_stext
4018 #endif /* defined(__arm64__) */
4019 
4020 #define io_kernel_static_end    static_memory_end
4021 
4022 #else
4023 #error io_kernel_static_end is undefined for this architecture
4024 #endif
4025 
4026 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4027 io_get_kernel_static_upl(
4028 	vm_map_t                /* map */,
4029 	uintptr_t               offset,
4030 	upl_size_t              *upl_size,
4031 	unsigned int            *page_offset,
4032 	upl_t                   *upl,
4033 	upl_page_info_array_t   page_list,
4034 	unsigned int            *count,
4035 	ppnum_t                 *highest_page)
4036 {
4037 	unsigned int pageCount, page;
4038 	ppnum_t phys;
4039 	ppnum_t highestPage = 0;
4040 
4041 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4042 	if (pageCount > *count) {
4043 		pageCount = *count;
4044 	}
4045 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4046 
4047 	*upl = NULL;
4048 	*page_offset = ((unsigned int) page_mask & offset);
4049 
4050 	for (page = 0; page < pageCount; page++) {
4051 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4052 		if (!phys) {
4053 			break;
4054 		}
4055 		page_list[page].phys_addr = phys;
4056 		page_list[page].free_when_done = 0;
4057 		page_list[page].absent    = 0;
4058 		page_list[page].dirty     = 0;
4059 		page_list[page].precious  = 0;
4060 		page_list[page].device    = 0;
4061 		if (phys > highestPage) {
4062 			highestPage = phys;
4063 		}
4064 	}
4065 
4066 	*highest_page = highestPage;
4067 
4068 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4069 }
4070 
/*
 * Wire down the pages of a virtual-type (Virtual/Virtual64/UIO) descriptor
 * by creating UPLs for each range, recording an ioPLBlock per UPL in
 * _memoryEntries.  On re-wire (_wireCount != 0) only validates that a
 * read-only preparation is not being upgraded to writable.  On failure all
 * UPLs created so far are aborted and deallocated.
 */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: a read-only preparation cannot be re-prepared
		// for writing.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		if (kIODirectionPrepareToPhys32 & forDirection) {
			// Caller needs addresses below 4GB; only force it in the UPL
			// when no mapper will remap for us.
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault    & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Guard against the page-info area overflowing 'unsigned int'.
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex  = 0;
		IOByteCount mdOffset    = 0;
		ppnum_t highestPage     = 0;
		bool         byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t    numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes    = memRefEntry->size;
				// startPage is unused (-1) for memRef-backed ranges.
				startPage   = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t     ioplSize;
				unsigned int   numPageInfo;

				// Determine the physical span backing this chunk so the
				// UPL size can be clipped accordingly.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					// Statically-mapped kernel memory: synthesize a page
					// list instead of creating a real UPL.
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	// Failure: abort and release every UPL recorded so far, then reset the
	// _memoryEntries data area to its empty state.
	{
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Translate Mach errors to IOKit equivalents.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4429 
4430 bool
initMemoryEntries(size_t size,IOMapper * mapper)4431 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4432 {
4433 	ioGMDData * dataP;
4434 
4435 	if (size > UINT_MAX) {
4436 		return false;
4437 	}
4438 	if (!_memoryEntries) {
4439 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4440 		if (!_memoryEntries) {
4441 			return false;
4442 		}
4443 	} else if (!_memoryEntries->initWithCapacity(size)) {
4444 		return false;
4445 	}
4446 
4447 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4448 	dataP = getDataP(_memoryEntries);
4449 
4450 	if (mapper == kIOMapperWaitSystem) {
4451 		IOMapper::checkForSystemMapper();
4452 		mapper = IOMapper::gSystem;
4453 	}
4454 	dataP->fMapper               = mapper;
4455 	dataP->fPageCnt              = 0;
4456 	dataP->fMappedBase           = 0;
4457 	dataP->fDMAMapNumAddressBits = 64;
4458 	dataP->fDMAMapAlignment      = 0;
4459 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4460 	dataP->fCompletionError      = false;
4461 	dataP->fMappedBaseValid      = false;
4462 
4463 	return true;
4464 }
4465 
4466 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4467 IOMemoryDescriptor::dmaMap(
4468 	IOMapper                    * mapper,
4469 	IOMemoryDescriptor          * memory,
4470 	IODMACommand                * command,
4471 	const IODMAMapSpecification * mapSpec,
4472 	uint64_t                      offset,
4473 	uint64_t                      length,
4474 	uint64_t                    * mapAddress,
4475 	uint64_t                    * mapLength)
4476 {
4477 	IOReturn err;
4478 	uint32_t mapOptions;
4479 
4480 	mapOptions = 0;
4481 	mapOptions |= kIODMAMapReadAccess;
4482 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4483 		mapOptions |= kIODMAMapWriteAccess;
4484 	}
4485 
4486 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4487 	    mapSpec, command, NULL, mapAddress, mapLength);
4488 
4489 	if (kIOReturnSuccess == err) {
4490 		dmaMapRecord(mapper, command, *mapLength);
4491 	}
4492 
4493 	return err;
4494 }
4495 
4496 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4497 IOMemoryDescriptor::dmaMapRecord(
4498 	IOMapper                    * mapper,
4499 	IODMACommand                * command,
4500 	uint64_t                      mapLength)
4501 {
4502 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4503 	kern_allocation_name_t alloc;
4504 	int16_t                prior;
4505 
4506 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4507 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4508 	}
4509 
4510 	if (!command) {
4511 		return;
4512 	}
4513 	prior = OSAddAtomic16(1, &_dmaReferences);
4514 	if (!prior) {
4515 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4516 			_mapName  = alloc;
4517 			mapLength = _length;
4518 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4519 		} else {
4520 			_mapName = NULL;
4521 		}
4522 	}
4523 }
4524 
/*
 * Undo a DMA mapping made through dmaMap().  For command-based mappings the
 * reference count is dropped first (panicking on underflow); the allocation
 * accounting added by dmaMapRecord() is reversed when the last reference
 * goes away.  A zero mapLength is a no-op that returns success.
 */
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	uint64_t                      offset,
	uint64_t                      mapAddress,
	uint64_t                      mapLength)
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		// Drop the reference before the zero-length early return so the
		// count stays balanced with dmaMapRecord().
		mapName = _mapName;
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	if ((alloc = mapper->fAllocName)) {
		kern_allocation_update_size(alloc, -mapLength);
		// prior == 1 means this was the last outstanding reference:
		// reverse the per-tag subtotal recorded at first map time.
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4568 
/*
 * DMA-map a general memory descriptor.  Host-only descriptors succeed
 * without mapping; remote ones fail.  Physical-type descriptors, or partial
 * (offset/length-clipped) requests, go through the superclass path; fully
 * wired virtual descriptors are mapped directly from their page list.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		// NB: this local deliberately shadows the 'mapSpec' parameter; the
		// descriptor's own address-bits/alignment constraints are used.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole page-aligned length: the page list has no holes.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4642 
4643 /*
4644  * prepare
4645  *
4646  * Prepare the memory for an I/O transfer.  This involves paging in
4647  * the memory, if necessary, and wiring it down for the duration of
4648  * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
4651  */
4652 
/*
 * Prepare (wire) the descriptor for I/O.  Physical-type descriptors need no
 * preparation; virtual types are wired via wireVirtual().  The wire count
 * is incremented on success, and on the first prepare the clear-encrypted
 * operation is applied if requested, and physical segments are emitted for
 * ktrace.  Serialized by _prepareLock when present.
 */
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn     error    = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		// Optionally refuse to wire while the task is being hard-throttled.
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare only.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4702 
4703 /*
4704  * complete
4705  *
4706  * Complete processing of the memory after an I/O transfer finishes.
4707  * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
4710  */
4711 
/*
 * Complete (unwire) the descriptor after I/O.  Decrements the wire count;
 * on the final complete, DMA mappings are torn down and the UPLs created by
 * wireVirtual() are committed (or aborted on recorded completion error) and
 * released.  kIODirectionCompleteWithDataValid marks pages valid without
 * unwiring.  Serialized by _prepareLock when present.
 */
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		if (kIODirectionCompleteWithError & forDirection) {
			// Remember the error; the final complete aborts instead of
			// committing the UPLs.
			dataP->fCompletionError = true;
		}

		// Restore the encrypted attribute cleared by the first prepare().
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last complete: tear everything down.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4816 
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	// 64-bit mapping protocol: *__address carries the IOMemoryMap
	// placeholder object, not a raw address.
	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// The requested window must lie entirely within the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
	}

	// mapping source == dest? (could be much better)
	// A single-range descriptor mapped back into its own task can simply
	// reuse the original virtual range; mark it static so it is never
	// deallocated on unmap.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory entry reference, writable unless
	// a read-only mapping was requested.
	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	// kIOMapReference|kIOMapUnique: swap an existing unique mapping onto
	// new backing memory by transposing UPLs (see IOMemoryMap::redirect).
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			// Only legal while we hold the sole memory entry reference.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully drop the recursive gIOMemoryLock around
			// upl_transpose, then retake it the same number of times.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: map the memory entry into the target map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on developement case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				// Undo the mapping if the pager could not be populated.
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the buffer's cache mode when none was requested.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
4986 
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	// Recover the IOMemoryMap that embeds this tracking record.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report mappings whose task's VM map is still current.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5007 
5008 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5009 IOGeneralMemoryDescriptor::doUnmap(
5010 	vm_map_t                addressMap,
5011 	IOVirtualAddress        __address,
5012 	IOByteCount             __length )
5013 {
5014 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5015 	IOReturn ret;
5016 	ret = super::doUnmap(addressMap, __address, __length);
5017 	traceInterval.setEndArg1(ret);
5018 	return ret;
5019 }
5020 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// IOMemoryMap metaclass boilerplate; the reserved slots keep the vtable
// layout stable for binary compatibility.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5036 
5037 /* ex-inline function implementation */
5038 IOPhysicalAddress
getPhysicalAddress()5039 IOMemoryMap::getPhysicalAddress()
5040 {
5041 	return getPhysicalSegment( 0, NULL );
5042 }
5043 
5044 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5045 
5046 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5047 IOMemoryMap::init(
5048 	task_t                  intoTask,
5049 	mach_vm_address_t       toAddress,
5050 	IOOptionBits            _options,
5051 	mach_vm_size_t          _offset,
5052 	mach_vm_size_t          _length )
5053 {
5054 	if (!intoTask) {
5055 		return false;
5056 	}
5057 
5058 	if (!super::init()) {
5059 		return false;
5060 	}
5061 
5062 	fAddressMap  = get_task_map(intoTask);
5063 	if (!fAddressMap) {
5064 		return false;
5065 	}
5066 	vm_map_reference(fAddressMap);
5067 
5068 	fAddressTask = intoTask;
5069 	fOptions     = _options;
5070 	fLength      = _length;
5071 	fOffset      = _offset;
5072 	fAddress     = toAddress;
5073 
5074 	return true;
5075 }
5076 
5077 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5078 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5079 {
5080 	if (!_memory) {
5081 		return false;
5082 	}
5083 
5084 	if (!fSuperMap) {
5085 		if ((_offset + fLength) > _memory->getLength()) {
5086 			return false;
5087 		}
5088 		fOffset = _offset;
5089 	}
5090 
5091 
5092 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5093 	if (fMemory) {
5094 		if (fMemory != _memory) {
5095 			fMemory->removeMapping(this);
5096 		}
5097 	}
5098 	fMemory = os::move(tempval);
5099 
5100 	return true;
5101 }
5102 
5103 IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5104 IOMemoryDescriptor::doMap(
5105 	vm_map_t                __addressMap,
5106 	IOVirtualAddress *      __address,
5107 	IOOptionBits            options,
5108 	IOByteCount             __offset,
5109 	IOByteCount             __length )
5110 {
5111 	return kIOReturnUnsupported;
5112 }
5113 
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	// While the descriptor is redirected, block the faulting thread
	// until redirect(..., false) clears the flag and issues WAKEUP.
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5130 
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from offset 0.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary; the sub-page
	// offset is folded back into size/segLen below.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Flag device-pager use over pages the VM already manages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Populate one chunk at a time: the whole (rounded) segment for
		// a contiguous pager, otherwise a single page per call.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment of the descriptor.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Bytes left over mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5233 
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// 64-bit mapping protocol: __address is really the IOMemoryMap and
	// __length must be 0 (the real range comes from the map object).
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	// Overwrite mappings do not own their range; never deallocate it.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffer memory lives in a pageable submap, not the
		// kernel map proper; deallocate from the right map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5278 
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	// Set/clear the redirected flag; handleFault() sleeps while it is set.
	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		// Propagate the redirect to every mapping of this descriptor.
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// On un-redirect, repopulate kernel mappings through
				// the device pager so they are valid immediately.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads blocked in handleFault().
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5333 
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	// Submaps are not redirected directly (handled via the parent map).
	if (fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unmap the pages unless this map belongs to the "safe"
			// task, or the mapping is marked kIOMapStatic.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// For the safe task, flip write-combined mappings to
				// cache-inhibited while redirected (and back after).
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Physical descriptors track their redirect state themselves; only
	// forward when the state actually changes.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5377 
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn    err;

	LOCK;

	// Only a live, top-level, non-static mapping owns a VM range that
	// needs deallocating; everything else succeeds trivially.
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Drop the vm_map reference taken in init().
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5403 
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	// If the user client requested unmap-on-death, tear the mapping down
	// fully; otherwise just drop the tracking record and task state.
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Release the vm_map reference and detach from the dead task.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5425 
5426 IOReturn
userClientUnmap(void)5427 IOMemoryMap::userClientUnmap( void )
5428 {
5429 	fUserClientUnmap = true;
5430 	return kIOReturnSuccess;
5431 }
5432 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptors _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// freeWhen == 2: free once only the descriptor's _mappings reference
	// and this one remain.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5444 
void
IOMemoryMap::free()
{
	// Tear down the VM mapping first; unmap() also drops fAddressMap.
	unmap();

	// Detach from the memory descriptor's mapping set.
	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A leftover redirect UPL must be committed before deallocation.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5468 
5469 IOByteCount
getLength()5470 IOMemoryMap::getLength()
5471 {
5472 	return fLength;
5473 }
5474 
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): the parent map's result is discarded here; the
		// call appears kept only for its side effects — confirm intent.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		// A 32-bit IOVirtualAddress cannot represent a 64-bit mapping.
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5490 
#ifndef __LP64__
// Full-width accessors, compiled only for 32-bit kernels.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5504 
5505 
5506 task_t
getAddressTask()5507 IOMemoryMap::getAddressTask()
5508 {
5509 	if (fSuperMap) {
5510 		return fSuperMap->getAddressTask();
5511 	} else {
5512 		return fAddressTask;
5513 	}
5514 }
5515 
5516 IOOptionBits
getMapOptions()5517 IOMemoryMap::getMapOptions()
5518 {
5519 	return fOptions;
5520 }
5521 
5522 IOMemoryDescriptor *
getMemoryDescriptor()5523 IOMemoryMap::getMemoryDescriptor()
5524 {
5525 	return fMemory.get();
5526 }
5527 
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t              task      = newMapping->getAddressTask();
	mach_vm_address_t   toAddress = newMapping->fAddress;
	IOOptionBits        _options  = newMapping->fOptions;
	mach_vm_size_t      _offset   = newMapping->fOffset;
	mach_vm_size_t      _length   = newMapping->fLength;

	// The request must target the same task/VM map as this mapping.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only and guard attributes must match exactly.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// Cache mode must match unless the request takes the default.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must coincide with this mapping's address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested window must fall entirely inside this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	// Exact match: reuse this map (retained); otherwise turn newMapping
	// into a submap view onto the requested window.
	if ((fLength == _length) && (!_offset)) {
		retain();
		newMapping = this;
	} else {
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset   = fOffset + _offset;
		newMapping->fAddress  = fAddress + _offset;
	}

	return newMapping;
}
5577 
5578 IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5579 IOMemoryMap::wireRange(
5580 	uint32_t                options,
5581 	mach_vm_size_t          offset,
5582 	mach_vm_size_t          length)
5583 {
5584 	IOReturn kr;
5585 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5586 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5587 	vm_prot_t prot;
5588 
5589 	prot = (kIODirectionOutIn & options);
5590 	if (prot) {
5591 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5592 	} else {
5593 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5594 	}
5595 
5596 	return kr;
5597 }
5598 
5599 
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	// Translate the map-relative offset into a descriptor offset and
	// delegate to the backing memory descriptor.
	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5619 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// The methods below belong to IOMemoryDescriptor (an OSObject subclass).
#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5626 
5627 void
initialize(void)5628 IOMemoryDescriptor::initialize( void )
5629 {
5630 	if (NULL == gIOMemoryLock) {
5631 		gIOMemoryLock = IORecursiveLockAlloc();
5632 	}
5633 
5634 	gIOLastPage = IOGetLastPageNumber();
5635 }
5636 
5637 void
free(void)5638 IOMemoryDescriptor::free( void )
5639 {
5640 	if (_mappings) {
5641 		_mappings.reset();
5642 	}
5643 
5644 	if (reserved) {
5645 		cleanKernelReserved(reserved);
5646 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5647 		reserved = NULL;
5648 	}
5649 	super::free();
5650 }
5651 
5652 OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5653 IOMemoryDescriptor::setMapping(
5654 	task_t                  intoTask,
5655 	IOVirtualAddress        mapAddress,
5656 	IOOptionBits            options )
5657 {
5658 	return createMappingInTask( intoTask, mapAddress,
5659 	           options | kIOMapStatic,
5660 	           0, getLength());
5661 }
5662 
5663 OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5664 IOMemoryDescriptor::map(
5665 	IOOptionBits            options )
5666 {
5667 	return createMappingInTask( kernel_task, 0,
5668 	           options | kIOMapAnywhere,
5669 	           0, getLength());
5670 }
5671 
#ifndef __LP64__
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	// Legacy 32-bit entry point: a fixed address cannot be expressed
	// safely for a 64-bit target map through this interface.
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress, options, offset, length);
}
#endif /* !__LP64__ */
5690 
5691 OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5692 IOMemoryDescriptor::createMappingInTask(
5693 	task_t                  intoTask,
5694 	mach_vm_address_t       atAddress,
5695 	IOOptionBits            options,
5696 	mach_vm_size_t          offset,
5697 	mach_vm_size_t          length)
5698 {
5699 	IOMemoryMap * result;
5700 	IOMemoryMap * mapping;
5701 
5702 	if (0 == length) {
5703 		length = getLength();
5704 	}
5705 
5706 	mapping = new IOMemoryMap;
5707 
5708 	if (mapping
5709 	    && !mapping->init( intoTask, atAddress,
5710 	    options, offset, length )) {
5711 		mapping->release();
5712 		mapping = NULL;
5713 	}
5714 
5715 	if (mapping) {
5716 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5717 	} else {
5718 		result = nullptr;
5719 	}
5720 
5721 #if DEBUG
5722 	if (!result) {
5723 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5724 		    this, atAddress, (uint32_t) options, offset, length);
5725 	}
5726 #endif
5727 
5728 	// already retained through makeMapping
5729 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5730 
5731 	return retval;
5732 }
5733 
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	// Thin legacy shim onto the 64-bit offset variant.
	return redirect(newBackingMemory, options, static_cast<mach_vm_size_t>(offset));
}
#endif
5743 
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// First call: take an access-blocking UPL over the current
			// backing pages while the mapping is switched.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Remap this object in place via the
					// kIOMapUnique|kIOMapReference path of doMap();
					// makeMapping must hand back this same object.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Release the access-blocking UPL, re-enabling the pages.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5804 
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// Internal mapping factory.  With kIOMap64Bit (mandatory here),
	// __address actually carries an IOMemoryMap * prototype rather than a
	// virtual address; this routine either adopts it, shares a compatible
	// existing mapping, or asks doMap() to instantiate it.  The prototype's
	// reference is consumed on failure; the result is returned retained
	// (see createMappingInTask), or NULL.
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	// The prototype map is smuggled in through the __address parameter.
	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Static mapping: adopt the prototype as-is, offset 0.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptor: build a private descriptor over
				// just the physical range being mapped, and rebase the
				// mapping's offset to 0 within it.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Either an existing mapping was shared, or a reference to
				// one was required but not found; the unused prototype is
				// released in both cases.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			// doMap failed: consume the prototype's reference.
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5902 
5903 void
addMapping(IOMemoryMap * mapping)5904 IOMemoryDescriptor::addMapping(
5905 	IOMemoryMap * mapping )
5906 {
5907 	if (mapping) {
5908 		if (NULL == _mappings) {
5909 			_mappings = OSSet::withCapacity(1);
5910 		}
5911 		if (_mappings) {
5912 			_mappings->setObject( mapping );
5913 		}
5914 	}
5915 }
5916 
5917 void
removeMapping(IOMemoryMap * mapping)5918 IOMemoryDescriptor::removeMapping(
5919 	IOMemoryMap * mapping )
5920 {
5921 	if (_mappings) {
5922 		_mappings->removeObject( mapping);
5923 	}
5924 }
5925 
// Store mapper option bits for this descriptor; read back via
// getMapperOptions().
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
5931 
// Return the option bits previously stored by setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
5937 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy 32-bit entry points are retired: each unconditionally
// fails, forcing callers onto initWithOptions().
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

// Obsolete accessor: no virtual segment is available through this legacy
// interface; always returns NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
5994 
5995 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5996 
5997 bool
serialize(OSSerialize * s) const5998 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5999 {
6000 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6001 	OSSharedPtr<OSObject>           values[2] = {NULL};
6002 	OSSharedPtr<OSArray>            array;
6003 
6004 	struct SerData {
6005 		user_addr_t address;
6006 		user_size_t length;
6007 	};
6008 
6009 	unsigned int index;
6010 
6011 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6012 
6013 	if (s == NULL) {
6014 		return false;
6015 	}
6016 
6017 	array = OSArray::withCapacity(4);
6018 	if (!array) {
6019 		return false;
6020 	}
6021 
6022 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6023 	if (!vcopy) {
6024 		return false;
6025 	}
6026 
6027 	keys[0] = OSSymbol::withCString("address");
6028 	keys[1] = OSSymbol::withCString("length");
6029 
6030 	// Copy the volatile data so we don't have to allocate memory
6031 	// while the lock is held.
6032 	LOCK;
6033 	if (vcopy.size() == _rangesCount) {
6034 		Ranges vec = _ranges;
6035 		for (index = 0; index < vcopy.size(); index++) {
6036 			mach_vm_address_t addr; mach_vm_size_t len;
6037 			getAddrLenForInd(addr, len, type, vec, index);
6038 			vcopy[index].address = addr;
6039 			vcopy[index].length  = len;
6040 		}
6041 	} else {
6042 		// The descriptor changed out from under us.  Give up.
6043 		UNLOCK;
6044 		return false;
6045 	}
6046 	UNLOCK;
6047 
6048 	for (index = 0; index < vcopy.size(); index++) {
6049 		user_addr_t addr = vcopy[index].address;
6050 		IOByteCount len = (IOByteCount) vcopy[index].length;
6051 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6052 		if (values[0] == NULL) {
6053 			return false;
6054 		}
6055 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6056 		if (values[1] == NULL) {
6057 			return false;
6058 		}
6059 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6060 		if (dict == NULL) {
6061 			return false;
6062 		}
6063 		array->setObject(dict.get());
6064 		dict.reset();
6065 		values[0].reset();
6066 		values[1].reset();
6067 	}
6068 
6069 	return array->serialize(s);
6070 }
6071 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6072 
// Reserved vtable padding slots for binary compatibility.  Slot 0 is in
// use; on LP64 slots 1-7 remain unused, while 32-bit builds consume them
// (for the legacy virtual overloads compiled under !__LP64__ above).
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6099 
6100 /* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	// Convenience accessor: physical address of the segment at offset 0,
	// discarding the segment length.
	return getPhysicalSegment( 0, NULL );
}
6106 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	// Factory: allocate an instance and initialize its backing store,
	// returning NULL on either failure.
	auto instance = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (!instance) {
		return nullptr;
	}
	if (!instance->initWithCapacity(capacity)) {
		return nullptr;
	}
	return instance;
}
6118 
6119 bool
initWithCapacity(size_t capacity)6120 _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6121 {
6122 	if (_data && (!capacity || (_capacity < capacity))) {
6123 		freeMemory();
6124 	}
6125 
6126 	if (!OSObject::init()) {
6127 		return false;
6128 	}
6129 
6130 	if (!_data && capacity) {
6131 		_data = IOMalloc(capacity);
6132 		if (!_data) {
6133 			return false;
6134 		}
6135 		_capacity = capacity;
6136 	}
6137 
6138 	_length = 0;
6139 
6140 	return true;
6141 }
6142 
// Destructor hook: release the backing allocation first, then run the
// superclass teardown.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6149 
6150 void
freeMemory()6151 _IOMemoryDescriptorMixedData::freeMemory()
6152 {
6153 	IOFree(_data, _capacity);
6154 	_data = nullptr;
6155 	_capacity = _length = 0;
6156 }
6157 
6158 bool
appendBytes(const void * bytes,size_t length)6159 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6160 {
6161 	const auto oldLength = getLength();
6162 	size_t newLength;
6163 	if (os_add_overflow(oldLength, length, &newLength)) {
6164 		return false;
6165 	}
6166 
6167 	if (newLength > _capacity) {
6168 		void * const newData = IOMalloc(newLength);
6169 		if (_data) {
6170 			bcopy(_data, newData, oldLength);
6171 			IOFree(_data, _capacity);
6172 		}
6173 		_data = newData;
6174 		_capacity = newLength;
6175 	}
6176 
6177 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6178 	if (bytes) {
6179 		bcopy(bytes, dest, length);
6180 	} else {
6181 		bzero(dest, length);
6182 	}
6183 
6184 	_length = newLength;
6185 
6186 	return true;
6187 }
6188 
6189 void
setLength(size_t length)6190 _IOMemoryDescriptorMixedData::setLength(size_t length)
6191 {
6192 	if (!_data || (length > _capacity)) {
6193 		void * const newData = IOMallocZero(length);
6194 		if (_data) {
6195 			bcopy(_data, newData, _length);
6196 			IOFree(_data, _capacity);
6197 		}
6198 		_data = newData;
6199 		_capacity = length;
6200 	}
6201 	_length = length;
6202 }
6203 
6204 const void *
getBytes() const6205 _IOMemoryDescriptorMixedData::getBytes() const
6206 {
6207 	return _length ? _data : nullptr;
6208 }
6209 
6210 size_t
getLength() const6211 _IOMemoryDescriptorMixedData::getLength() const
6212 {
6213 	return _data ? _length : 0;
6214 }
6215